prefect-client 3.1.5__py3-none-any.whl → 3.1.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in the supported public registry. It is provided for informational purposes only.
- prefect/__init__.py +3 -0
- prefect/_internal/compatibility/migration.py +1 -1
- prefect/_internal/concurrency/api.py +52 -52
- prefect/_internal/concurrency/calls.py +59 -35
- prefect/_internal/concurrency/cancellation.py +34 -18
- prefect/_internal/concurrency/event_loop.py +7 -6
- prefect/_internal/concurrency/threads.py +41 -33
- prefect/_internal/concurrency/waiters.py +28 -21
- prefect/_internal/pydantic/v1_schema.py +2 -2
- prefect/_internal/pydantic/v2_schema.py +10 -9
- prefect/_internal/schemas/bases.py +9 -7
- prefect/_internal/schemas/validators.py +2 -1
- prefect/_version.py +3 -3
- prefect/automations.py +53 -47
- prefect/blocks/abstract.py +12 -10
- prefect/blocks/core.py +4 -2
- prefect/cache_policies.py +11 -11
- prefect/client/__init__.py +3 -1
- prefect/client/base.py +36 -37
- prefect/client/cloud.py +26 -19
- prefect/client/collections.py +2 -2
- prefect/client/orchestration.py +342 -273
- prefect/client/schemas/__init__.py +24 -0
- prefect/client/schemas/actions.py +123 -116
- prefect/client/schemas/objects.py +110 -81
- prefect/client/schemas/responses.py +18 -18
- prefect/client/schemas/schedules.py +136 -93
- prefect/client/subscriptions.py +28 -14
- prefect/client/utilities.py +32 -36
- prefect/concurrency/asyncio.py +6 -9
- prefect/concurrency/sync.py +35 -5
- prefect/context.py +39 -31
- prefect/deployments/flow_runs.py +3 -5
- prefect/docker/__init__.py +1 -1
- prefect/events/schemas/events.py +25 -20
- prefect/events/utilities.py +1 -2
- prefect/filesystems.py +3 -3
- prefect/flow_engine.py +61 -21
- prefect/flow_runs.py +3 -3
- prefect/flows.py +214 -170
- prefect/logging/configuration.py +1 -1
- prefect/logging/highlighters.py +1 -2
- prefect/logging/loggers.py +30 -20
- prefect/main.py +17 -24
- prefect/runner/runner.py +43 -21
- prefect/runner/server.py +30 -32
- prefect/runner/submit.py +3 -6
- prefect/runner/utils.py +6 -6
- prefect/runtime/flow_run.py +7 -0
- prefect/settings/constants.py +2 -2
- prefect/settings/legacy.py +1 -1
- prefect/settings/models/server/events.py +10 -0
- prefect/task_engine.py +72 -19
- prefect/task_runners.py +2 -2
- prefect/tasks.py +46 -33
- prefect/telemetry/bootstrap.py +15 -2
- prefect/telemetry/run_telemetry.py +107 -0
- prefect/transactions.py +14 -14
- prefect/types/__init__.py +1 -4
- prefect/utilities/_engine.py +96 -0
- prefect/utilities/annotations.py +25 -18
- prefect/utilities/asyncutils.py +126 -140
- prefect/utilities/callables.py +87 -78
- prefect/utilities/collections.py +278 -117
- prefect/utilities/compat.py +13 -21
- prefect/utilities/context.py +6 -5
- prefect/utilities/dispatch.py +23 -12
- prefect/utilities/dockerutils.py +33 -32
- prefect/utilities/engine.py +126 -239
- prefect/utilities/filesystem.py +18 -15
- prefect/utilities/hashing.py +10 -11
- prefect/utilities/importtools.py +40 -27
- prefect/utilities/math.py +9 -5
- prefect/utilities/names.py +3 -3
- prefect/utilities/processutils.py +121 -57
- prefect/utilities/pydantic.py +41 -36
- prefect/utilities/render_swagger.py +22 -12
- prefect/utilities/schema_tools/__init__.py +2 -1
- prefect/utilities/schema_tools/hydration.py +50 -43
- prefect/utilities/schema_tools/validation.py +52 -42
- prefect/utilities/services.py +13 -12
- prefect/utilities/templating.py +45 -45
- prefect/utilities/text.py +2 -1
- prefect/utilities/timeout.py +4 -4
- prefect/utilities/urls.py +9 -4
- prefect/utilities/visualization.py +46 -24
- prefect/variables.py +9 -8
- prefect/workers/base.py +15 -8
- {prefect_client-3.1.5.dist-info → prefect_client-3.1.6.dist-info}/METADATA +4 -2
- {prefect_client-3.1.5.dist-info → prefect_client-3.1.6.dist-info}/RECORD +93 -91
- {prefect_client-3.1.5.dist-info → prefect_client-3.1.6.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.5.dist-info → prefect_client-3.1.6.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.5.dist-info → prefect_client-3.1.6.dist-info}/top_level.txt +0 -0
prefect/client/orchestration.py
CHANGED
@@ -2,21 +2,10 @@ import asyncio
 import datetime
 import ssl
 import warnings
+from collections.abc import Iterable
 from contextlib import AsyncExitStack
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    Iterable,
-    List,
-    Literal,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-    overload,
-)
+from logging import Logger
+from typing import TYPE_CHECKING, Any, Literal, NoReturn, Optional, Union, overload
 from uuid import UUID, uuid4

 import certifi
@@ -27,7 +16,7 @@ import pydantic
 from asgi_lifespan import LifespanManager
 from packaging import version
 from starlette import status
-from typing_extensions import ParamSpec
+from typing_extensions import ParamSpec, Self, TypeVar

 import prefect
 import prefect.exceptions
@@ -138,6 +127,7 @@ from prefect.settings import (
     PREFECT_TESTING_UNIT_TEST_MODE,
     get_current_settings,
 )
+from prefect.types import KeyValueLabelsField

 if TYPE_CHECKING:
     from prefect.flows import Flow as FlowObject
@@ -152,26 +142,29 @@ from prefect.client.base import (
 )

 P = ParamSpec("P")
-R = TypeVar("R")
+R = TypeVar("R", infer_variance=True)
+T = TypeVar("T")


 @overload
 def get_client(
-    httpx_settings: Optional[Dict[str, Any]] = ..., sync_client: Literal[False] = False
+    *,
+    httpx_settings: Optional[dict[str, Any]] = ...,
+    sync_client: Literal[False] = False,
 ) -> "PrefectClient":
     ...


 @overload
 def get_client(
-    httpx_settings: Optional[Dict[str, Any]] = ..., sync_client: Literal[True] = ...
+    *, httpx_settings: Optional[dict[str, Any]] = ..., sync_client: Literal[True] = ...
 ) -> "SyncPrefectClient":
     ...


 def get_client(
-    httpx_settings: Optional[Dict[str, Any]] = None, sync_client: bool = False
-):
+    httpx_settings: Optional[dict[str, Any]] = None, sync_client: bool = False
+) -> Union["SyncPrefectClient", "PrefectClient"]:
     """
     Retrieve a HTTP client for communicating with the Prefect REST API.

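The reworked `get_client` overloads tie the return type to the `sync_client` flag, which is keyword-only in the overloads. A minimal usage sketch, assuming a Prefect API is reachable through the usual `PREFECT_API_URL` or ephemeral-server settings:

```python
import asyncio

from prefect.client.orchestration import get_client


async def check_async() -> None:
    # The default sync_client=False matches the first overload and yields a PrefectClient
    async with get_client() as client:
        response = await client.hello()
        print("async hello:", response.status_code)


def check_sync() -> None:
    # sync_client=True matches the second overload and yields a SyncPrefectClient
    with get_client(sync_client=True) as client:
        print("sync hello:", client.hello().status_code)


if __name__ == "__main__":
    asyncio.run(check_async())
    check_sync()
```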
@@ -200,18 +193,21 @@ def get_client(

     if sync_client:
         if client_ctx := prefect.context.SyncClientContext.get():
-            if client_ctx.client and client_ctx._httpx_settings == httpx_settings:
+            if (
+                client_ctx.client
+                and getattr(client_ctx, "_httpx_settings", None) == httpx_settings
+            ):
                 return client_ctx.client
     else:
         if client_ctx := prefect.context.AsyncClientContext.get():
             if (
                 client_ctx.client
-                and client_ctx._httpx_settings == httpx_settings
-                and loop in (client_ctx.client._loop, None)
+                and getattr(client_ctx, "_httpx_settings", None) == httpx_settings
+                and loop in (getattr(client_ctx.client, "_loop", None), None)
             ):
                 return client_ctx.client

-    api = PREFECT_API_URL.value()
+    api: str = PREFECT_API_URL.value()
     server_type = None

     if not api and PREFECT_SERVER_ALLOW_EPHEMERAL_MODE:
@@ -277,7 +273,7 @@ class PrefectClient:
         *,
         api_key: Optional[str] = None,
         api_version: Optional[str] = None,
-        httpx_settings: Optional[Dict[str, Any]] = None,
+        httpx_settings: Optional[dict[str, Any]] = None,
         server_type: Optional[ServerType] = None,
     ) -> None:
         httpx_settings = httpx_settings.copy() if httpx_settings else {}
@@ -357,7 +353,7 @@ class PrefectClient:
            )

        # Connect to an in-process application
-        elif isinstance(api, ASGIApp):
+        else:
            self._ephemeral_app = api
            self.server_type = ServerType.EPHEMERAL

@@ -377,12 +373,6 @@ class PrefectClient:
            )
            httpx_settings.setdefault("base_url", "http://ephemeral-prefect/api")

-        else:
-            raise TypeError(
-                f"Unexpected type {type(api).__name__!r} for argument `api`. Expected"
-                " 'str' or 'ASGIApp/FastAPI'"
-            )
-
        # See https://www.python-httpx.org/advanced/#timeout-configuration
        httpx_settings.setdefault(
            "timeout",
@@ -426,9 +416,9 @@ class PrefectClient:
            if isinstance(server_transport, httpx.AsyncHTTPTransport):
                pool = getattr(server_transport, "_pool", None)
                if isinstance(pool, httpcore.AsyncConnectionPool):
-                    pool._retries = 3
+                    setattr(pool, "_retries", 3)

-        self.logger = get_logger("client")
+        self.logger: Logger = get_logger("client")

    @property
    def api_url(self) -> httpx.URL:
@@ -458,7 +448,7 @@ class PrefectClient:
        """
        return await self._client.get("/hello")

-    async def create_flow(self, flow: "FlowObject") -> UUID:
+    async def create_flow(self, flow: "FlowObject[Any, Any]") -> UUID:
        """
        Create a flow in the Prefect API.

@@ -511,19 +501,37 @@ class PrefectClient:
        response = await self._client.get(f"/flows/{flow_id}")
        return Flow.model_validate(response.json())

+    async def delete_flow(self, flow_id: UUID) -> None:
+        """
+        Delete a flow by UUID.
+
+        Args:
+            flow_id: ID of the flow to be deleted
+        Raises:
+            prefect.exceptions.ObjectNotFound: If request returns 404
+            httpx.RequestError: If requests fail
+        """
+        try:
+            await self._client.delete(f"/flows/{flow_id}")
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == status.HTTP_404_NOT_FOUND:
+                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
+            else:
+                raise
+
    async def read_flows(
        self,
        *,
-        flow_filter: FlowFilter = None,
-        flow_run_filter: FlowRunFilter = None,
-        task_run_filter: TaskRunFilter = None,
-        deployment_filter: DeploymentFilter = None,
-        work_pool_filter: WorkPoolFilter = None,
-        work_queue_filter: WorkQueueFilter = None,
-        sort: FlowSort = None,
+        flow_filter: Optional[FlowFilter] = None,
+        flow_run_filter: Optional[FlowRunFilter] = None,
+        task_run_filter: Optional[TaskRunFilter] = None,
+        deployment_filter: Optional[DeploymentFilter] = None,
+        work_pool_filter: Optional[WorkPoolFilter] = None,
+        work_queue_filter: Optional[WorkQueueFilter] = None,
+        sort: Optional[FlowSort] = None,
        limit: Optional[int] = None,
        offset: int = 0,
-    ) -> List[Flow]:
+    ) -> list[Flow]:
        """
        Query the Prefect API for flows. Only flows matching all criteria will
        be returned.
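The new `delete_flow` helper follows the same 404-to-`ObjectNotFound` translation used by the other delete methods. A short, hedged usage sketch (the flow ID below is a placeholder):

```python
import asyncio
from uuid import UUID

from prefect.client.orchestration import get_client
from prefect.exceptions import ObjectNotFound

PLACEHOLDER_FLOW_ID = UUID(int=0)  # placeholder value for illustration only


async def delete_flow_if_present(flow_id: UUID) -> None:
    async with get_client() as client:
        try:
            await client.delete_flow(flow_id)
            print(f"Deleted flow {flow_id}")
        except ObjectNotFound:
            # A 404 from the API is re-raised by the client as ObjectNotFound
            print(f"Flow {flow_id} does not exist")


if __name__ == "__main__":
    asyncio.run(delete_flow_if_present(PLACEHOLDER_FLOW_ID))
```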
@@ -542,7 +550,7 @@ class PrefectClient:
        Returns:
            a list of Flow model representations of the flows
        """
-        body = {
+        body: dict[str, Any] = {
            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
            "flow_runs": (
                flow_run_filter.model_dump(mode="json", exclude_unset=True)
@@ -567,7 +575,7 @@ class PrefectClient:
        }

        response = await self._client.post("/flows/filter", json=body)
-        return pydantic.TypeAdapter(List[Flow]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[Flow]).validate_python(response.json())

    async def read_flow_by_name(
        self,
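Most return paths in this file switch from `pydantic.TypeAdapter(List[Model])` to the builtin-generic `pydantic.TypeAdapter(list[Model])`; the runtime behavior is unchanged, only the annotation style changes. The pattern in isolation, using a stand-in model rather than a Prefect class:

```python
import pydantic


class Item(pydantic.BaseModel):
    id: int
    name: str


payload = [{"id": 1, "name": "alpha"}, {"id": 2, "name": "beta"}]

# Same idiom the client applies to response.json() bodies
items: list[Item] = pydantic.TypeAdapter(list[Item]).validate_python(payload)
print(items[1].name)  # -> "beta"
```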
@@ -589,15 +597,15 @@ class PrefectClient:
        self,
        deployment_id: UUID,
        *,
-        parameters: Optional[Dict[str, Any]] = None,
-        context: Optional[Dict[str, Any]] = None,
-        state: Optional[prefect.states.State] = None,
+        parameters: Optional[dict[str, Any]] = None,
+        context: Optional[dict[str, Any]] = None,
+        state: Optional[prefect.states.State[Any]] = None,
        name: Optional[str] = None,
        tags: Optional[Iterable[str]] = None,
        idempotency_key: Optional[str] = None,
        parent_task_run_id: Optional[UUID] = None,
        work_queue_name: Optional[str] = None,
-        job_variables: Optional[Dict[str, Any]] = None,
+        job_variables: Optional[dict[str, Any]] = None,
    ) -> FlowRun:
        """
        Create a flow run for a deployment.
@@ -638,7 +646,7 @@ class PrefectClient:
            parameters=parameters,
            context=context,
            state=state.to_state_create(),
-            tags=tags,
+            tags=list(tags),
            name=name,
            idempotency_key=idempotency_key,
            parent_task_run_id=parent_task_run_id,
@@ -657,13 +665,13 @@ class PrefectClient:

    async def create_flow_run(
        self,
-        flow: "FlowObject",
+        flow: "FlowObject[Any, R]",
        name: Optional[str] = None,
-        parameters: Optional[Dict[str, Any]] = None,
-        context: Optional[Dict[str, Any]] = None,
+        parameters: Optional[dict[str, Any]] = None,
+        context: Optional[dict[str, Any]] = None,
        tags: Optional[Iterable[str]] = None,
        parent_task_run_id: Optional[UUID] = None,
-        state: Optional["prefect.states.State"] = None,
+        state: Optional["prefect.states.State[R]"] = None,
    ) -> FlowRun:
        """
        Create a flow run for a flow.
@@ -705,7 +713,7 @@ class PrefectClient:
            state=state.to_state_create(),
            empirical_policy=FlowRunPolicy(
                retries=flow.retries,
-                retry_delay=flow.retry_delay_seconds,
+                retry_delay=int(flow.retry_delay_seconds or 0),
            ),
        )

@@ -723,12 +731,12 @@ class PrefectClient:
        self,
        flow_run_id: UUID,
        flow_version: Optional[str] = None,
-        parameters: Optional[dict] = None,
+        parameters: Optional[dict[str, Any]] = None,
        name: Optional[str] = None,
        tags: Optional[Iterable[str]] = None,
        empirical_policy: Optional[FlowRunPolicy] = None,
        infrastructure_pid: Optional[str] = None,
-        job_variables: Optional[dict] = None,
+        job_variables: Optional[dict[str, Any]] = None,
    ) -> httpx.Response:
        """
        Update a flow run's details.
@@ -749,7 +757,7 @@ class PrefectClient:
        Returns:
            an `httpx.Response` object from the PATCH request
        """
-        params = {}
+        params: dict[str, Any] = {}
        if flow_version is not None:
            params["flow_version"] = flow_version
        if parameters is not None:
@@ -832,7 +840,7 @@ class PrefectClient:
    async def read_concurrency_limit_by_tag(
        self,
        tag: str,
-    ):
+    ) -> ConcurrencyLimit:
        """
        Read the concurrency limit set on a specific tag.

@@ -868,7 +876,7 @@ class PrefectClient:
        self,
        limit: int,
        offset: int,
-    ):
+    ) -> list[ConcurrencyLimit]:
        """
        Lists concurrency limits set on task run tags.

@@ -886,15 +894,15 @@ class PrefectClient:
        }

        response = await self._client.post("/concurrency_limits/filter", json=body)
-        return pydantic.TypeAdapter(List[ConcurrencyLimit]).validate_python(
+        return pydantic.TypeAdapter(list[ConcurrencyLimit]).validate_python(
            response.json()
        )

    async def reset_concurrency_limit_by_tag(
        self,
        tag: str,
-        slot_override: Optional[List[Union[UUID, str]]] = None,
-    ):
+        slot_override: Optional[list[Union[UUID, str]]] = None,
+    ) -> None:
        """
        Resets the concurrency limit slots set on a specific tag.

@@ -927,7 +935,7 @@ class PrefectClient:
    async def delete_concurrency_limit_by_tag(
        self,
        tag: str,
-    ):
+    ) -> None:
        """
        Delete the concurrency limit set on a specific tag.

@@ -951,7 +959,7 @@ class PrefectClient:

    async def increment_v1_concurrency_slots(
        self,
-        names: List[str],
+        names: list[str],
        task_run_id: UUID,
    ) -> httpx.Response:
        """
@@ -961,7 +969,7 @@ class PrefectClient:
            names (List[str]): A list of limit names for which to increment limits.
            task_run_id (UUID): The task run ID incrementing the limits.
        """
-        data = {
+        data: dict[str, Any] = {
            "names": names,
            "task_run_id": str(task_run_id),
        }
@@ -973,7 +981,7 @@ class PrefectClient:

    async def decrement_v1_concurrency_slots(
        self,
-        names: List[str],
+        names: list[str],
        task_run_id: UUID,
        occupancy_seconds: float,
    ) -> httpx.Response:
@@ -989,7 +997,7 @@ class PrefectClient:
        Returns:
            httpx.Response: The HTTP response from the server.
        """
-        data = {
+        data: dict[str, Any] = {
            "names": names,
            "task_run_id": str(task_run_id),
            "occupancy_seconds": occupancy_seconds,
@@ -1089,7 +1097,7 @@ class PrefectClient:

        return WorkQueue.model_validate(response.json())

-    async def update_work_queue(self, id: UUID, **kwargs):
+    async def update_work_queue(self, id: UUID, **kwargs: Any) -> None:
        """
        Update properties of a work queue.

@@ -1119,8 +1127,8 @@ class PrefectClient:
        self,
        id: UUID,
        limit: int = 10,
-        scheduled_before: datetime.datetime = None,
-    ) -> List[FlowRun]:
+        scheduled_before: Optional[datetime.datetime] = None,
+    ) -> list[FlowRun]:
        """
        Read flow runs off a work queue.

@@ -1153,7 +1161,7 @@ class PrefectClient:
                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
            else:
                raise
-        return pydantic.TypeAdapter(List[FlowRun]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())

    async def read_work_queue(
        self,
@@ -1209,9 +1217,9 @@ class PrefectClient:

    async def match_work_queues(
        self,
-        prefixes: List[str],
+        prefixes: list[str],
        work_pool_name: Optional[str] = None,
-    ) -> List[WorkQueue]:
+    ) -> list[WorkQueue]:
        """
        Query the Prefect API for work queues with names with a specific prefix.

@@ -1225,7 +1233,7 @@ class PrefectClient:
        """
        page_length = 100
        current_page = 0
-        work_queues = []
+        work_queues: list[WorkQueue] = []

        while True:
            new_queues = await self.read_work_queues(
@@ -1246,7 +1254,7 @@ class PrefectClient:
    async def delete_work_queue_by_id(
        self,
        id: UUID,
-    ):
+    ) -> None:
        """
        Delete a work queue by its ID.

@@ -1343,7 +1351,7 @@ class PrefectClient:
        self,
        block_document_id: UUID,
        block_document: BlockDocumentUpdate,
-    ):
+    ) -> None:
        """
        Update a block document in the Prefect API.
        """
@@ -1362,7 +1370,7 @@ class PrefectClient:
            else:
                raise

-    async def delete_block_document(self, block_document_id: UUID):
+    async def delete_block_document(self, block_document_id: UUID) -> None:
        """
        Delete a block document.
        """
@@ -1405,7 +1413,9 @@ class PrefectClient:
                raise
        return BlockSchema.model_validate(response.json())

-    async def update_block_type(self, block_type_id: UUID, block_type: BlockTypeUpdate):
+    async def update_block_type(
+        self, block_type_id: UUID, block_type: BlockTypeUpdate
+    ) -> None:
        """
        Update a block document in the Prefect API.
        """
@@ -1424,7 +1434,7 @@ class PrefectClient:
            else:
                raise

-    async def delete_block_type(self, block_type_id: UUID):
+    async def delete_block_type(self, block_type_id: UUID) -> None:
        """
        Delete a block type.
        """
@@ -1444,7 +1454,7 @@ class PrefectClient:
            else:
                raise

-    async def read_block_types(self) -> List[BlockType]:
+    async def read_block_types(self) -> list[BlockType]:
        """
        Read all block types
        Raises:
@@ -1454,9 +1464,9 @@ class PrefectClient:
            List of BlockTypes.
        """
        response = await self._client.post("/block_types/filter", json={})
-        return pydantic.TypeAdapter(List[BlockType]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[BlockType]).validate_python(response.json())

-    async def read_block_schemas(self) -> List[BlockSchema]:
+    async def read_block_schemas(self) -> list[BlockSchema]:
        """
        Read all block schemas
        Raises:
@@ -1466,7 +1476,7 @@ class PrefectClient:
            A BlockSchema.
        """
        response = await self._client.post("/block_schemas/filter", json={})
-        return pydantic.TypeAdapter(List[BlockSchema]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[BlockSchema]).validate_python(response.json())

    async def get_most_recent_block_schema_for_block_type(
        self,
@@ -1502,7 +1512,7 @@ class PrefectClient:
        self,
        block_document_id: UUID,
        include_secrets: bool = True,
-    ):
+    ) -> BlockDocument:
        """
        Read the block document with the specified ID.

@@ -1580,7 +1590,7 @@ class PrefectClient:
        offset: Optional[int] = None,
        limit: Optional[int] = None,
        include_secrets: bool = True,
-    ):
+    ) -> list[BlockDocument]:
        """
        Read block documents

@@ -1607,7 +1617,7 @@ class PrefectClient:
                include_secrets=include_secrets,
            ),
        )
-        return pydantic.TypeAdapter(List[BlockDocument]).validate_python(
+        return pydantic.TypeAdapter(list[BlockDocument]).validate_python(
            response.json()
        )

@@ -1617,7 +1627,7 @@ class PrefectClient:
        offset: Optional[int] = None,
        limit: Optional[int] = None,
        include_secrets: bool = True,
-    ) -> List[BlockDocument]:
+    ) -> list[BlockDocument]:
        """Retrieve block documents by block type slug.

        Args:
@@ -1638,7 +1648,7 @@ class PrefectClient:
            ),
        )

-        return pydantic.TypeAdapter(List[BlockDocument]).validate_python(
+        return pydantic.TypeAdapter(list[BlockDocument]).validate_python(
            response.json()
        )

@@ -1647,23 +1657,23 @@ class PrefectClient:
        flow_id: UUID,
        name: str,
        version: Optional[str] = None,
-        schedules: Optional[List[DeploymentScheduleCreate]] = None,
+        schedules: Optional[list[DeploymentScheduleCreate]] = None,
        concurrency_limit: Optional[int] = None,
        concurrency_options: Optional[ConcurrencyOptions] = None,
-        parameters: Optional[Dict[str, Any]] = None,
+        parameters: Optional[dict[str, Any]] = None,
        description: Optional[str] = None,
        work_queue_name: Optional[str] = None,
        work_pool_name: Optional[str] = None,
-        tags: Optional[List[str]] = None,
+        tags: Optional[list[str]] = None,
        storage_document_id: Optional[UUID] = None,
        path: Optional[str] = None,
        entrypoint: Optional[str] = None,
        infrastructure_document_id: Optional[UUID] = None,
-        parameter_openapi_schema: Optional[Dict[str, Any]] = None,
+        parameter_openapi_schema: Optional[dict[str, Any]] = None,
        paused: Optional[bool] = None,
-        pull_steps: Optional[List[dict]] = None,
+        pull_steps: Optional[list[dict[str, Any]]] = None,
        enforce_parameter_schema: Optional[bool] = None,
-        job_variables: Optional[Dict[str, Any]] = None,
+        job_variables: Optional[dict[str, Any]] = None,
    ) -> UUID:
        """
        Create a deployment.
@@ -1743,7 +1753,9 @@ class PrefectClient:

        return UUID(deployment_id)

-    async def set_deployment_paused_state(self, deployment_id: UUID, paused: bool):
+    async def set_deployment_paused_state(
+        self, deployment_id: UUID, paused: bool
+    ) -> None:
        await self._client.patch(
            f"/deployments/{deployment_id}", json={"paused": paused}
        )
@@ -1752,7 +1764,7 @@ class PrefectClient:
        self,
        deployment_id: UUID,
        deployment: DeploymentUpdate,
-    ):
+    ) -> None:
        await self._client.patch(
            f"/deployments/{deployment_id}",
            json=deployment.model_dump(mode="json", exclude_unset=True),
@@ -1775,7 +1787,7 @@ class PrefectClient:

    async def read_deployment(
        self,
-        deployment_id: UUID,
+        deployment_id: Union[UUID, str],
    ) -> DeploymentResponse:
        """
        Query the Prefect API for a deployment by id.
@@ -1868,7 +1880,7 @@ class PrefectClient:
        limit: Optional[int] = None,
        sort: Optional[DeploymentSort] = None,
        offset: int = 0,
-    ) -> List[DeploymentResponse]:
+    ) -> list[DeploymentResponse]:
        """
        Query the Prefect API for deployments. Only deployments matching all
        the provided criteria will be returned.
@@ -1887,7 +1899,7 @@ class PrefectClient:
            a list of Deployment model representations
                of the deployments
        """
-        body = {
+        body: dict[str, Any] = {
            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
            "flow_runs": (
                flow_run_filter.model_dump(mode="json", exclude_unset=True)
@@ -1912,14 +1924,14 @@ class PrefectClient:
        }

        response = await self._client.post("/deployments/filter", json=body)
-        return pydantic.TypeAdapter(List[DeploymentResponse]).validate_python(
+        return pydantic.TypeAdapter(list[DeploymentResponse]).validate_python(
            response.json()
        )

    async def delete_deployment(
        self,
        deployment_id: UUID,
-    ):
+    ) -> None:
        """
        Delete deployment by id.

@@ -1940,8 +1952,8 @@ class PrefectClient:
    async def create_deployment_schedules(
        self,
        deployment_id: UUID,
-        schedules: List[Tuple[SCHEDULE_TYPES, bool]],
-    ) -> List[DeploymentSchedule]:
+        schedules: list[tuple[SCHEDULE_TYPES, bool]],
+    ) -> list[DeploymentSchedule]:
        """
        Create deployment schedules.

@@ -1968,14 +1980,14 @@ class PrefectClient:
        response = await self._client.post(
            f"/deployments/{deployment_id}/schedules", json=json
        )
-        return pydantic.TypeAdapter(List[DeploymentSchedule]).validate_python(
+        return pydantic.TypeAdapter(list[DeploymentSchedule]).validate_python(
            response.json()
        )

    async def read_deployment_schedules(
        self,
        deployment_id: UUID,
-    ) -> List[DeploymentSchedule]:
+    ) -> list[DeploymentSchedule]:
        """
        Query the Prefect API for a deployment's schedules.

@@ -1992,7 +2004,7 @@ class PrefectClient:
                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
            else:
                raise
-        return pydantic.TypeAdapter(List[DeploymentSchedule]).validate_python(
+        return pydantic.TypeAdapter(list[DeploymentSchedule]).validate_python(
            response.json()
        )

@@ -2002,7 +2014,7 @@ class PrefectClient:
        schedule_id: UUID,
        active: Optional[bool] = None,
        schedule: Optional[SCHEDULE_TYPES] = None,
-    ):
+    ) -> None:
        """
        Update a deployment schedule by ID.

@@ -2012,7 +2024,7 @@ class PrefectClient:
            active: whether or not the schedule should be active
            schedule: the cron, rrule, or interval schedule this deployment schedule should use
        """
-        kwargs = {}
+        kwargs: dict[str, Any] = {}
        if active is not None:
            kwargs["active"] = active
        if schedule is not None:
@@ -2076,8 +2088,8 @@ class PrefectClient:
        return FlowRun.model_validate(response.json())

    async def resume_flow_run(
-        self, flow_run_id: UUID, run_input: Optional[Dict] = None
-    ) -> OrchestrationResult:
+        self, flow_run_id: UUID, run_input: Optional[dict[str, Any]] = None
+    ) -> OrchestrationResult[Any]:
        """
        Resumes a paused flow run.

@@ -2095,21 +2107,24 @@ class PrefectClient:
        except httpx.HTTPStatusError:
            raise

-        return OrchestrationResult.model_validate(response.json())
+        result: OrchestrationResult[Any] = OrchestrationResult.model_validate(
+            response.json()
+        )
+        return result

    async def read_flow_runs(
        self,
        *,
-        flow_filter: FlowFilter = None,
-        flow_run_filter: FlowRunFilter = None,
-        task_run_filter: TaskRunFilter = None,
-        deployment_filter: DeploymentFilter = None,
-        work_pool_filter: WorkPoolFilter = None,
-        work_queue_filter: WorkQueueFilter = None,
-        sort: FlowRunSort = None,
+        flow_filter: Optional[FlowFilter] = None,
+        flow_run_filter: Optional[FlowRunFilter] = None,
+        task_run_filter: Optional[TaskRunFilter] = None,
+        deployment_filter: Optional[DeploymentFilter] = None,
+        work_pool_filter: Optional[WorkPoolFilter] = None,
+        work_queue_filter: Optional[WorkQueueFilter] = None,
+        sort: Optional[FlowRunSort] = None,
        limit: Optional[int] = None,
        offset: int = 0,
-    ) -> List[FlowRun]:
+    ) -> list[FlowRun]:
        """
        Query the Prefect API for flow runs. Only flow runs matching all criteria will
        be returned.
@@ -2129,7 +2144,7 @@ class PrefectClient:
            a list of Flow Run model representations
                of the flow runs
        """
-        body = {
+        body: dict[str, Any] = {
            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
            "flow_runs": (
                flow_run_filter.model_dump(mode="json", exclude_unset=True)
@@ -2154,14 +2169,14 @@ class PrefectClient:
        }

        response = await self._client.post("/flow_runs/filter", json=body)
-        return pydantic.TypeAdapter(List[FlowRun]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())

    async def set_flow_run_state(
        self,
-        flow_run_id: UUID,
-        state: "prefect.states.State",
+        flow_run_id: Union[UUID, str],
+        state: "prefect.states.State[T]",
        force: bool = False,
-    ) -> OrchestrationResult:
+    ) -> OrchestrationResult[T]:
        """
        Set the state of a flow run.

@@ -2194,11 +2209,14 @@ class PrefectClient:
            else:
                raise

-        return OrchestrationResult.model_validate(response.json())
+        result: OrchestrationResult[T] = OrchestrationResult.model_validate(
+            response.json()
+        )
+        return result

    async def read_flow_run_states(
        self, flow_run_id: UUID
-    ) -> List[prefect.states.State]:
+    ) -> list[prefect.states.State]:
        """
        Query for the states of a flow run

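With every filter argument now `Optional[...]`, calling `read_flow_runs` with no filters is the typed default, and filters compose as before. A usage sketch; the filter classes are assumed to come from `prefect.client.schemas.filters` as in current releases:

```python
import asyncio

from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import (
    FlowRunFilter,
    FlowRunFilterState,
    FlowRunFilterStateType,
)
from prefect.client.schemas.objects import StateType


async def list_failed_flow_runs() -> None:
    failed_only = FlowRunFilter(
        state=FlowRunFilterState(type=FlowRunFilterStateType(any_=[StateType.FAILED]))
    )
    async with get_client() as client:
        runs = await client.read_flow_runs(flow_run_filter=failed_only, limit=10)
        for run in runs:
            print(run.id, run.name, run.state_name)


if __name__ == "__main__":
    asyncio.run(list_failed_flow_runs())
```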
@@ -2212,18 +2230,18 @@ class PrefectClient:
        response = await self._client.get(
            "/flow_run_states/", params=dict(flow_run_id=str(flow_run_id))
        )
-        return pydantic.TypeAdapter(List[prefect.states.State]).validate_python(
+        return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
            response.json()
        )

-    async def set_flow_run_name(self, flow_run_id: UUID, name: str):
+    async def set_flow_run_name(self, flow_run_id: UUID, name: str) -> httpx.Response:
        flow_run_data = FlowRunUpdate(name=name)
        return await self._client.patch(
            f"/flow_runs/{flow_run_id}",
            json=flow_run_data.model_dump(mode="json", exclude_unset=True),
        )

-    async def set_task_run_name(self, task_run_id: UUID, name: str):
+    async def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response:
        task_run_data = TaskRunUpdate(name=name)
        return await self._client.patch(
            f"/task_runs/{task_run_id}",
@@ -2240,9 +2258,9 @@ class PrefectClient:
        extra_tags: Optional[Iterable[str]] = None,
        state: Optional[prefect.states.State[R]] = None,
        task_inputs: Optional[
-            Dict[
+            dict[
                str,
-                List[
+                list[
                    Union[
                        TaskRunResult,
                        Parameter,
@@ -2276,6 +2294,12 @@ class PrefectClient:
        if state is None:
            state = prefect.states.Pending()

+        retry_delay = task.retry_delay_seconds
+        if isinstance(retry_delay, list):
+            retry_delay = [int(rd) for rd in retry_delay]
+        elif isinstance(retry_delay, float):
+            retry_delay = int(retry_delay)
+
        task_run_data = TaskRunCreate(
            id=id,
            name=name,
@@ -2286,7 +2310,7 @@ class PrefectClient:
            task_version=task.version,
            empirical_policy=TaskRunPolicy(
                retries=task.retries,
-                retry_delay=task.retry_delay_seconds,
+                retry_delay=retry_delay,
                retry_jitter_factor=task.retry_jitter_factor,
            ),
            state=state.to_state_create(),
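The new block in `create_task_run` normalizes `task.retry_delay_seconds` before it reaches `TaskRunPolicy`, which suggests the policy expects integer seconds (a single value or a list). A standalone restatement of just that coercion:

```python
from typing import Optional, Union


def coerce_retry_delay(
    retry_delay: Optional[Union[int, float, list[float]]],
) -> Optional[Union[int, list[int]]]:
    # Mirrors the logic added above: lists are coerced element-wise,
    # floats are truncated to int, ints and None pass through unchanged.
    if isinstance(retry_delay, list):
        return [int(rd) for rd in retry_delay]
    if isinstance(retry_delay, float):
        return int(retry_delay)
    return retry_delay


print(coerce_retry_delay(2.5))         # -> 2
print(coerce_retry_delay([0.5, 1.5]))  # -> [0, 1]
print(coerce_retry_delay(None))        # -> None
```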
@@ -2319,14 +2343,14 @@ class PrefectClient:
    async def read_task_runs(
        self,
        *,
-        flow_filter: FlowFilter = None,
-        flow_run_filter: FlowRunFilter = None,
-        task_run_filter: TaskRunFilter = None,
-        deployment_filter: DeploymentFilter = None,
-        sort: TaskRunSort = None,
+        flow_filter: Optional[FlowFilter] = None,
+        flow_run_filter: Optional[FlowRunFilter] = None,
+        task_run_filter: Optional[TaskRunFilter] = None,
+        deployment_filter: Optional[DeploymentFilter] = None,
+        sort: Optional[TaskRunSort] = None,
        limit: Optional[int] = None,
        offset: int = 0,
-    ) -> List[TaskRun]:
+    ) -> list[TaskRun]:
        """
        Query the Prefect API for task runs. Only task runs matching all criteria will
        be returned.
@@ -2344,7 +2368,7 @@ class PrefectClient:
            a list of Task Run model representations
                of the task runs
        """
-        body = {
+        body: dict[str, Any] = {
            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
            "flow_runs": (
                flow_run_filter.model_dump(mode="json", exclude_unset=True)
@@ -2362,7 +2386,7 @@ class PrefectClient:
            "offset": offset,
        }
        response = await self._client.post("/task_runs/filter", json=body)
-        return pydantic.TypeAdapter(List[TaskRun]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json())

    async def delete_task_run(self, task_run_id: UUID) -> None:
        """
@@ -2385,9 +2409,9 @@ class PrefectClient:
    async def set_task_run_state(
        self,
        task_run_id: UUID,
-        state: prefect.states.State,
+        state: prefect.states.State[T],
        force: bool = False,
-    ) -> OrchestrationResult:
+    ) -> OrchestrationResult[T]:
        """
        Set the state of a task run.

@@ -2406,11 +2430,14 @@ class PrefectClient:
            f"/task_runs/{task_run_id}/set_state",
            json=dict(state=state_create.model_dump(mode="json"), force=force),
        )
-        return OrchestrationResult.model_validate(response.json())
+        result: OrchestrationResult[T] = OrchestrationResult.model_validate(
+            response.json()
+        )
+        return result

    async def read_task_run_states(
        self, task_run_id: UUID
-    ) -> List[prefect.states.State]:
+    ) -> list[prefect.states.State]:
        """
        Query for the states of a task run

@@ -2423,11 +2450,13 @@ class PrefectClient:
        response = await self._client.get(
            "/task_run_states/", params=dict(task_run_id=str(task_run_id))
        )
-        return pydantic.TypeAdapter(List[prefect.states.State]).validate_python(
+        return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
            response.json()
        )

-    async def create_logs(self, logs: Iterable[Union[LogCreate, dict]]) -> None:
+    async def create_logs(
+        self, logs: Iterable[Union[LogCreate, dict[str, Any]]]
+    ) -> None:
        """
        Create logs for a flow or task run

@@ -2444,8 +2473,8 @@ class PrefectClient:
        self,
        block_document_id: UUID,
        is_active: bool = True,
-        tags: List[str] = None,
-        state_names: List[str] = None,
+        tags: Optional[list[str]] = None,
+        state_names: Optional[list[str]] = None,
        message_template: Optional[str] = None,
    ) -> UUID:
        """
@@ -2507,8 +2536,8 @@ class PrefectClient:
        id: UUID,
        block_document_id: Optional[UUID] = None,
        is_active: Optional[bool] = None,
-        tags: Optional[List[str]] = None,
-        state_names: Optional[List[str]] = None,
+        tags: Optional[list[str]] = None,
+        state_names: Optional[list[str]] = None,
        message_template: Optional[str] = None,
    ) -> None:
        """
@@ -2525,7 +2554,7 @@ class PrefectClient:
            prefect.exceptions.ObjectNotFound: If request returns 404
            httpx.RequestError: If requests fails
        """
-        params = {}
+        params: dict[str, Any] = {}
        if block_document_id is not None:
            params["block_document_id"] = block_document_id
        if is_active is not None:
@@ -2555,7 +2584,7 @@ class PrefectClient:
        flow_run_notification_policy_filter: FlowRunNotificationPolicyFilter,
        limit: Optional[int] = None,
        offset: int = 0,
-    ) -> List[FlowRunNotificationPolicy]:
+    ) -> list[FlowRunNotificationPolicy]:
        """
        Query the Prefect API for flow run notification policies. Only policies matching all criteria will
        be returned.
@@ -2569,7 +2598,7 @@ class PrefectClient:
            a list of FlowRunNotificationPolicy model representations
                of the notification policies
        """
-        body = {
+        body: dict[str, Any] = {
            "flow_run_notification_policy_filter": (
                flow_run_notification_policy_filter.model_dump(mode="json")
                if flow_run_notification_policy_filter
@@ -2581,7 +2610,7 @@ class PrefectClient:
        response = await self._client.post(
            "/flow_run_notification_policies/filter", json=body
        )
-        return pydantic.TypeAdapter(List[FlowRunNotificationPolicy]).validate_python(
+        return pydantic.TypeAdapter(list[FlowRunNotificationPolicy]).validate_python(
            response.json()
        )

@@ -2591,11 +2620,11 @@ class PrefectClient:
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        sort: LogSort = LogSort.TIMESTAMP_ASC,
-    ) -> List[Log]:
+    ) -> list[Log]:
        """
        Read flow and task run logs.
        """
-        body = {
+        body: dict[str, Any] = {
            "logs": log_filter.model_dump(mode="json") if log_filter else None,
            "limit": limit,
            "offset": offset,
@@ -2603,7 +2632,7 @@ class PrefectClient:
        }

        response = await self._client.post("/logs/filter", json=body)
-        return pydantic.TypeAdapter(List[Log]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[Log]).validate_python(response.json())

    async def send_worker_heartbeat(
        self,
@@ -2622,7 +2651,7 @@ class PrefectClient:
            return_id: Whether to return the worker ID. Note: will return `None` if the connected server does not support returning worker IDs, even if `return_id` is `True`.
            worker_metadata: Metadata about the worker to send to the server.
        """
-        params = {
+        params: dict[str, Any] = {
            "name": worker_name,
            "heartbeat_interval_seconds": heartbeat_interval_seconds,
        }
@@ -2654,7 +2683,7 @@ class PrefectClient:
        worker_filter: Optional[WorkerFilter] = None,
        offset: Optional[int] = None,
        limit: Optional[int] = None,
-    ) -> List[Worker]:
+    ) -> list[Worker]:
        """
        Reads workers for a given work pool.

@@ -2678,7 +2707,7 @@ class PrefectClient:
            },
        )

-        return pydantic.TypeAdapter(List[Worker]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[Worker]).validate_python(response.json())

    async def read_work_pool(self, work_pool_name: str) -> WorkPool:
        """
@@ -2705,7 +2734,7 @@ class PrefectClient:
        limit: Optional[int] = None,
        offset: int = 0,
        work_pool_filter: Optional[WorkPoolFilter] = None,
-    ) -> List[WorkPool]:
+    ) -> list[WorkPool]:
        """
        Reads work pools.

@@ -2718,7 +2747,7 @@ class PrefectClient:
            A list of work pools.
        """

-        body = {
+        body: dict[str, Any] = {
            "limit": limit,
            "offset": offset,
            "work_pools": (
@@ -2726,7 +2755,7 @@ class PrefectClient:
            ),
        }
        response = await self._client.post("/work_pools/filter", json=body)
-        return pydantic.TypeAdapter(List[WorkPool]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[WorkPool]).validate_python(response.json())

    async def create_work_pool(
        self,
@@ -2776,7 +2805,7 @@ class PrefectClient:
        self,
        work_pool_name: str,
        work_pool: WorkPoolUpdate,
-    ):
+    ) -> None:
        """
        Updates a work pool.

@@ -2798,7 +2827,7 @@ class PrefectClient:
    async def delete_work_pool(
        self,
        work_pool_name: str,
-    ):
+    ) -> None:
        """
        Deletes a work pool.

@@ -2819,7 +2848,7 @@ class PrefectClient:
        work_queue_filter: Optional[WorkQueueFilter] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
-    ) -> List[WorkQueue]:
+    ) -> list[WorkQueue]:
        """
        Retrieves queues for a work pool.

@@ -2832,7 +2861,7 @@ class PrefectClient:
        Returns:
            List of queues for the specified work pool.
        """
-        json = {
+        json: dict[str, Any] = {
            "work_queues": (
                work_queue_filter.model_dump(mode="json", exclude_unset=True)
                if work_queue_filter
@@ -2856,15 +2885,15 @@ class PrefectClient:
        else:
            response = await self._client.post("/work_queues/filter", json=json)

-        return pydantic.TypeAdapter(List[WorkQueue]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[WorkQueue]).validate_python(response.json())

    async def get_scheduled_flow_runs_for_deployments(
        self,
-        deployment_ids: List[UUID],
+        deployment_ids: list[UUID],
        scheduled_before: Optional[datetime.datetime] = None,
        limit: Optional[int] = None,
-    ) -> List[FlowRunResponse]:
-        body: Dict[str, Any] = dict(deployment_ids=[str(id) for id in deployment_ids])
+    ) -> list[FlowRunResponse]:
+        body: dict[str, Any] = dict(deployment_ids=[str(id) for id in deployment_ids])
        if scheduled_before:
            body["scheduled_before"] = str(scheduled_before)
        if limit:
@@ -2875,16 +2904,16 @@ class PrefectClient:
            json=body,
        )

-        return pydantic.TypeAdapter(List[FlowRunResponse]).validate_python(
+        return pydantic.TypeAdapter(list[FlowRunResponse]).validate_python(
            response.json()
        )

    async def get_scheduled_flow_runs_for_work_pool(
        self,
        work_pool_name: str,
-        work_queue_names: Optional[List[str]] = None,
+        work_queue_names: Optional[list[str]] = None,
        scheduled_before: Optional[datetime.datetime] = None,
-    ) -> List[WorkerFlowRunResponse]:
+    ) -> list[WorkerFlowRunResponse]:
        """
        Retrieves scheduled flow runs for the provided set of work pool queues.

@@ -2900,7 +2929,7 @@ class PrefectClient:
            A list of worker flow run responses containing information about the
                retrieved flow runs.
        """
-        body: Dict[str, Any] = {}
+        body: dict[str, Any] = {}
        if work_queue_names is not None:
            body["work_queue_names"] = list(work_queue_names)
        if scheduled_before:
@@ -2910,7 +2939,7 @@ class PrefectClient:
            f"/work_pools/{work_pool_name}/get_scheduled_flow_runs",
            json=body,
        )
-        return pydantic.TypeAdapter(List[WorkerFlowRunResponse]).validate_python(
+        return pydantic.TypeAdapter(list[WorkerFlowRunResponse]).validate_python(
            response.json()
        )

@@ -2956,13 +2985,13 @@ class PrefectClient:
    async def read_artifacts(
        self,
        *,
-        artifact_filter: ArtifactFilter = None,
-        flow_run_filter: FlowRunFilter = None,
-        task_run_filter: TaskRunFilter = None,
-        sort: ArtifactSort = None,
+        artifact_filter: Optional[ArtifactFilter] = None,
+        flow_run_filter: Optional[FlowRunFilter] = None,
+        task_run_filter: Optional[TaskRunFilter] = None,
+        sort: Optional[ArtifactSort] = None,
        limit: Optional[int] = None,
        offset: int = 0,
-    ) -> List[Artifact]:
+    ) -> list[Artifact]:
        """
        Query the Prefect API for artifacts. Only artifacts matching all criteria will
        be returned.
@@ -2976,7 +3005,7 @@ class PrefectClient:
        Returns:
            a list of Artifact model representations of the artifacts
        """
-        body = {
+        body: dict[str, Any] = {
            "artifacts": (
                artifact_filter.model_dump(mode="json") if artifact_filter else None
            ),
@@ -2991,18 +3020,18 @@ class PrefectClient:
            "offset": offset,
        }
        response = await self._client.post("/artifacts/filter", json=body)
-        return pydantic.TypeAdapter(List[Artifact]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[Artifact]).validate_python(response.json())

    async def read_latest_artifacts(
        self,
        *,
-        artifact_filter: ArtifactCollectionFilter = None,
-        flow_run_filter: FlowRunFilter = None,
-        task_run_filter: TaskRunFilter = None,
-        sort: ArtifactCollectionSort = None,
+        artifact_filter: Optional[ArtifactCollectionFilter] = None,
+        flow_run_filter: Optional[FlowRunFilter] = None,
+        task_run_filter: Optional[TaskRunFilter] = None,
+        sort: Optional[ArtifactCollectionSort] = None,
        limit: Optional[int] = None,
        offset: int = 0,
-    ) -> List[ArtifactCollection]:
+    ) -> list[ArtifactCollection]:
        """
        Query the Prefect API for artifacts. Only artifacts matching all criteria will
        be returned.
@@ -3016,7 +3045,7 @@ class PrefectClient:
        Returns:
            a list of Artifact model representations of the artifacts
        """
-        body = {
+        body: dict[str, Any] = {
            "artifacts": (
                artifact_filter.model_dump(mode="json") if artifact_filter else None
            ),
@@ -3031,7 +3060,7 @@ class PrefectClient:
            "offset": offset,
        }
        response = await self._client.post("/artifacts/latest/filter", json=body)
-        return pydantic.TypeAdapter(List[ArtifactCollection]).validate_python(
+        return pydantic.TypeAdapter(list[ArtifactCollection]).validate_python(
            response.json()
        )

@@ -3090,7 +3119,7 @@ class PrefectClient:
            else:
                raise

-    async def delete_variable_by_name(self, name: str):
+    async def delete_variable_by_name(self, name: str) -> None:
        """Deletes a variable by name."""
        try:
            await self._client.delete(f"/variables/name/{name}")
@@ -3100,12 +3129,12 @@ class PrefectClient:
            else:
                raise

-    async def read_variables(self, limit: Optional[int] = None) -> List[Variable]:
+    async def read_variables(self, limit: Optional[int] = None) -> list[Variable]:
        """Reads all variables."""
        response = await self._client.post("/variables/filter", json={"limit": limit})
-        return pydantic.TypeAdapter(List[Variable]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[Variable]).validate_python(response.json())

-    async def read_worker_metadata(self) -> Dict[str, Any]:
+    async def read_worker_metadata(self) -> dict[str, Any]:
        """Reads worker metadata stored in Prefect collection registry."""
        response = await self._client.get("collections/views/aggregate-worker-metadata")
        response.raise_for_status()
@@ -3113,7 +3142,7 @@ class PrefectClient:

    async def increment_concurrency_slots(
        self,
-        names: List[str],
+        names: list[str],
        slots: int,
        mode: str,
        create_if_missing: Optional[bool] = None,
@@ -3129,7 +3158,7 @@ class PrefectClient:
        )

    async def release_concurrency_slots(
-        self, names: List[str], slots: int, occupancy_seconds: float
+        self, names: list[str], slots: int, occupancy_seconds: float
    ) -> httpx.Response:
        """
        Release concurrency slots for the specified limits.
@@ -3201,7 +3230,9 @@ class PrefectClient:
            else:
                raise

-    async def upsert_global_concurrency_limit_by_name(self, name: str, limit: int):
+    async def upsert_global_concurrency_limit_by_name(
+        self, name: str, limit: int
+    ) -> None:
        """Creates a global concurrency limit with the given name and limit if one does not already exist.

        If one does already exist matching the name then update it's limit if it is different.
@@ -3227,7 +3258,7 @@ class PrefectClient:

    async def read_global_concurrency_limits(
        self, limit: int = 10, offset: int = 0
-    ) -> List[GlobalConcurrencyLimitResponse]:
+    ) -> list[GlobalConcurrencyLimitResponse]:
        response = await self._client.post(
            "/v2/concurrency_limits/filter",
            json={
@@ -3236,12 +3267,12 @@ class PrefectClient:
            },
        )
        return pydantic.TypeAdapter(
-            List[GlobalConcurrencyLimitResponse]
+            list[GlobalConcurrencyLimitResponse]
        ).validate_python(response.json())

    async def create_flow_run_input(
        self, flow_run_id: UUID, key: str, value: str, sender: Optional[str] = None
-    ):
+    ) -> None:
        """
        Creates a flow run input.

@@ -3262,8 +3293,8 @@ class PrefectClient:
        response.raise_for_status()

    async def filter_flow_run_input(
-        self, flow_run_id: UUID, key_prefix: str, limit: int, exclude_keys: Set[str]
-    ) -> List[FlowRunInput]:
+        self, flow_run_id: UUID, key_prefix: str, limit: int, exclude_keys: set[str]
+    ) -> list[FlowRunInput]:
        response = await self._client.post(
            f"/flow_runs/{flow_run_id}/input/filter",
            json={
@@ -3273,7 +3304,7 @@ class PrefectClient:
            },
        )
        response.raise_for_status()
-        return pydantic.TypeAdapter(List[FlowRunInput]).validate_python(response.json())
+        return pydantic.TypeAdapter(list[FlowRunInput]).validate_python(response.json())

    async def read_flow_run_input(self, flow_run_id: UUID, key: str) -> str:
        """
@@ -3287,7 +3318,7 @@ class PrefectClient:
        response.raise_for_status()
        return response.content.decode()

-    async def delete_flow_run_input(self, flow_run_id: UUID, key: str):
+    async def delete_flow_run_input(self, flow_run_id: UUID, key: str) -> None:
        """
        Deletes a flow run input.

@@ -3307,7 +3338,9 @@ class PrefectClient:

        return UUID(response.json()["id"])

-    async def update_automation(self, automation_id: UUID, automation: AutomationCore):
+    async def update_automation(
+        self, automation_id: UUID, automation: AutomationCore
+    ) -> None:
        """Updates an automation in Prefect Cloud."""
        response = await self._client.put(
            f"/automations/{automation_id}",
@@ -3315,21 +3348,23 @@ class PrefectClient:
|
|
3315
3348
|
)
|
3316
3349
|
response.raise_for_status
|
3317
3350
|
|
3318
|
-
async def read_automations(self) ->
|
3351
|
+
async def read_automations(self) -> list[Automation]:
|
3319
3352
|
response = await self._client.post("/automations/filter")
|
3320
3353
|
response.raise_for_status()
|
3321
|
-
return pydantic.TypeAdapter(
|
3354
|
+
return pydantic.TypeAdapter(list[Automation]).validate_python(response.json())
|
3322
3355
|
|
3323
3356
|
async def find_automation(
|
3324
3357
|
self, id_or_name: Union[str, UUID]
|
3325
3358
|
) -> Optional[Automation]:
|
3326
3359
|
if isinstance(id_or_name, str):
|
3360
|
+
name = id_or_name
|
3327
3361
|
try:
|
3328
3362
|
id = UUID(id_or_name)
|
3329
3363
|
except ValueError:
|
3330
3364
|
id = None
|
3331
|
-
|
3365
|
+
else:
|
3332
3366
|
id = id_or_name
|
3367
|
+
name = str(id)
|
3333
3368
|
|
3334
3369
|
if id:
|
3335
3370
|
try:
|
@@ -3343,24 +3378,26 @@ class PrefectClient:
|
|
3343
3378
|
|
3344
3379
|
# Look for it by an exact name
|
3345
3380
|
for automation in automations:
|
3346
|
-
if automation.name ==
|
3381
|
+
if automation.name == name:
|
3347
3382
|
return automation
|
3348
3383
|
|
3349
3384
|
# Look for it by a case-insensitive name
|
3350
3385
|
for automation in automations:
|
3351
|
-
if automation.name.lower() ==
|
3386
|
+
if automation.name.lower() == name.lower():
|
3352
3387
|
return automation
|
3353
3388
|
|
3354
3389
|
return None
|
3355
3390
|
|
3356
|
-
async def read_automation(
|
3391
|
+
async def read_automation(
|
3392
|
+
self, automation_id: Union[UUID, str]
|
3393
|
+
) -> Optional[Automation]:
|
3357
3394
|
response = await self._client.get(f"/automations/{automation_id}")
|
3358
3395
|
if response.status_code == 404:
|
3359
3396
|
return None
|
3360
3397
|
response.raise_for_status()
|
3361
3398
|
return Automation.model_validate(response.json())
|
3362
3399
|
|
3363
|
-
async def read_automations_by_name(self, name: str) ->
|
3400
|
+
async def read_automations_by_name(self, name: str) -> list[Automation]:
|
3364
3401
|
"""
|
3365
3402
|
Query the Prefect API for an automation by name. Only automations matching the provided name will be returned.
|
3366
3403
|
|
@@ -3370,7 +3407,9 @@ class PrefectClient:
|
|
3370
3407
|
Returns:
|
3371
3408
|
a list of Automation model representations of the automations
|
3372
3409
|
"""
|
3373
|
-
automation_filter = filters.AutomationFilter(
|
3410
|
+
automation_filter = filters.AutomationFilter(
|
3411
|
+
name=filters.AutomationFilterName(any_=[name])
|
3412
|
+
)
|
3374
3413
|
|
3375
3414
|
response = await self._client.post(
|
3376
3415
|
"/automations/filter",
|
@@ -3384,21 +3423,21 @@ class PrefectClient:
|
|
3384
3423
|
|
3385
3424
|
response.raise_for_status()
|
3386
3425
|
|
3387
|
-
return pydantic.TypeAdapter(
|
3426
|
+
return pydantic.TypeAdapter(list[Automation]).validate_python(response.json())
|
3388
3427
|
|
3389
|
-
async def pause_automation(self, automation_id: UUID):
|
3428
|
+
async def pause_automation(self, automation_id: UUID) -> None:
|
3390
3429
|
response = await self._client.patch(
|
3391
3430
|
f"/automations/{automation_id}", json={"enabled": False}
|
3392
3431
|
)
|
3393
3432
|
response.raise_for_status()
|
3394
3433
|
|
3395
|
-
async def resume_automation(self, automation_id: UUID):
|
3434
|
+
async def resume_automation(self, automation_id: UUID) -> None:
|
3396
3435
|
response = await self._client.patch(
|
3397
3436
|
f"/automations/{automation_id}", json={"enabled": True}
|
3398
3437
|
)
|
3399
3438
|
response.raise_for_status()
|
3400
3439
|
|
3401
|
-
async def delete_automation(self, automation_id: UUID):
|
3440
|
+
async def delete_automation(self, automation_id: UUID) -> None:
|
3402
3441
|
response = await self._client.delete(f"/automations/{automation_id}")
|
3403
3442
|
if response.status_code == 404:
|
3404
3443
|
return
|
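The `find_automation` change makes the fallback explicit: an incoming string is kept as `name` (and also tried as a UUID), while an incoming UUID is stringified so the exact and case-insensitive name comparisons always have something to match against. A hedged usage sketch; the automation name and id below are placeholders, and `client` is assumed to be an entered `PrefectClient`:

```python
from uuid import UUID

from prefect.client.orchestration import PrefectClient


async def locate(client: PrefectClient) -> None:
    # A plain string is first tried as a UUID, then matched against automation names.
    by_name = await client.find_automation("nightly-cleanup")

    # A UUID (or UUID string) is looked up by id before falling back to name matching.
    by_id = await client.find_automation(UUID("11111111-2222-3333-4444-555555555555"))

    if by_name is None and by_id is None:
        print("no matching automation")  # return type is Optional[Automation]
```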
@@ -3407,12 +3446,12 @@ class PrefectClient:

     async def read_resource_related_automations(
         self, resource_id: str
-    ) ->
+    ) -> list[Automation]:
         response = await self._client.get(f"/automations/related-to/{resource_id}")
         response.raise_for_status()
-        return pydantic.TypeAdapter(
+        return pydantic.TypeAdapter(list[Automation]).validate_python(response.json())

-    async def delete_resource_owned_automations(self, resource_id: str):
+    async def delete_resource_owned_automations(self, resource_id: str) -> None:
         await self._client.delete(f"/automations/owned-by/{resource_id}")

     async def api_version(self) -> str:
@@ -3422,7 +3461,7 @@ class PrefectClient:
     def client_version(self) -> str:
         return prefect.__version__

-    async def raise_for_api_version_mismatch(self):
+    async def raise_for_api_version_mismatch(self) -> None:
         # Cloud is always compatible as a server
         if self.server_type == ServerType.CLOUD:
             return
@@ -3441,7 +3480,19 @@ class PrefectClient:
                 f"Major versions must match."
             )

-    async def
+    async def update_flow_run_labels(
+        self, flow_run_id: UUID, labels: KeyValueLabelsField
+    ) -> None:
+        """
+        Updates the labels of a flow run.
+        """
+
+        response = await self._client.patch(
+            f"/flow_runs/{flow_run_id}/labels", json=labels
+        )
+        response.raise_for_status()
+
+    async def __aenter__(self) -> Self:
         """
         Start the client.

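`update_flow_run_labels` is new on the async `PrefectClient` in 3.1.6 (a synchronous twin is added to `SyncPrefectClient` at the end of this file). A hedged sketch of how it might be called; the label keys and values are placeholders, `labels` is assumed to be a plain key-to-value mapping as accepted by `KeyValueLabelsField`, and `get_client()` is the usual way to obtain a configured client:

```python
from uuid import UUID

from prefect.client.orchestration import get_client


async def tag_run(flow_run_id: UUID) -> None:
    # The client must be entered with an async context (see __aenter__/__enter__ above).
    async with get_client() as client:
        # PATCHes /flow_runs/{id}/labels with the given mapping; raises on HTTP errors.
        await client.update_flow_run_labels(
            flow_run_id=flow_run_id,
            labels={"team": "data-eng", "environment": "staging"},
        )
```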
@@ -3488,7 +3539,7 @@ class PrefectClient:

         return self

-    async def __aexit__(self, *exc_info):
+    async def __aexit__(self, *exc_info: Any) -> Optional[bool]:
         """
         Shutdown the client.
         """
@@ -3499,13 +3550,13 @@ class PrefectClient:
         self._closed = True
         return await self._exit_stack.__aexit__(*exc_info)

-    def __enter__(self):
+    def __enter__(self) -> NoReturn:
         raise RuntimeError(
             "The `PrefectClient` must be entered with an async context. Use 'async "
             "with PrefectClient(...)' not 'with PrefectClient(...)'"
         )

-    def __exit__(self, *_):
+    def __exit__(self, *_: object) -> NoReturn:
         assert False, "This should never be called but must be defined for __enter__"


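The dunder methods also gain precise annotations: `__aexit__` returns `Optional[bool]`, matching the context-manager protocol where a truthy return suppresses the exception, while the synchronous `__enter__`/`__exit__` are marked `NoReturn` because they can only raise. A minimal standalone illustration of that typing pattern (not Prefect code):

```python
from typing import NoReturn, Optional


class AsyncOnlyResource:
    async def __aenter__(self) -> "AsyncOnlyResource":
        return self

    async def __aexit__(self, *exc_info: object) -> Optional[bool]:
        # Returning None (falsy) lets exceptions propagate; True would suppress them.
        return None

    def __enter__(self) -> NoReturn:
        # Always raises, so NoReturn tells type checkers no value ever comes back.
        raise RuntimeError("use 'async with', not 'with'")

    def __exit__(self, *_: object) -> NoReturn:
        raise AssertionError("unreachable: __enter__ always raises")
```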
@@ -3541,7 +3592,7 @@ class SyncPrefectClient:
         *,
         api_key: Optional[str] = None,
         api_version: Optional[str] = None,
-        httpx_settings: Optional[
+        httpx_settings: Optional[dict[str, Any]] = None,
         server_type: Optional[ServerType] = None,
     ) -> None:
         httpx_settings = httpx_settings.copy() if httpx_settings else {}
@@ -3617,16 +3668,10 @@ class SyncPrefectClient:
             )

         # Connect to an in-process application
-
+        else:
             self._ephemeral_app = api
             self.server_type = ServerType.EPHEMERAL

-        else:
-            raise TypeError(
-                f"Unexpected type {type(api).__name__!r} for argument `api`. Expected"
-                " 'str' or 'ASGIApp/FastAPI'"
-            )
-
         # See https://www.python-httpx.org/advanced/#timeout-configuration
         httpx_settings.setdefault(
             "timeout",
@@ -3669,9 +3714,9 @@ class SyncPrefectClient:
                 if isinstance(server_transport, httpx.HTTPTransport):
                     pool = getattr(server_transport, "_pool", None)
                     if isinstance(pool, httpcore.ConnectionPool):
-                        pool
+                        setattr(pool, "_retries", 3)

-        self.logger = get_logger("client")
+        self.logger: Logger = get_logger("client")

     @property
     def api_url(self) -> httpx.URL:
@@ -3709,7 +3754,7 @@ class SyncPrefectClient:

         return self

-    def __exit__(self, *exc_info) -> None:
+    def __exit__(self, *exc_info: Any) -> None:
         """
         Shutdown the client.
         """
@@ -3747,7 +3792,7 @@ class SyncPrefectClient:
     def client_version(self) -> str:
         return prefect.__version__

-    def raise_for_api_version_mismatch(self):
+    def raise_for_api_version_mismatch(self) -> None:
         # Cloud is always compatible as a server
         if self.server_type == ServerType.CLOUD:
             return
@@ -3766,7 +3811,7 @@ class SyncPrefectClient:
                 f"Major versions must match."
             )

-    def create_flow(self, flow: "FlowObject") -> UUID:
+    def create_flow(self, flow: "FlowObject[Any, Any]") -> UUID:
         """
         Create a flow in the Prefect API.

@@ -3806,13 +3851,13 @@ class SyncPrefectClient:

     def create_flow_run(
         self,
-        flow: "FlowObject",
+        flow: "FlowObject[Any, R]",
         name: Optional[str] = None,
-        parameters: Optional[
-        context: Optional[
+        parameters: Optional[dict[str, Any]] = None,
+        context: Optional[dict[str, Any]] = None,
         tags: Optional[Iterable[str]] = None,
         parent_task_run_id: Optional[UUID] = None,
-        state: Optional["prefect.states.State"] = None,
+        state: Optional["prefect.states.State[R]"] = None,
     ) -> FlowRun:
         """
         Create a flow run for a flow.
@@ -3854,7 +3899,7 @@ class SyncPrefectClient:
             state=state.to_state_create(),
             empirical_policy=FlowRunPolicy(
                 retries=flow.retries,
-                retry_delay=flow.retry_delay_seconds,
+                retry_delay=int(flow.retry_delay_seconds or 0),
             ),
         )

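The `retry_delay=int(flow.retry_delay_seconds or 0)` change guards against `retry_delay_seconds` being a float or `None`, presumably because `FlowRunPolicy.retry_delay` expects an integer number of seconds. A small restatement of what that expression does (an illustrative helper, not part of the client):

```python
from typing import Optional, Union


def coerce_flow_retry_delay(retry_delay_seconds: Optional[Union[int, float]]) -> int:
    # None (and 0) fall back to 0 via `or`; floats are truncated toward zero by int().
    return int(retry_delay_seconds or 0)


assert coerce_flow_retry_delay(None) == 0
assert coerce_flow_retry_delay(2.9) == 2
assert coerce_flow_retry_delay(5) == 5
```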
@@ -3872,12 +3917,12 @@ class SyncPrefectClient:
         self,
         flow_run_id: UUID,
         flow_version: Optional[str] = None,
-        parameters: Optional[dict] = None,
+        parameters: Optional[dict[str, Any]] = None,
         name: Optional[str] = None,
         tags: Optional[Iterable[str]] = None,
         empirical_policy: Optional[FlowRunPolicy] = None,
         infrastructure_pid: Optional[str] = None,
-        job_variables: Optional[dict] = None,
+        job_variables: Optional[dict[str, Any]] = None,
     ) -> httpx.Response:
         """
         Update a flow run's details.
@@ -3898,7 +3943,7 @@ class SyncPrefectClient:
         Returns:
             an `httpx.Response` object from the PATCH request
         """
-        params = {}
+        params: dict[str, Any] = {}
         if flow_version is not None:
             params["flow_version"] = flow_version
         if parameters is not None:
@@ -3954,7 +3999,7 @@ class SyncPrefectClient:
         sort: Optional[FlowRunSort] = None,
         limit: Optional[int] = None,
         offset: int = 0,
-    ) ->
+    ) -> list[FlowRun]:
         """
         Query the Prefect API for flow runs. Only flow runs matching all criteria will
         be returned.
@@ -3974,7 +4019,7 @@ class SyncPrefectClient:
             a list of Flow Run model representations
                 of the flow runs
         """
-        body = {
+        body: dict[str, Any] = {
             "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
             "flow_runs": (
                 flow_run_filter.model_dump(mode="json", exclude_unset=True)
@@ -3999,14 +4044,14 @@ class SyncPrefectClient:
         }

         response = self._client.post("/flow_runs/filter", json=body)
-        return pydantic.TypeAdapter(
+        return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())

     def set_flow_run_state(
         self,
         flow_run_id: UUID,
-        state: "prefect.states.State",
+        state: "prefect.states.State[T]",
         force: bool = False,
-    ) -> OrchestrationResult:
+    ) -> OrchestrationResult[T]:
         """
         Set the state of a flow run.

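With `read_flow_runs` annotated as returning `list[FlowRun]` (and the request `body` typed as `dict[str, Any]`), downstream code gets attribute completion and type checking without casts. A hedged sketch, assuming `client` is an entered `SyncPrefectClient`:

```python
from prefect.client.orchestration import SyncPrefectClient


def summarize_recent_runs(client: SyncPrefectClient) -> None:
    # Typed as list[FlowRun] in 3.1.6, so attribute access checks statically.
    runs = client.read_flow_runs(limit=5)
    for run in runs:
        print(run.id, run.name)
```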
@@ -4036,16 +4081,19 @@ class SyncPrefectClient:
             else:
                 raise

-
+        result: OrchestrationResult[T] = OrchestrationResult.model_validate(
+            response.json()
+        )
+        return result

-    def set_flow_run_name(self, flow_run_id: UUID, name: str):
+    def set_flow_run_name(self, flow_run_id: UUID, name: str) -> httpx.Response:
         flow_run_data = FlowRunUpdate(name=name)
         return self._client.patch(
             f"/flow_runs/{flow_run_id}",
             json=flow_run_data.model_dump(mode="json", exclude_unset=True),
         )

-    def set_task_run_name(self, task_run_id: UUID, name: str):
+    def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response:
         task_run_data = TaskRunUpdate(name=name)
         return self._client.patch(
             f"/task_runs/{task_run_id}",
@@ -4062,9 +4110,9 @@ class SyncPrefectClient:
         extra_tags: Optional[Iterable[str]] = None,
         state: Optional[prefect.states.State[R]] = None,
         task_inputs: Optional[
-
+            dict[
                 str,
-
+                list[
                     Union[
                         TaskRunResult,
                         Parameter,
@@ -4098,6 +4146,12 @@ class SyncPrefectClient:
         if state is None:
             state = prefect.states.Pending()

+        retry_delay = task.retry_delay_seconds
+        if isinstance(retry_delay, list):
+            retry_delay = [int(rd) for rd in retry_delay]
+        elif isinstance(retry_delay, float):
+            retry_delay = int(retry_delay)
+
         task_run_data = TaskRunCreate(
             id=id,
             name=name,
@@ -4108,7 +4162,7 @@ class SyncPrefectClient:
             task_version=task.version,
             empirical_policy=TaskRunPolicy(
                 retries=task.retries,
-                retry_delay=
+                retry_delay=retry_delay,
                 retry_jitter_factor=task.retry_jitter_factor,
             ),
             state=state.to_state_create(),
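Tasks, unlike flows, may declare `retry_delay_seconds` as a single number or as a list of per-attempt delays, so `create_task_run` now normalizes both shapes to integers before building the `TaskRunPolicy`. A standalone restatement of that coercion (an illustrative helper, not part of the client):

```python
from typing import Optional, Union


def normalize_task_retry_delay(
    value: Optional[Union[int, float, list[float]]],
) -> Optional[Union[int, list[int]]]:
    # Mirrors the 3.1.6 logic: lists are converted element-wise, floats are truncated,
    # and ints or None pass through unchanged.
    if isinstance(value, list):
        return [int(item) for item in value]
    if isinstance(value, float):
        return int(value)
    return value


assert normalize_task_retry_delay([1.5, 2.5]) == [1, 2]
assert normalize_task_retry_delay(3.9) == 3
assert normalize_task_retry_delay(None) is None
```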
@@ -4142,14 +4196,14 @@ class SyncPrefectClient:
     def read_task_runs(
         self,
         *,
-        flow_filter: FlowFilter = None,
-        flow_run_filter: FlowRunFilter = None,
-        task_run_filter: TaskRunFilter = None,
-        deployment_filter: DeploymentFilter = None,
-        sort: TaskRunSort = None,
+        flow_filter: Optional[FlowFilter] = None,
+        flow_run_filter: Optional[FlowRunFilter] = None,
+        task_run_filter: Optional[TaskRunFilter] = None,
+        deployment_filter: Optional[DeploymentFilter] = None,
+        sort: Optional[TaskRunSort] = None,
         limit: Optional[int] = None,
         offset: int = 0,
-    ) ->
+    ) -> list[TaskRun]:
         """
         Query the Prefect API for task runs. Only task runs matching all criteria will
         be returned.
@@ -4167,7 +4221,7 @@ class SyncPrefectClient:
             a list of Task Run model representations
                 of the task runs
         """
-        body = {
+        body: dict[str, Any] = {
             "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
             "flow_runs": (
                 flow_run_filter.model_dump(mode="json", exclude_unset=True)
@@ -4185,14 +4239,14 @@ class SyncPrefectClient:
             "offset": offset,
         }
         response = self._client.post("/task_runs/filter", json=body)
-        return pydantic.TypeAdapter(
+        return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json())

     def set_task_run_state(
         self,
         task_run_id: UUID,
-        state: prefect.states.State,
+        state: prefect.states.State[Any],
         force: bool = False,
-    ) -> OrchestrationResult:
+    ) -> OrchestrationResult[Any]:
         """
         Set the state of a task run.

@@ -4211,9 +4265,12 @@ class SyncPrefectClient:
             f"/task_runs/{task_run_id}/set_state",
             json=dict(state=state_create.model_dump(mode="json"), force=force),
         )
-
+        result: OrchestrationResult[Any] = OrchestrationResult.model_validate(
+            response.json()
+        )
+        return result

-    def read_task_run_states(self, task_run_id: UUID) ->
+    def read_task_run_states(self, task_run_id: UUID) -> list[prefect.states.State]:
         """
         Query for the states of a task run

@@ -4226,7 +4283,7 @@ class SyncPrefectClient:
         response = self._client.get(
             "/task_run_states/", params=dict(task_run_id=str(task_run_id))
         )
-        return pydantic.TypeAdapter(
+        return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
             response.json()
         )

@@ -4300,7 +4357,7 @@ class SyncPrefectClient:
         return Artifact.model_validate(response.json())

     def release_concurrency_slots(
-        self, names:
+        self, names: list[str], slots: int, occupancy_seconds: float
     ) -> httpx.Response:
         """
         Release concurrency slots for the specified limits.
@@ -4324,7 +4381,7 @@ class SyncPrefectClient:
         )

     def decrement_v1_concurrency_slots(
-        self, names:
+        self, names: list[str], occupancy_seconds: float, task_run_id: UUID
     ) -> httpx.Response:
         """
         Release the specified concurrency limits.
@@ -4346,3 +4403,15 @@ class SyncPrefectClient:
                 "task_run_id": str(task_run_id),
             },
         )
+
+    def update_flow_run_labels(
+        self, flow_run_id: UUID, labels: KeyValueLabelsField
+    ) -> None:
+        """
+        Updates the labels of a flow run.
+        """
+        response = self._client.patch(
+            f"/flow_runs/{flow_run_id}/labels",
+            json=labels,
+        )
+        response.raise_for_status()