prefect-client 3.1.9__py3-none-any.whl → 3.1.11__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- prefect/_experimental/lineage.py +7 -8
- prefect/_internal/_logging.py +15 -3
- prefect/_internal/compatibility/async_dispatch.py +22 -16
- prefect/_internal/compatibility/deprecated.py +42 -18
- prefect/_internal/compatibility/migration.py +2 -2
- prefect/_internal/concurrency/inspection.py +12 -14
- prefect/_internal/concurrency/primitives.py +2 -2
- prefect/_internal/concurrency/services.py +154 -80
- prefect/_internal/concurrency/waiters.py +13 -9
- prefect/_internal/pydantic/annotations/pendulum.py +7 -7
- prefect/_internal/pytz.py +4 -3
- prefect/_internal/retries.py +10 -5
- prefect/_internal/schemas/bases.py +19 -10
- prefect/_internal/schemas/validators.py +227 -388
- prefect/_version.py +3 -3
- prefect/artifacts.py +61 -74
- prefect/automations.py +27 -7
- prefect/blocks/core.py +3 -3
- prefect/client/{orchestration.py → orchestration/__init__.py} +38 -701
- prefect/client/orchestration/_artifacts/__init__.py +0 -0
- prefect/client/orchestration/_artifacts/client.py +239 -0
- prefect/client/orchestration/_concurrency_limits/__init__.py +0 -0
- prefect/client/orchestration/_concurrency_limits/client.py +762 -0
- prefect/client/orchestration/_logs/__init__.py +0 -0
- prefect/client/orchestration/_logs/client.py +95 -0
- prefect/client/orchestration/_variables/__init__.py +0 -0
- prefect/client/orchestration/_variables/client.py +157 -0
- prefect/client/orchestration/base.py +46 -0
- prefect/client/orchestration/routes.py +145 -0
- prefect/client/schemas/actions.py +2 -2
- prefect/client/schemas/filters.py +5 -0
- prefect/client/schemas/objects.py +3 -10
- prefect/client/schemas/schedules.py +22 -10
- prefect/concurrency/_asyncio.py +87 -0
- prefect/concurrency/{events.py → _events.py} +10 -10
- prefect/concurrency/asyncio.py +20 -104
- prefect/concurrency/context.py +6 -4
- prefect/concurrency/services.py +26 -74
- prefect/concurrency/sync.py +23 -44
- prefect/concurrency/v1/_asyncio.py +63 -0
- prefect/concurrency/v1/{events.py → _events.py} +13 -15
- prefect/concurrency/v1/asyncio.py +27 -80
- prefect/concurrency/v1/context.py +6 -4
- prefect/concurrency/v1/services.py +33 -79
- prefect/concurrency/v1/sync.py +18 -37
- prefect/context.py +66 -70
- prefect/deployments/base.py +4 -144
- prefect/deployments/flow_runs.py +12 -2
- prefect/deployments/runner.py +11 -3
- prefect/deployments/steps/pull.py +13 -0
- prefect/events/clients.py +7 -1
- prefect/events/schemas/events.py +3 -2
- prefect/flow_engine.py +54 -47
- prefect/flows.py +2 -1
- prefect/futures.py +42 -27
- prefect/input/run_input.py +2 -1
- prefect/locking/filesystem.py +8 -7
- prefect/locking/memory.py +5 -3
- prefect/locking/protocol.py +1 -1
- prefect/main.py +1 -3
- prefect/plugins.py +12 -10
- prefect/results.py +3 -308
- prefect/runner/storage.py +87 -21
- prefect/serializers.py +32 -25
- prefect/settings/legacy.py +4 -4
- prefect/settings/models/api.py +3 -3
- prefect/settings/models/cli.py +3 -3
- prefect/settings/models/client.py +5 -3
- prefect/settings/models/cloud.py +3 -3
- prefect/settings/models/deployments.py +3 -3
- prefect/settings/models/experiments.py +4 -2
- prefect/settings/models/flows.py +3 -3
- prefect/settings/models/internal.py +4 -2
- prefect/settings/models/logging.py +4 -3
- prefect/settings/models/results.py +3 -3
- prefect/settings/models/root.py +3 -2
- prefect/settings/models/runner.py +4 -4
- prefect/settings/models/server/api.py +3 -3
- prefect/settings/models/server/database.py +11 -4
- prefect/settings/models/server/deployments.py +6 -2
- prefect/settings/models/server/ephemeral.py +4 -2
- prefect/settings/models/server/events.py +3 -2
- prefect/settings/models/server/flow_run_graph.py +6 -2
- prefect/settings/models/server/root.py +3 -3
- prefect/settings/models/server/services.py +26 -11
- prefect/settings/models/server/tasks.py +6 -3
- prefect/settings/models/server/ui.py +3 -3
- prefect/settings/models/tasks.py +5 -5
- prefect/settings/models/testing.py +3 -3
- prefect/settings/models/worker.py +5 -3
- prefect/settings/profiles.py +15 -2
- prefect/states.py +4 -7
- prefect/task_engine.py +54 -75
- prefect/tasks.py +84 -32
- prefect/telemetry/processors.py +6 -6
- prefect/telemetry/run_telemetry.py +13 -8
- prefect/telemetry/services.py +32 -31
- prefect/transactions.py +4 -15
- prefect/utilities/_git.py +34 -0
- prefect/utilities/asyncutils.py +1 -1
- prefect/utilities/engine.py +3 -19
- prefect/utilities/generics.py +18 -0
- prefect/workers/__init__.py +2 -0
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/METADATA +1 -1
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/RECORD +108 -99
- prefect/records/__init__.py +0 -1
- prefect/records/base.py +0 -235
- prefect/records/filesystem.py +0 -213
- prefect/records/memory.py +0 -184
- prefect/records/result_store.py +0 -70
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/top_level.txt +0 -0
File without changes

prefect/client/orchestration/_logs/client.py (new file)
@@ -0,0 +1,95 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Iterable, Optional, Union
+
+from prefect.client.orchestration.base import BaseAsyncClient, BaseClient
+from prefect.client.schemas.sorting import (
+    LogSort,
+)
+
+if TYPE_CHECKING:
+    from prefect.client.schemas.actions import (
+        LogCreate,
+    )
+    from prefect.client.schemas.filters import (
+        LogFilter,
+    )
+    from prefect.client.schemas.objects import (
+        Log,
+    )
+
+
+class LogClient(BaseClient):
+    def create_logs(self, logs: Iterable[Union["LogCreate", dict[str, Any]]]) -> None:
+        """
+        Create logs for a flow or task run
+        """
+        from prefect.client.schemas.actions import LogCreate
+
+        serialized_logs = [
+            log.model_dump(mode="json") if isinstance(log, LogCreate) else log
+            for log in logs
+        ]
+        self.request("POST", "/logs/", json=serialized_logs)
+
+    def read_logs(
+        self,
+        log_filter: Optional["LogFilter"] = None,
+        limit: Optional[int] = None,
+        offset: Optional[int] = None,
+        sort: "LogSort" = LogSort.TIMESTAMP_ASC,
+    ) -> list["Log"]:
+        """
+        Read flow and task run logs.
+        """
+        body: dict[str, Any] = {
+            "logs": log_filter.model_dump(mode="json") if log_filter else None,
+            "limit": limit,
+            "offset": offset,
+            "sort": sort,
+        }
+        response = self.request("POST", "/logs/filter", json=body)
+        from prefect.client.schemas.objects import Log
+
+        return Log.model_validate_list(response.json())
+
+
+class LogAsyncClient(BaseAsyncClient):
+    async def create_logs(
+        self, logs: Iterable[Union["LogCreate", dict[str, Any]]]
+    ) -> None:
+        """
+        Create logs for a flow or task run
+
+        Args:
+            logs: An iterable of `LogCreate` objects or already json-compatible dicts
+        """
+        from prefect.client.schemas.actions import LogCreate
+
+        serialized_logs = [
+            log.model_dump(mode="json") if isinstance(log, LogCreate) else log
+            for log in logs
+        ]
+        await self.request("POST", "/logs/", json=serialized_logs)
+
+    async def read_logs(
+        self,
+        log_filter: Optional["LogFilter"] = None,
+        limit: Optional[int] = None,
+        offset: Optional[int] = None,
+        sort: "LogSort" = LogSort.TIMESTAMP_ASC,
+    ) -> list[Log]:
+        """
+        Read flow and task run logs.
+        """
+        body: dict[str, Any] = {
+            "logs": log_filter.model_dump(mode="json") if log_filter else None,
+            "limit": limit,
+            "offset": offset,
+            "sort": sort,
+        }
+
+        response = await self.request("POST", "/logs/filter", json=body)
+        from prefect.client.schemas.objects import Log
+
+        return Log.model_validate_list(response.json())
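
A minimal sketch of exercising the new log client directly, assuming a Prefect API reachable at an illustrative local URL; the constructor argument and method signatures come from the hunk above, while the base URL and the standalone-construction pattern are assumptions for illustration.

    import asyncio

    import httpx

    from prefect.client.orchestration._logs.client import LogAsyncClient


    async def main() -> None:
        # Assumed local Prefect API URL; in practice this comes from PREFECT_API_URL.
        async with httpx.AsyncClient(base_url="http://127.0.0.1:4200/api") as http:
            logs_client = LogAsyncClient(http)
            # No filter: returns a page of logs across runs, oldest first by default.
            logs = await logs_client.read_logs(limit=10)
            for log in logs:
                print(log.timestamp, log.level, log.message)


    asyncio.run(main())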
File without changes

prefect/client/orchestration/_variables/client.py (new file)
@@ -0,0 +1,157 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import httpx
+
+from prefect.client.orchestration.base import BaseAsyncClient, BaseClient
+from prefect.exceptions import ObjectNotFound
+
+if TYPE_CHECKING:
+    from prefect.client.schemas.actions import (
+        VariableCreate,
+        VariableUpdate,
+    )
+    from prefect.client.schemas.objects import (
+        Variable,
+    )
+
+
+class VariableClient(BaseClient):
+    def create_variable(self, variable: "VariableCreate") -> "Variable":
+        """
+        Creates an variable with the provided configuration.
+
+        Args:
+            variable: Desired configuration for the new variable.
+        Returns:
+            Information about the newly created variable.
+        """
+        response = self._client.post(
+            "/variables/",
+            json=variable.model_dump(mode="json", exclude_unset=True),
+        )
+        from prefect.client.schemas.objects import Variable
+
+        return Variable.model_validate(response.json())
+
+    def read_variable_by_name(self, name: str) -> "Variable | None":
+        """Reads a variable by name. Returns None if no variable is found."""
+        try:
+            response = self.request(
+                "GET", "/variables/name/{name}", path_params={"name": name}
+            )
+            from prefect.client.schemas.objects import Variable
+
+            return Variable(**response.json())
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                return None
+            else:
+                raise
+
+    def read_variables(self, limit: int | None = None) -> list["Variable"]:
+        """Reads all variables."""
+        response = self.request("POST", "/variables/filter", json={"limit": limit})
+        from prefect.client.schemas.objects import Variable
+
+        return Variable.model_validate_list(response.json())
+
+    def update_variable(self, variable: "VariableUpdate") -> None:
+        """
+        Updates a variable with the provided configuration.
+
+        Args:
+            variable: Desired configuration for the updated variable.
+        Returns:
+            Information about the updated variable.
+        """
+        self._client.patch(
+            f"/variables/name/{variable.name}",
+            json=variable.model_dump(mode="json", exclude_unset=True),
+        )
+        return None
+
+    def delete_variable_by_name(self, name: str) -> None:
+        """Deletes a variable by name."""
+        try:
+            self.request(
+                "DELETE",
+                "/variables/name/{name}",
+                path_params={"name": name},
+            )
+            return None
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                raise ObjectNotFound(http_exc=e) from e
+            else:
+                raise
+
+
+class VariableAsyncClient(BaseAsyncClient):
+    async def create_variable(self, variable: "VariableCreate") -> "Variable":
+        """Creates a variable with the provided configuration."""
+        response = await self._client.post(
+            "/variables/",
+            json=variable.model_dump(mode="json", exclude_unset=True),
+        )
+        from prefect.client.schemas.objects import Variable
+
+        return Variable.model_validate(response.json())
+
+    async def read_variable_by_name(self, name: str) -> "Variable | None":
+        """Reads a variable by name. Returns None if no variable is found."""
+        try:
+            response = await self.request(
+                "GET",
+                "/variables/name/{name}",
+                path_params={"name": name},
+            )
+            from prefect.client.schemas.objects import Variable
+
+            return Variable.model_validate(response.json())
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                return None
+            else:
+                raise
+
+    async def read_variables(self, limit: int | None = None) -> list["Variable"]:
+        """Reads all variables."""
+        response = await self.request(
+            "POST", "/variables/filter", json={"limit": limit}
+        )
+        from prefect.client.schemas.objects import Variable
+
+        return Variable.model_validate_list(response.json())
+
+    async def update_variable(self, variable: "VariableUpdate") -> None:
+        """
+        Updates a variable with the provided configuration.
+
+        Args:
+            variable: Desired configuration for the updated variable.
+        Returns:
+            Information about the updated variable.
+        """
+        await self.request(
+            "PATCH",
+            "/variables/name/{name}",
+            path_params={"name": variable.name},
+            json=variable.model_dump(mode="json", exclude_unset=True),
+        )
+        return None
+
+    async def delete_variable_by_name(self, name: str) -> None:
+        """Deletes a variable by name."""
+        try:
+            await self.request(
+                "DELETE",
+                "/variables/name/{name}",
+                path_params={"name": name},
+            )
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                raise ObjectNotFound(http_exc=e) from e
+            else:
+                raise
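
The variable client follows the same pattern. A rough sketch of using the synchronous `VariableClient` on its own, assuming an `httpx.Client` pointed at a running Prefect API (the URL is illustrative) and constructing the class directly rather than reaching it through the main orchestration client:

    import httpx

    from prefect.client.orchestration._variables.client import VariableClient
    from prefect.client.schemas.actions import VariableCreate

    with httpx.Client(base_url="http://127.0.0.1:4200/api") as http:
        variables = VariableClient(http)

        # read_variable_by_name returns None on a 404 instead of raising.
        if variables.read_variable_by_name("greeting") is None:
            variables.create_variable(VariableCreate(name="greeting", value="hello"))

        print([v.name for v in variables.read_variables(limit=100)])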
prefect/client/orchestration/base.py (new file)
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Literal
+
+from typing_extensions import TypeAlias
+
+if TYPE_CHECKING:
+    from httpx import AsyncClient, Client, Response
+
+    from prefect.client.orchestration.routes import ServerRoutes
+
+HTTP_METHODS: TypeAlias = Literal["GET", "POST", "PUT", "DELETE", "PATCH"]
+
+
+class BaseClient:
+    def __init__(self, client: "Client"):
+        self._client = client
+
+    def request(
+        self,
+        method: HTTP_METHODS,
+        path: "ServerRoutes",
+        params: dict[str, Any] | None = None,
+        path_params: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> "Response":
+        if path_params:
+            path = path.format(**path_params)  # type: ignore
+        return self._client.request(method, path, params=params, **kwargs)
+
+
+class BaseAsyncClient:
+    def __init__(self, client: "AsyncClient"):
+        self._client = client
+
+    async def request(
+        self,
+        method: HTTP_METHODS,
+        path: "ServerRoutes",
+        params: dict[str, Any] | None = None,
+        path_params: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> "Response":
+        if path_params:
+            path = path.format(**path_params)  # type: ignore
+        return await self._client.request(method, path, params=params, **kwargs)
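
The `request` helpers keep route templates as plain strings: placeholders are filled with `str.format` via `path_params` before the wrapped httpx client issues the call. A small sketch against an assumed local API URL:

    import httpx

    from prefect.client.orchestration.base import BaseClient

    with httpx.Client(base_url="http://127.0.0.1:4200/api") as http:
        client = BaseClient(http)
        # Sends GET /variables/name/greeting relative to the configured base_url.
        response = client.request(
            "GET", "/variables/name/{name}", path_params={"name": "greeting"}
        )
        response.raise_for_status()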
prefect/client/orchestration/routes.py (new file)
@@ -0,0 +1,145 @@
+from typing import Literal
+
+ServerRoutes = Literal[
+    "/admin/database/clear",
+    "/admin/database/create",
+    "/admin/database/drop",
+    "/admin/settings",
+    "/admin/version",
+    "/artifacts/",
+    "/artifacts/{id}",
+    "/artifacts/{key}/latest",
+    "/artifacts/count",
+    "/artifacts/filter",
+    "/artifacts/latest/count",
+    "/artifacts/latest/filter",
+    "/automations/",
+    "/automations/{id}",
+    "/automations/count",
+    "/automations/filter",
+    "/automations/owned-by/{resource_id}",
+    "/automations/related-to/{resource_id}",
+    "/block_capabilities/",
+    "/block_documents/",
+    "/block_documents/{id}",
+    "/block_documents/count",
+    "/block_documents/filter",
+    "/block_schemas/",
+    "/block_schemas/{id}",
+    "/block_schemas/checksum/{checksum}",
+    "/block_schemas/filter",
+    "/block_types/",
+    "/block_types/{id}",
+    "/block_types/filter",
+    "/block_types/install_system_block_types",
+    "/block_types/slug/{slug}",
+    "/block_types/slug/{slug}/block_documents",
+    "/block_types/slug/{slug}/block_documents/name/{block_document_name}",
+    "/collections/views/{view}",
+    "/concurrency_limits/",
+    "/concurrency_limits/{id}",
+    "/concurrency_limits/decrement",
+    "/concurrency_limits/filter",
+    "/concurrency_limits/increment",
+    "/concurrency_limits/tag/{tag}",
+    "/concurrency_limits/tag/{tag}/reset",
+    "/csrf-token",
+    "/deployments/",
+    "/deployments/{id}",
+    "/deployments/{id}/create_flow_run",
+    "/deployments/{id}/pause_deployment",
+    "/deployments/{id}/resume_deployment",
+    "/deployments/{id}/schedule",
+    "/deployments/{id}/schedules",
+    "/deployments/{id}/schedules/{schedule_id}",
+    "/deployments/{id}/work_queue_check",
+    "/deployments/count",
+    "/deployments/filter",
+    "/deployments/get_scheduled_flow_runs",
+    "/deployments/name/{flow_name}/{deployment_name}",
+    "/deployments/paginate",
+    "/events",
+    "/events/count-by/{countable}",
+    "/events/filter",
+    "/events/filter/next",
+    "/flow_run_notification_policies/",
+    "/flow_run_notification_policies/{id}",
+    "/flow_run_notification_policies/filter",
+    "/flow_run_states/",
+    "/flow_run_states/{id}",
+    "/flow_runs/",
+    "/flow_runs/{id}",
+    "/flow_runs/{id}/graph",
+    "/flow_runs/{id}/graph-v2",
+    "/flow_runs/{id}/input",
+    "/flow_runs/{id}/input/{key}",
+    "/flow_runs/{id}/input/filter",
+    "/flow_runs/{id}/labels",
+    "/flow_runs/{id}/logs/download",
+    "/flow_runs/{id}/resume",
+    "/flow_runs/{id}/set_state",
+    "/flow_runs/count",
+    "/flow_runs/filter",
+    "/flow_runs/history",
+    "/flow_runs/lateness",
+    "/flow_runs/paginate",
+    "/flows/",
+    "/flows/{id}",
+    "/flows/count",
+    "/flows/filter",
+    "/flows/name/{name}",
+    "/flows/paginate",
+    "/health",
+    "/hello",
+    "/logs/",
+    "/logs/filter",
+    "/ready",
+    "/saved_searches/",
+    "/saved_searches/{id}",
+    "/saved_searches/filter",
+    "/task_run_states/",
+    "/task_run_states/{id}",
+    "/task_runs/",
+    "/task_runs/{id}",
+    "/task_runs/{id}/set_state",
+    "/task_runs/count",
+    "/task_runs/filter",
+    "/task_runs/history",
+    "/task_workers/filter",
+    "/templates/validate",
+    "/ui/flow_runs/count-task-runs",
+    "/ui/flow_runs/history",
+    "/ui/flows/count-deployments",
+    "/ui/flows/next-runs",
+    "/ui/schemas/validate",
+    "/ui/task_runs/count",
+    "/ui/task_runs/dashboard/counts",
+    "/v2/concurrency_limits/",
+    "/v2/concurrency_limits/{id_or_name}",
+    "/v2/concurrency_limits/decrement",
+    "/v2/concurrency_limits/filter",
+    "/v2/concurrency_limits/increment",
+    "/variables/",
+    "/variables/{id}",
+    "/variables/count",
+    "/variables/filter",
+    "/variables/name/{name}",
+    "/version",
+    "/work_pools/",
+    "/work_pools/{name}",
+    "/work_pools/{name}/get_scheduled_flow_runs",
+    "/work_pools/{work_pool_name}/queues",
+    "/work_pools/{work_pool_name}/queues/{name}",
+    "/work_pools/{work_pool_name}/queues/filter",
+    "/work_pools/{work_pool_name}/workers/{name}",
+    "/work_pools/{work_pool_name}/workers/filter",
+    "/work_pools/{work_pool_name}/workers/heartbeat",
+    "/work_pools/count",
+    "/work_pools/filter",
+    "/work_queues/",
+    "/work_queues/{id}",
+    "/work_queues/{id}/get_runs",
+    "/work_queues/{id}/status",
+    "/work_queues/filter",
+    "/work_queues/name/{name}",
+]
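
Because `ServerRoutes` is a `Literal` of every known API path, the `path` argument of `BaseClient.request` is checked statically: a type checker will reject a string that is not in the list. A brief sketch (the function name here is illustrative):

    from prefect.client.orchestration.base import BaseClient


    def check_health(client: BaseClient) -> None:
        client.request("GET", "/health")  # accepted: "/health" is a member of ServerRoutes
        # client.request("GET", "/helth")  # a checker such as mypy or pyright flags this typo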
prefect/client/schemas/actions.py
@@ -39,7 +39,7 @@ from prefect.utilities.collections import listrepr
 from prefect.utilities.pydantic import get_class_fields_only
 
 if TYPE_CHECKING:
-    from prefect.results import
+    from prefect.results import ResultRecordMetadata
 
 R = TypeVar("R")
 
@@ -51,7 +51,7 @@ class StateCreate(ActionBaseModel):
     name: Optional[str] = Field(default=None)
     message: Optional[str] = Field(default=None, examples=["Run started"])
     state_details: StateDetails = Field(default_factory=StateDetails)
-    data: Union["
+    data: Union["ResultRecordMetadata", Any] = Field(
         default=None,
     )
 
prefect/client/schemas/filters.py
@@ -505,6 +505,11 @@ class DeploymentFilterTags(PrefectBaseModel, OperatorMixin):
             " superset of the list"
         ),
     )
+    any_: Optional[list[str]] = Field(
+        default=None,
+        examples=[["tag-1", "tag-2"]],
+        description="A list of tags to include",
+    )
     is_null_: Optional[bool] = Field(
         default=None, description="If true, only include deployments without tags"
     )
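
The new `any_` field adds an operator next to the existing `all_` ("superset of the list") behavior; judging by the name and its description above, it matches deployments that carry at least one of the listed tags. A hedged sketch of building both filters:

    from prefect.client.schemas.filters import DeploymentFilter, DeploymentFilterTags

    # Deployments carrying at least one of the listed tags
    any_filter = DeploymentFilter(tags=DeploymentFilterTags(any_=["prod", "critical"]))

    # Deployments whose tags are a superset of the listed tags
    all_filter = DeploymentFilter(tags=DeploymentFilterTags(all_=["prod", "critical"]))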
prefect/client/schemas/objects.py
@@ -10,7 +10,6 @@ from typing import (
     Generic,
     Optional,
     Union,
-    cast,
     overload,
 )
 from uuid import UUID, uuid4
@@ -65,7 +64,7 @@ from prefect.utilities.pydantic import handle_secret_render
 
 if TYPE_CHECKING:
     from prefect.client.schemas.actions import StateCreate
-    from prefect.results import
+    from prefect.results import ResultRecordMetadata
 
     DateTime = pendulum.DateTime
 else:
@@ -195,9 +194,7 @@ class StateDetails(PrefectBaseModel):
 
 
 def data_discriminator(x: Any) -> str:
-    if isinstance(x, dict) and "
-        return "BaseResult"
-    elif isinstance(x, dict) and "storage_key" in x:
+    if isinstance(x, dict) and "storage_key" in x:
         return "ResultRecordMetadata"
     return "Any"
 
@@ -214,7 +211,6 @@ class State(ObjectBaseModel, Generic[R]):
     state_details: StateDetails = Field(default_factory=StateDetails)
     data: Annotated[
         Union[
-            Annotated["BaseResult[R]", Tag("BaseResult")],
             Annotated["ResultRecordMetadata", Tag("ResultRecordMetadata")],
             Annotated[Any, Tag("Any")],
         ],
@@ -347,14 +343,11 @@ class State(ObjectBaseModel, Generic[R]):
         """
         from prefect.client.schemas.actions import StateCreate
         from prefect.results import (
-            BaseResult,
             ResultRecord,
             should_persist_result,
         )
 
-        if isinstance(self.data,
-            data = cast(BaseResult[R], self.data)
-        elif isinstance(self.data, ResultRecord) and should_persist_result():
+        if isinstance(self.data, ResultRecord) and should_persist_result():
             data = self.data.metadata
         else:
             data = None
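
With the `BaseResult` branch removed, the discriminator that routes `State.data` during validation only distinguishes persisted-result metadata from everything else. A minimal standalone illustration of the logic shown above (re-stated locally, not imported):

    # Mirrors data_discriminator from the hunk above, for illustration only.
    def data_discriminator(x) -> str:
        if isinstance(x, dict) and "storage_key" in x:
            return "ResultRecordMetadata"
        return "Any"


    assert data_discriminator({"storage_key": "s3://results/abc"}) == "ResultRecordMetadata"
    assert data_discriminator({"value": 42}) == "Any"
    assert data_discriminator("plain value") == "Any"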
prefect/client/schemas/schedules.py
@@ -33,6 +33,23 @@ MAX_ITERATIONS = 1000
 MAX_RRULE_LENGTH = 6500
 
 
+def is_valid_timezone(v: str) -> bool:
+    """
+    Validate that the provided timezone is a valid IANA timezone.
+
+    Unfortunately this list is slightly different from the list of valid
+    timezones in pendulum that we use for cron and interval timezone validation.
+    """
+    from prefect._internal.pytz import HAS_PYTZ
+
+    if HAS_PYTZ:
+        import pytz
+    else:
+        from prefect._internal import pytz
+
+    return v in pytz.all_timezones_set
+
+
 class IntervalSchedule(PrefectBaseModel):
     """
     A schedule formed by adding `interval` increments to an `anchor_date`. If no
@@ -305,18 +322,13 @@ class RRuleSchedule(PrefectBaseModel):
         Unfortunately this list is slightly different from the list of valid
         timezones in pendulum that we use for cron and interval timezone validation.
         """
-
+        if v is None:
+            return "UTC"
 
-        if
-
-        else:
-            from prefect._internal import pytz
+        if is_valid_timezone(v):
+            return v
 
-
-            raise ValueError(f'Invalid timezone: "{v}"')
-        elif v is None:
-            return "UTC"
-        return v
+        raise ValueError(f'Invalid timezone: "{v}"')
 
 
 class NoSchedule(PrefectBaseModel):
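
The timezone check is now a module-level helper, so the `RRuleSchedule` validator reduces to three cases: `None` falls back to `"UTC"`, known IANA names pass through, anything else raises. A short sketch, assuming the minimal rrule string below passes the schedule's other validators:

    from prefect.client.schemas.schedules import RRuleSchedule, is_valid_timezone

    assert is_valid_timezone("America/New_York")
    assert not is_valid_timezone("Not/A_Zone")

    schedule = RRuleSchedule(rrule="FREQ=DAILY", timezone=None)
    assert schedule.timezone == "UTC"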
prefect/concurrency/_asyncio.py (new file)
@@ -0,0 +1,87 @@
+import asyncio
+from typing import Literal, Optional
+
+import httpx
+
+from prefect._internal.compatibility.deprecated import deprecated_parameter
+from prefect.client.orchestration import get_client
+from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.logging.loggers import get_run_logger
+
+from .services import ConcurrencySlotAcquisitionService
+
+
+class ConcurrencySlotAcquisitionError(Exception):
+    """Raised when an unhandlable occurs while acquiring concurrency slots."""
+
+
+class AcquireConcurrencySlotTimeoutError(TimeoutError):
+    """Raised when acquiring a concurrency slot times out."""
+
+
+@deprecated_parameter(
+    name="create_if_missing",
+    start_date="Sep 2024",
+    end_date="Oct 2024",
+    when=lambda x: x is not None,
+    help="Limits must be explicitly created before acquiring concurrency slots; see `strict` if you want to enforce this behavior.",
+)
+async def aacquire_concurrency_slots(
+    names: list[str],
+    slots: int,
+    mode: Literal["concurrency", "rate_limit"] = "concurrency",
+    timeout_seconds: Optional[float] = None,
+    create_if_missing: Optional[bool] = None,
+    max_retries: Optional[int] = None,
+    strict: bool = False,
+) -> list[MinimalConcurrencyLimitResponse]:
+    service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
+    future = service.send(
+        (slots, mode, timeout_seconds, create_if_missing, max_retries)
+    )
+    try:
+        response = await asyncio.wrap_future(future)
+    except TimeoutError as timeout:
+        raise AcquireConcurrencySlotTimeoutError(
+            f"Attempt to acquire concurrency slots timed out after {timeout_seconds} second(s)"
+        ) from timeout
+    except Exception as exc:
+        raise ConcurrencySlotAcquisitionError(
+            f"Unable to acquire concurrency slots on {names!r}"
+        ) from exc
+
+    retval = _response_to_minimal_concurrency_limit_response(response)
+
+    if not retval:
+        if strict:
+            raise ConcurrencySlotAcquisitionError(
+                f"Concurrency limits {names!r} must be created before acquiring slots"
+            )
+        try:
+            logger = get_run_logger()
+        except Exception:
+            pass
+        else:
+            logger.warning(
+                f"Concurrency limits {names!r} do not exist - skipping acquisition."
+            )
+
+    return retval
+
+
+async def arelease_concurrency_slots(
+    names: list[str], slots: int, occupancy_seconds: float
+) -> list[MinimalConcurrencyLimitResponse]:
+    async with get_client() as client:
+        response = await client.release_concurrency_slots(
+            names=names, slots=slots, occupancy_seconds=occupancy_seconds
+        )
+        return _response_to_minimal_concurrency_limit_response(response)
+
+
+def _response_to_minimal_concurrency_limit_response(
+    response: httpx.Response,
+) -> list[MinimalConcurrencyLimitResponse]:
+    return [
+        MinimalConcurrencyLimitResponse.model_validate(obj_) for obj_ in response.json()
+    ]
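
A hedged sketch of the low-level acquire/release pair defined above. Per the file list, `prefect/concurrency/asyncio.py` shrinks in this release in favor of this module, and its higher-level helpers remain the usual entry point; the limit name, timeout, and occupancy value below are illustrative:

    import asyncio

    from prefect.concurrency._asyncio import (
        aacquire_concurrency_slots,
        arelease_concurrency_slots,
    )


    async def main() -> None:
        # strict=True raises if the named limit has not been created on the server.
        limits = await aacquire_concurrency_slots(
            names=["database"], slots=1, timeout_seconds=30, strict=True
        )
        try:
            ...  # do the governed work while the slot is held
        finally:
            await arelease_concurrency_slots(
                names=[limit.name for limit in limits], slots=1, occupancy_seconds=1.5
            )


    asyncio.run(main())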