prefect-client 2.19.2__py3-none-any.whl → 3.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239)
  1. prefect/__init__.py +8 -56
  2. prefect/_internal/compatibility/deprecated.py +6 -115
  3. prefect/_internal/compatibility/experimental.py +4 -79
  4. prefect/_internal/concurrency/api.py +0 -34
  5. prefect/_internal/concurrency/calls.py +0 -6
  6. prefect/_internal/concurrency/cancellation.py +0 -3
  7. prefect/_internal/concurrency/event_loop.py +0 -20
  8. prefect/_internal/concurrency/inspection.py +3 -3
  9. prefect/_internal/concurrency/threads.py +35 -0
  10. prefect/_internal/concurrency/waiters.py +0 -28
  11. prefect/_internal/pydantic/__init__.py +0 -45
  12. prefect/_internal/pydantic/v1_schema.py +21 -22
  13. prefect/_internal/pydantic/v2_schema.py +0 -2
  14. prefect/_internal/pydantic/v2_validated_func.py +18 -23
  15. prefect/_internal/schemas/bases.py +44 -177
  16. prefect/_internal/schemas/fields.py +1 -43
  17. prefect/_internal/schemas/validators.py +60 -158
  18. prefect/artifacts.py +161 -14
  19. prefect/automations.py +39 -4
  20. prefect/blocks/abstract.py +1 -1
  21. prefect/blocks/core.py +268 -148
  22. prefect/blocks/fields.py +2 -57
  23. prefect/blocks/kubernetes.py +8 -12
  24. prefect/blocks/notifications.py +40 -20
  25. prefect/blocks/system.py +22 -11
  26. prefect/blocks/webhook.py +2 -9
  27. prefect/client/base.py +4 -4
  28. prefect/client/cloud.py +8 -13
  29. prefect/client/orchestration.py +347 -341
  30. prefect/client/schemas/actions.py +92 -86
  31. prefect/client/schemas/filters.py +20 -40
  32. prefect/client/schemas/objects.py +151 -145
  33. prefect/client/schemas/responses.py +16 -24
  34. prefect/client/schemas/schedules.py +47 -35
  35. prefect/client/subscriptions.py +2 -2
  36. prefect/client/utilities.py +5 -2
  37. prefect/concurrency/asyncio.py +3 -1
  38. prefect/concurrency/events.py +1 -1
  39. prefect/concurrency/services.py +6 -3
  40. prefect/context.py +195 -27
  41. prefect/deployments/__init__.py +5 -6
  42. prefect/deployments/base.py +7 -5
  43. prefect/deployments/flow_runs.py +185 -0
  44. prefect/deployments/runner.py +50 -45
  45. prefect/deployments/schedules.py +28 -23
  46. prefect/deployments/steps/__init__.py +0 -1
  47. prefect/deployments/steps/core.py +1 -0
  48. prefect/deployments/steps/pull.py +7 -21
  49. prefect/engine.py +12 -2422
  50. prefect/events/actions.py +17 -23
  51. prefect/events/cli/automations.py +19 -6
  52. prefect/events/clients.py +14 -37
  53. prefect/events/filters.py +14 -18
  54. prefect/events/related.py +2 -2
  55. prefect/events/schemas/__init__.py +0 -5
  56. prefect/events/schemas/automations.py +55 -46
  57. prefect/events/schemas/deployment_triggers.py +7 -197
  58. prefect/events/schemas/events.py +34 -65
  59. prefect/events/schemas/labelling.py +10 -14
  60. prefect/events/utilities.py +2 -3
  61. prefect/events/worker.py +2 -3
  62. prefect/filesystems.py +6 -517
  63. prefect/{new_flow_engine.py → flow_engine.py} +313 -72
  64. prefect/flow_runs.py +377 -5
  65. prefect/flows.py +307 -166
  66. prefect/futures.py +186 -345
  67. prefect/infrastructure/__init__.py +0 -27
  68. prefect/infrastructure/provisioners/__init__.py +5 -3
  69. prefect/infrastructure/provisioners/cloud_run.py +11 -6
  70. prefect/infrastructure/provisioners/container_instance.py +11 -7
  71. prefect/infrastructure/provisioners/ecs.py +6 -4
  72. prefect/infrastructure/provisioners/modal.py +8 -5
  73. prefect/input/actions.py +2 -4
  74. prefect/input/run_input.py +5 -7
  75. prefect/logging/formatters.py +0 -2
  76. prefect/logging/handlers.py +3 -11
  77. prefect/logging/loggers.py +2 -2
  78. prefect/manifests.py +2 -1
  79. prefect/records/__init__.py +1 -0
  80. prefect/records/result_store.py +42 -0
  81. prefect/records/store.py +9 -0
  82. prefect/results.py +43 -39
  83. prefect/runner/runner.py +19 -15
  84. prefect/runner/server.py +6 -10
  85. prefect/runner/storage.py +3 -8
  86. prefect/runner/submit.py +2 -2
  87. prefect/runner/utils.py +2 -2
  88. prefect/serializers.py +24 -35
  89. prefect/server/api/collections_data/views/aggregate-worker-metadata.json +5 -14
  90. prefect/settings.py +70 -133
  91. prefect/states.py +17 -47
  92. prefect/task_engine.py +697 -58
  93. prefect/task_runners.py +269 -301
  94. prefect/task_server.py +53 -34
  95. prefect/tasks.py +327 -337
  96. prefect/transactions.py +220 -0
  97. prefect/types/__init__.py +61 -82
  98. prefect/utilities/asyncutils.py +195 -136
  99. prefect/utilities/callables.py +311 -43
  100. prefect/utilities/collections.py +23 -38
  101. prefect/utilities/dispatch.py +11 -3
  102. prefect/utilities/dockerutils.py +4 -0
  103. prefect/utilities/engine.py +140 -20
  104. prefect/utilities/importtools.py +97 -27
  105. prefect/utilities/pydantic.py +128 -38
  106. prefect/utilities/schema_tools/hydration.py +5 -1
  107. prefect/utilities/templating.py +12 -2
  108. prefect/variables.py +78 -61
  109. prefect/workers/__init__.py +0 -1
  110. prefect/workers/base.py +15 -17
  111. prefect/workers/process.py +3 -8
  112. prefect/workers/server.py +2 -2
  113. {prefect_client-2.19.2.dist-info → prefect_client-3.0.0rc1.dist-info}/METADATA +22 -21
  114. prefect_client-3.0.0rc1.dist-info/RECORD +176 -0
  115. prefect/_internal/pydantic/_base_model.py +0 -51
  116. prefect/_internal/pydantic/_compat.py +0 -82
  117. prefect/_internal/pydantic/_flags.py +0 -20
  118. prefect/_internal/pydantic/_types.py +0 -8
  119. prefect/_internal/pydantic/utilities/__init__.py +0 -0
  120. prefect/_internal/pydantic/utilities/config_dict.py +0 -72
  121. prefect/_internal/pydantic/utilities/field_validator.py +0 -150
  122. prefect/_internal/pydantic/utilities/model_construct.py +0 -56
  123. prefect/_internal/pydantic/utilities/model_copy.py +0 -55
  124. prefect/_internal/pydantic/utilities/model_dump.py +0 -136
  125. prefect/_internal/pydantic/utilities/model_dump_json.py +0 -112
  126. prefect/_internal/pydantic/utilities/model_fields.py +0 -50
  127. prefect/_internal/pydantic/utilities/model_fields_set.py +0 -29
  128. prefect/_internal/pydantic/utilities/model_json_schema.py +0 -82
  129. prefect/_internal/pydantic/utilities/model_rebuild.py +0 -80
  130. prefect/_internal/pydantic/utilities/model_validate.py +0 -75
  131. prefect/_internal/pydantic/utilities/model_validate_json.py +0 -68
  132. prefect/_internal/pydantic/utilities/model_validator.py +0 -87
  133. prefect/_internal/pydantic/utilities/type_adapter.py +0 -71
  134. prefect/_vendor/__init__.py +0 -0
  135. prefect/_vendor/fastapi/__init__.py +0 -25
  136. prefect/_vendor/fastapi/applications.py +0 -946
  137. prefect/_vendor/fastapi/background.py +0 -3
  138. prefect/_vendor/fastapi/concurrency.py +0 -44
  139. prefect/_vendor/fastapi/datastructures.py +0 -58
  140. prefect/_vendor/fastapi/dependencies/__init__.py +0 -0
  141. prefect/_vendor/fastapi/dependencies/models.py +0 -64
  142. prefect/_vendor/fastapi/dependencies/utils.py +0 -877
  143. prefect/_vendor/fastapi/encoders.py +0 -177
  144. prefect/_vendor/fastapi/exception_handlers.py +0 -40
  145. prefect/_vendor/fastapi/exceptions.py +0 -46
  146. prefect/_vendor/fastapi/logger.py +0 -3
  147. prefect/_vendor/fastapi/middleware/__init__.py +0 -1
  148. prefect/_vendor/fastapi/middleware/asyncexitstack.py +0 -25
  149. prefect/_vendor/fastapi/middleware/cors.py +0 -3
  150. prefect/_vendor/fastapi/middleware/gzip.py +0 -3
  151. prefect/_vendor/fastapi/middleware/httpsredirect.py +0 -3
  152. prefect/_vendor/fastapi/middleware/trustedhost.py +0 -3
  153. prefect/_vendor/fastapi/middleware/wsgi.py +0 -3
  154. prefect/_vendor/fastapi/openapi/__init__.py +0 -0
  155. prefect/_vendor/fastapi/openapi/constants.py +0 -2
  156. prefect/_vendor/fastapi/openapi/docs.py +0 -203
  157. prefect/_vendor/fastapi/openapi/models.py +0 -480
  158. prefect/_vendor/fastapi/openapi/utils.py +0 -485
  159. prefect/_vendor/fastapi/param_functions.py +0 -340
  160. prefect/_vendor/fastapi/params.py +0 -453
  161. prefect/_vendor/fastapi/requests.py +0 -4
  162. prefect/_vendor/fastapi/responses.py +0 -40
  163. prefect/_vendor/fastapi/routing.py +0 -1331
  164. prefect/_vendor/fastapi/security/__init__.py +0 -15
  165. prefect/_vendor/fastapi/security/api_key.py +0 -98
  166. prefect/_vendor/fastapi/security/base.py +0 -6
  167. prefect/_vendor/fastapi/security/http.py +0 -172
  168. prefect/_vendor/fastapi/security/oauth2.py +0 -227
  169. prefect/_vendor/fastapi/security/open_id_connect_url.py +0 -34
  170. prefect/_vendor/fastapi/security/utils.py +0 -10
  171. prefect/_vendor/fastapi/staticfiles.py +0 -1
  172. prefect/_vendor/fastapi/templating.py +0 -3
  173. prefect/_vendor/fastapi/testclient.py +0 -1
  174. prefect/_vendor/fastapi/types.py +0 -3
  175. prefect/_vendor/fastapi/utils.py +0 -235
  176. prefect/_vendor/fastapi/websockets.py +0 -7
  177. prefect/_vendor/starlette/__init__.py +0 -1
  178. prefect/_vendor/starlette/_compat.py +0 -28
  179. prefect/_vendor/starlette/_exception_handler.py +0 -80
  180. prefect/_vendor/starlette/_utils.py +0 -88
  181. prefect/_vendor/starlette/applications.py +0 -261
  182. prefect/_vendor/starlette/authentication.py +0 -159
  183. prefect/_vendor/starlette/background.py +0 -43
  184. prefect/_vendor/starlette/concurrency.py +0 -59
  185. prefect/_vendor/starlette/config.py +0 -151
  186. prefect/_vendor/starlette/convertors.py +0 -87
  187. prefect/_vendor/starlette/datastructures.py +0 -707
  188. prefect/_vendor/starlette/endpoints.py +0 -130
  189. prefect/_vendor/starlette/exceptions.py +0 -60
  190. prefect/_vendor/starlette/formparsers.py +0 -276
  191. prefect/_vendor/starlette/middleware/__init__.py +0 -17
  192. prefect/_vendor/starlette/middleware/authentication.py +0 -52
  193. prefect/_vendor/starlette/middleware/base.py +0 -220
  194. prefect/_vendor/starlette/middleware/cors.py +0 -176
  195. prefect/_vendor/starlette/middleware/errors.py +0 -265
  196. prefect/_vendor/starlette/middleware/exceptions.py +0 -74
  197. prefect/_vendor/starlette/middleware/gzip.py +0 -113
  198. prefect/_vendor/starlette/middleware/httpsredirect.py +0 -19
  199. prefect/_vendor/starlette/middleware/sessions.py +0 -82
  200. prefect/_vendor/starlette/middleware/trustedhost.py +0 -64
  201. prefect/_vendor/starlette/middleware/wsgi.py +0 -147
  202. prefect/_vendor/starlette/requests.py +0 -328
  203. prefect/_vendor/starlette/responses.py +0 -347
  204. prefect/_vendor/starlette/routing.py +0 -933
  205. prefect/_vendor/starlette/schemas.py +0 -154
  206. prefect/_vendor/starlette/staticfiles.py +0 -248
  207. prefect/_vendor/starlette/status.py +0 -199
  208. prefect/_vendor/starlette/templating.py +0 -231
  209. prefect/_vendor/starlette/testclient.py +0 -804
  210. prefect/_vendor/starlette/types.py +0 -30
  211. prefect/_vendor/starlette/websockets.py +0 -193
  212. prefect/agent.py +0 -698
  213. prefect/deployments/deployments.py +0 -1042
  214. prefect/deprecated/__init__.py +0 -0
  215. prefect/deprecated/data_documents.py +0 -350
  216. prefect/deprecated/packaging/__init__.py +0 -12
  217. prefect/deprecated/packaging/base.py +0 -96
  218. prefect/deprecated/packaging/docker.py +0 -146
  219. prefect/deprecated/packaging/file.py +0 -92
  220. prefect/deprecated/packaging/orion.py +0 -80
  221. prefect/deprecated/packaging/serializers.py +0 -171
  222. prefect/events/instrument.py +0 -135
  223. prefect/infrastructure/base.py +0 -323
  224. prefect/infrastructure/container.py +0 -818
  225. prefect/infrastructure/kubernetes.py +0 -920
  226. prefect/infrastructure/process.py +0 -289
  227. prefect/new_task_engine.py +0 -423
  228. prefect/pydantic/__init__.py +0 -76
  229. prefect/pydantic/main.py +0 -39
  230. prefect/software/__init__.py +0 -2
  231. prefect/software/base.py +0 -50
  232. prefect/software/conda.py +0 -199
  233. prefect/software/pip.py +0 -122
  234. prefect/software/python.py +0 -52
  235. prefect/workers/block.py +0 -218
  236. prefect_client-2.19.2.dist-info/RECORD +0 -292
  237. {prefect_client-2.19.2.dist-info → prefect_client-3.0.0rc1.dist-info}/LICENSE +0 -0
  238. {prefect_client-2.19.2.dist-info → prefect_client-3.0.0rc1.dist-info}/WHEEL +0 -0
  239. {prefect_client-2.19.2.dist-info → prefect_client-3.0.0rc1.dist-info}/top_level.txt +0 -0
prefect/deployments/deployments.py (deleted)
@@ -1,1042 +0,0 @@
- """
- Objects for specifying deployments and utilities for loading flows from deployments.
- """
-
- import importlib
- import json
- import os
- import sys
- from datetime import datetime
- from functools import partial
- from pathlib import Path
- from typing import Any, Dict, Iterable, List, Optional, Union
- from uuid import UUID
-
- import anyio
- import pendulum
- import yaml
-
- from prefect._internal.pydantic import HAS_PYDANTIC_V2
-
- if HAS_PYDANTIC_V2:
-     from pydantic.v1 import BaseModel, Field, parse_obj_as, root_validator, validator
- else:
-     from pydantic import BaseModel, Field, parse_obj_as, root_validator, validator
-
- from prefect._internal.compatibility.deprecated import (
-     DeprecatedInfraOverridesField,
-     deprecated_callable,
-     deprecated_class,
-     deprecated_parameter,
-     handle_deprecated_infra_overrides_parameter,
- )
- from prefect._internal.schemas.validators import (
-     handle_openapi_schema,
-     infrastructure_must_have_capabilities,
-     reconcile_schedules,
-     storage_must_have_capabilities,
-     validate_automation_names,
-     validate_deprecated_schedule_fields,
- )
- from prefect.blocks.core import Block
- from prefect.blocks.fields import SecretDict
- from prefect.client.orchestration import PrefectClient, get_client
- from prefect.client.schemas.actions import DeploymentScheduleCreate
- from prefect.client.schemas.objects import (
-     FlowRun,
-     MinimalDeploymentSchedule,
- )
- from prefect.client.schemas.schedules import SCHEDULE_TYPES
- from prefect.client.utilities import inject_client
- from prefect.context import FlowRunContext, PrefectObjectRegistry, TaskRunContext
- from prefect.deployments.schedules import (
-     FlexibleScheduleList,
- )
- from prefect.deployments.steps.core import run_steps
- from prefect.events import DeploymentTriggerTypes, TriggerTypes
- from prefect.exceptions import (
-     BlockMissingCapabilities,
-     ObjectAlreadyExists,
-     ObjectNotFound,
-     PrefectHTTPStatusError,
- )
- from prefect.filesystems import LocalFileSystem
- from prefect.flows import Flow, load_flow_from_entrypoint
- from prefect.infrastructure import Infrastructure, Process
- from prefect.logging.loggers import flow_run_logger, get_logger
- from prefect.states import Scheduled
- from prefect.tasks import Task
- from prefect.utilities.asyncutils import run_sync_in_worker_thread, sync_compatible
- from prefect.utilities.callables import ParameterSchema, parameter_schema
- from prefect.utilities.filesystem import relative_path_to_current_platform, tmpchdir
- from prefect.utilities.slugify import slugify
-
- logger = get_logger("deployments")
-
-
- @sync_compatible
- @deprecated_parameter(
-     "infra_overrides",
-     start_date="Apr 2024",
-     help="Use `job_variables` instead.",
- )
- @inject_client
- async def run_deployment(
-     name: Union[str, UUID],
-     client: Optional[PrefectClient] = None,
-     parameters: Optional[dict] = None,
-     scheduled_time: Optional[datetime] = None,
-     flow_run_name: Optional[str] = None,
-     timeout: Optional[float] = None,
-     poll_interval: Optional[float] = 5,
-     tags: Optional[Iterable[str]] = None,
-     idempotency_key: Optional[str] = None,
-     work_queue_name: Optional[str] = None,
-     as_subflow: Optional[bool] = True,
-     infra_overrides: Optional[dict] = None,
-     job_variables: Optional[dict] = None,
- ) -> FlowRun:
-     """
-     Create a flow run for a deployment and return it after completion or a timeout.
-
-     By default, this function blocks until the flow run finishes executing.
-     Specify a timeout (in seconds) to wait for the flow run to execute before
-     returning flow run metadata. To return immediately, without waiting for the
-     flow run to execute, set `timeout=0`.
-
-     Note that if you specify a timeout, this function will return the flow run
-     metadata whether or not the flow run finished executing.
-
-     If called within a flow or task, the flow run this function creates will
-     be linked to the current flow run as a subflow. Disable this behavior by
-     passing `as_subflow=False`.
-
-     Args:
-         name: The deployment id or deployment name in the form:
-             `"flow name/deployment name"`
-         parameters: Parameter overrides for this flow run. Merged with the deployment
-             defaults.
-         scheduled_time: The time to schedule the flow run for, defaults to scheduling
-             the flow run to start now.
-         flow_run_name: A name for the created flow run
-         timeout: The amount of time to wait (in seconds) for the flow run to
-             complete before returning. Setting `timeout` to 0 will return the flow
-             run metadata immediately. Setting `timeout` to None will allow this
-             function to poll indefinitely. Defaults to None.
-         poll_interval: The number of seconds between polls
-         tags: A list of tags to associate with this flow run; tags can be used in
-             automations and for organizational purposes.
-         idempotency_key: A unique value to recognize retries of the same run, and
-             prevent creating multiple flow runs.
-         work_queue_name: The name of a work queue to use for this run. Defaults to
-             the default work queue for the deployment.
-         as_subflow: Whether to link the flow run as a subflow of the current
-             flow or task run.
-         job_variables: A dictionary of dot delimited infrastructure overrides that
-             will be applied at runtime; for example `env.CONFIG_KEY=config_value` or
-             `namespace='prefect'`
-     """
-     if timeout is not None and timeout < 0:
-         raise ValueError("`timeout` cannot be negative")
-
-     if scheduled_time is None:
-         scheduled_time = pendulum.now("UTC")
-
-     jv = handle_deprecated_infra_overrides_parameter(job_variables, infra_overrides)
-
-     parameters = parameters or {}
-
-     deployment_id = None
-
-     if isinstance(name, UUID):
-         deployment_id = name
-     else:
-         try:
-             deployment_id = UUID(name)
-         except ValueError:
-             pass
-
-     if deployment_id:
-         deployment = await client.read_deployment(deployment_id=deployment_id)
-     else:
-         deployment = await client.read_deployment_by_name(name)
-
-     flow_run_ctx = FlowRunContext.get()
-     task_run_ctx = TaskRunContext.get()
-     if as_subflow and (flow_run_ctx or task_run_ctx):
-         # This was called from a flow. Link the flow run as a subflow.
-         from prefect.engine import (
-             Pending,
-             _dynamic_key_for_task_run,
-             collect_task_run_inputs,
-         )
-
-         task_inputs = {
-             k: await collect_task_run_inputs(v) for k, v in parameters.items()
-         }
-
-         if deployment_id:
-             flow = await client.read_flow(deployment.flow_id)
-             deployment_name = f"{flow.name}/{deployment.name}"
-         else:
-             deployment_name = name
-
-         # Generate a task in the parent flow run to represent the result of the subflow
-         dummy_task = Task(
-             name=deployment_name,
-             fn=lambda: None,
-             version=deployment.version,
-         )
-         # Override the default task key to include the deployment name
-         dummy_task.task_key = f"{__name__}.run_deployment.{slugify(deployment_name)}"
-         flow_run_id = (
-             flow_run_ctx.flow_run.id
-             if flow_run_ctx
-             else task_run_ctx.task_run.flow_run_id
-         )
-         dynamic_key = (
-             _dynamic_key_for_task_run(flow_run_ctx, dummy_task)
-             if flow_run_ctx
-             else task_run_ctx.task_run.dynamic_key
-         )
-         parent_task_run = await client.create_task_run(
-             task=dummy_task,
-             flow_run_id=flow_run_id,
-             dynamic_key=dynamic_key,
-             task_inputs=task_inputs,
-             state=Pending(),
-         )
-         parent_task_run_id = parent_task_run.id
-     else:
-         parent_task_run_id = None
-
-     flow_run = await client.create_flow_run_from_deployment(
-         deployment.id,
-         parameters=parameters,
-         state=Scheduled(scheduled_time=scheduled_time),
-         name=flow_run_name,
-         tags=tags,
-         idempotency_key=idempotency_key,
-         parent_task_run_id=parent_task_run_id,
-         work_queue_name=work_queue_name,
-         job_variables=jv,
-     )
-
-     flow_run_id = flow_run.id
-
-     if timeout == 0:
-         return flow_run
-
-     with anyio.move_on_after(timeout):
-         while True:
-             flow_run = await client.read_flow_run(flow_run_id)
-             flow_state = flow_run.state
-             if flow_state and flow_state.is_final():
-                 return flow_run
-             await anyio.sleep(poll_interval)
-
-     return flow_run
-
-
- @inject_client
- async def load_flow_from_flow_run(
-     flow_run: FlowRun,
-     client: PrefectClient,
-     ignore_storage: bool = False,
-     storage_base_path: Optional[str] = None,
- ) -> Flow:
-     """
-     Load a flow from the location/script provided in a deployment's storage document.
-
-     If `ignore_storage=True` is provided, no pull from remote storage occurs. This flag
-     is largely for testing, and assumes the flow is already available locally.
-     """
-     deployment = await client.read_deployment(flow_run.deployment_id)
-
-     if deployment.entrypoint is None:
-         raise ValueError(
-             f"Deployment {deployment.id} does not have an entrypoint and can not be run."
-         )
-
-     run_logger = flow_run_logger(flow_run)
-
-     runner_storage_base_path = storage_base_path or os.environ.get(
-         "PREFECT__STORAGE_BASE_PATH"
-     )
-
-     # If there's no colon, assume it's a module path
-     if ":" not in deployment.entrypoint:
-         run_logger.debug(
-             f"Importing flow code from module path {deployment.entrypoint}"
-         )
-         flow = await run_sync_in_worker_thread(
-             load_flow_from_entrypoint, deployment.entrypoint
-         )
-         return flow
-
-     if not ignore_storage and not deployment.pull_steps:
-         sys.path.insert(0, ".")
-         if deployment.storage_document_id:
-             storage_document = await client.read_block_document(
-                 deployment.storage_document_id
-             )
-             storage_block = Block._from_block_document(storage_document)
-         else:
-             basepath = deployment.path or Path(deployment.manifest_path).parent
-             if runner_storage_base_path:
-                 basepath = str(basepath).replace(
-                     "$STORAGE_BASE_PATH", runner_storage_base_path
-                 )
-             storage_block = LocalFileSystem(basepath=basepath)
-
-         from_path = (
-             str(deployment.path).replace("$STORAGE_BASE_PATH", runner_storage_base_path)
-             if runner_storage_base_path and deployment.path
-             else deployment.path
-         )
-         run_logger.info(f"Downloading flow code from storage at {from_path!r}")
-         await storage_block.get_directory(from_path=from_path, local_path=".")
-
-     if deployment.pull_steps:
-         run_logger.debug(f"Running {len(deployment.pull_steps)} deployment pull steps")
-         output = await run_steps(deployment.pull_steps)
-         if output.get("directory"):
-             run_logger.debug(f"Changing working directory to {output['directory']!r}")
-             os.chdir(output["directory"])
-
-     import_path = relative_path_to_current_platform(deployment.entrypoint)
-     # for backwards compat
-     if deployment.manifest_path:
-         with open(deployment.manifest_path, "r") as f:
-             import_path = json.load(f)["import_path"]
-             import_path = (
-                 Path(deployment.manifest_path).parent / import_path
-             ).absolute()
-     run_logger.debug(f"Importing flow code from '{import_path}'")
-
-     flow = await run_sync_in_worker_thread(load_flow_from_entrypoint, str(import_path))
-
-     return flow
-
-
- @deprecated_callable(start_date="Mar 2024")
- def load_deployments_from_yaml(
-     path: str,
- ) -> PrefectObjectRegistry:
-     """
-     Load deployments from a yaml file.
-     """
-     with open(path, "r") as f:
-         contents = f.read()
-
-     # Parse into a yaml tree to retrieve separate documents
-     nodes = yaml.compose_all(contents)
-
-     with PrefectObjectRegistry(capture_failures=True) as registry:
-         for node in nodes:
-             with tmpchdir(path):
-                 deployment_dict = yaml.safe_load(yaml.serialize(node))
-                 # The return value is not necessary, just instantiating the Deployment
-                 # is enough to get it recorded on the registry
-                 parse_obj_as(Deployment, deployment_dict)
-
-     return registry
-
-
- @deprecated_class(
-     start_date="Mar 2024",
-     help="Use `flow.deploy` to deploy your flows instead."
-     " Refer to the upgrade guide for more information:"
-     " https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/.",
- )
- class Deployment(DeprecatedInfraOverridesField, BaseModel):
-     """
-     DEPRECATION WARNING:
-
-     This class is deprecated as of March 2024 and will not be available after September 2024.
-     It has been replaced by `flow.deploy`, which offers enhanced functionality and better a better user experience.
-     For upgrade instructions, see https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/.
-
-     A Prefect Deployment definition, used for specifying and building deployments.
-
-     Args:
-         name: A name for the deployment (required).
-         version: An optional version for the deployment; defaults to the flow's version
-         description: An optional description of the deployment; defaults to the flow's
-             description
-         tags: An optional list of tags to associate with this deployment; note that tags
-             are used only for organizational purposes. For delegating work to agents,
-             see `work_queue_name`.
-         schedule: A schedule to run this deployment on, once registered (deprecated)
-         is_schedule_active: Whether or not the schedule is active (deprecated)
-         schedules: A list of schedules to run this deployment on
-         work_queue_name: The work queue that will handle this deployment's runs
-         work_pool_name: The work pool for the deployment
-         flow_name: The name of the flow this deployment encapsulates
-         parameters: A dictionary of parameter values to pass to runs created from this
-             deployment
-         infrastructure: An optional infrastructure block used to configure
-             infrastructure for runs; if not provided, will default to running this
-             deployment in Agent subprocesses
-         job_variables: A dictionary of dot delimited infrastructure overrides that
-             will be applied at runtime; for example `env.CONFIG_KEY=config_value` or
-             `namespace='prefect'`
-         storage: An optional remote storage block used to store and retrieve this
-             workflow; if not provided, will default to referencing this flow by its
-             local path
-         path: The path to the working directory for the workflow, relative to remote
-             storage or, if stored on a local filesystem, an absolute path
-         entrypoint: The path to the entrypoint for the workflow, always relative to the
-             `path`
-         parameter_openapi_schema: The parameter schema of the flow, including defaults.
-         enforce_parameter_schema: Whether or not the Prefect API should enforce the
-             parameter schema for this deployment.
-
-     Examples:
-
-         Create a new deployment using configuration defaults for an imported flow:
-
-         >>> from my_project.flows import my_flow
-         >>> from prefect.deployments import Deployment
-         >>>
-         >>> deployment = Deployment.build_from_flow(
-         ...     flow=my_flow,
-         ...     name="example",
-         ...     version="1",
-         ...     tags=["demo"],
-         >>> )
-         >>> deployment.apply()
-
-         Create a new deployment with custom storage and an infrastructure override:
-
-         >>> from my_project.flows import my_flow
-         >>> from prefect.deployments import Deployment
-         >>> from prefect.filesystems import S3
-
-         >>> storage = S3.load("dev-bucket")  # load a pre-defined block
-         >>> deployment = Deployment.build_from_flow(
-         ...     flow=my_flow,
-         ...     name="s3-example",
-         ...     version="2",
-         ...     tags=["aws"],
-         ...     storage=storage,
-         ...     job_variables=dict("env.PREFECT_LOGGING_LEVEL"="DEBUG"),
-         >>> )
-         >>> deployment.apply()
-
-     """
-
-     class Config:
-         json_encoders = {SecretDict: lambda v: v.dict()}
-         validate_assignment = True
-         extra = "forbid"
-
-     @property
-     def _editable_fields(self) -> List[str]:
-         editable_fields = [
-             "name",
-             "description",
-             "version",
-             "work_queue_name",
-             "work_pool_name",
-             "tags",
-             "parameters",
-             "schedule",
-             "schedules",
-             "is_schedule_active",
-             # The `infra_overrides` field has been renamed to `job_variables`.
-             # We will continue writing it in the YAML file as `infra_overrides`
-             # instead of `job_variables` for better backwards compat, but we'll
-             # accept either `job_variables` or `infra_overrides` when we read
-             # the file.
-             "infra_overrides",
-         ]
-
-         # if infrastructure is baked as a pre-saved block, then
-         # editing its fields will not update anything
-         if self.infrastructure._block_document_id:
-             return editable_fields
-         else:
-             return editable_fields + ["infrastructure"]
-
-     @property
-     def location(self) -> str:
-         """
-         The 'location' that this deployment points to is given by `path` alone
-         in the case of no remote storage, and otherwise by `storage.basepath / path`.
-
-         The underlying flow entrypoint is interpreted relative to this location.
-         """
-         location = ""
-         if self.storage:
-             location = (
-                 self.storage.basepath + "/"
-                 if not self.storage.basepath.endswith("/")
-                 else ""
-             )
-         if self.path:
-             location += self.path
-         return location
-
-     @sync_compatible
-     async def to_yaml(self, path: Path) -> None:
-         yaml_dict = self._yaml_dict()
-         schema = self.schema()
-
-         with open(path, "w") as f:
-             # write header
-             f.write(
-                 "###\n### A complete description of a Prefect Deployment for flow"
-                 f" {self.flow_name!r}\n###\n"
-             )
-
-             # write editable fields
-             for field in self._editable_fields:
-                 # write any comments
-                 if schema["properties"][field].get("yaml_comment"):
-                     f.write(f"# {schema['properties'][field]['yaml_comment']}\n")
-                 # write the field
-                 yaml.dump({field: yaml_dict[field]}, f, sort_keys=False)
-
-             # write non-editable fields, excluding `job_variables` because we'll
-             # continue writing it as `infra_overrides` for better backwards compat
-             # with the existing file format.
-             f.write("\n###\n### DO NOT EDIT BELOW THIS LINE\n###\n")
-             yaml.dump(
-                 {
-                     k: v
-                     for k, v in yaml_dict.items()
-                     if k not in self._editable_fields and k != "job_variables"
-                 },
-                 f,
-                 sort_keys=False,
-             )
-
-     def _yaml_dict(self) -> dict:
-         """
-         Returns a YAML-compatible representation of this deployment as a dictionary.
-         """
-         # avoids issues with UUIDs showing up in YAML
-         all_fields = json.loads(
-             self.json(
-                 exclude={
-                     "storage": {"_filesystem", "filesystem", "_remote_file_system"}
-                 }
-             )
-         )
-         if all_fields["storage"]:
-             all_fields["storage"][
-                 "_block_type_slug"
-             ] = self.storage.get_block_type_slug()
-         if all_fields["infrastructure"]:
-             all_fields["infrastructure"][
-                 "_block_type_slug"
-             ] = self.infrastructure.get_block_type_slug()
-         return all_fields
-
-     @classmethod
-     def _validate_schedule(cls, value):
-         """We do not support COUNT-based (# of occurrences) RRule schedules for deployments."""
-         if value:
-             rrule_value = getattr(value, "rrule", None)
-             if rrule_value and "COUNT" in rrule_value.upper():
-                 raise ValueError(
-                     "RRule schedules with `COUNT` are not supported. Please use `UNTIL`"
-                     " or the `/deployments/{id}/schedule` endpoint to schedule a fixed"
-                     " number of flow runs."
-                 )
-
-     # top level metadata
-     name: str = Field(..., description="The name of the deployment.")
-     description: Optional[str] = Field(
-         default=None, description="An optional description of the deployment."
-     )
-     version: Optional[str] = Field(
-         default=None, description="An optional version for the deployment."
-     )
-     tags: List[str] = Field(
-         default_factory=list,
-         description="One of more tags to apply to this deployment.",
-     )
-     schedule: Optional[SCHEDULE_TYPES] = Field(default=None)
-     schedules: List[MinimalDeploymentSchedule] = Field(
-         default_factory=list,
-         description="The schedules to run this deployment on.",
-     )
-     is_schedule_active: Optional[bool] = Field(
-         default=None, description="Whether or not the schedule is active."
-     )
-     flow_name: Optional[str] = Field(default=None, description="The name of the flow.")
-     work_queue_name: Optional[str] = Field(
-         "default",
-         description="The work queue for the deployment.",
-         yaml_comment="The work queue that will handle this deployment's runs",
-     )
-     work_pool_name: Optional[str] = Field(
-         default=None, description="The work pool for the deployment"
-     )
-     # flow data
-     parameters: Dict[str, Any] = Field(default_factory=dict)
-     manifest_path: Optional[str] = Field(
-         default=None,
-         description=(
-             "The path to the flow's manifest file, relative to the chosen storage."
-         ),
-     )
-     infrastructure: Infrastructure = Field(default_factory=Process)
-     job_variables: Dict[str, Any] = Field(
-         default_factory=dict,
-         description="Overrides to apply to the base infrastructure block at runtime.",
-     )
-     storage: Optional[Block] = Field(
-         None,
-         help="The remote storage to use for this workflow.",
-     )
-     path: Optional[str] = Field(
-         default=None,
-         description=(
-             "The path to the working directory for the workflow, relative to remote"
-             " storage or an absolute path."
-         ),
-     )
-     entrypoint: Optional[str] = Field(
-         default=None,
-         description=(
-             "The path to the entrypoint for the workflow, relative to the `path`."
-         ),
-     )
-     parameter_openapi_schema: ParameterSchema = Field(
-         default_factory=ParameterSchema,
-         description="The parameter schema of the flow, including defaults.",
-     )
-     timestamp: datetime = Field(default_factory=partial(pendulum.now, "UTC"))
-     triggers: List[Union[DeploymentTriggerTypes, TriggerTypes]] = Field(
-         default_factory=list,
-         description="The triggers that should cause this deployment to run.",
-     )
-     # defaults to None to allow for backwards compatibility
-     enforce_parameter_schema: Optional[bool] = Field(
-         default=None,
-         description=(
-             "Whether or not the Prefect API should enforce the parameter schema for"
-             " this deployment."
-         ),
-     )
-
-     @validator("infrastructure", pre=True)
-     def validate_infrastructure_capabilities(cls, value):
-         return infrastructure_must_have_capabilities(value)
-
-     @validator("storage", pre=True)
-     def validate_storage(cls, value):
-         return storage_must_have_capabilities(value)
-
-     @validator("parameter_openapi_schema", pre=True)
-     def validate_parameter_openapi_schema(cls, value):
-         return handle_openapi_schema(value)
-
-     @validator("triggers")
-     def validate_triggers(cls, field_value, values):
-         return validate_automation_names(field_value, values)
-
-     @root_validator(pre=True)
-     def validate_schedule(cls, values):
-         return validate_deprecated_schedule_fields(values, logger)
-
-     @root_validator(pre=True)
-     def validate_backwards_compatibility_for_schedule(cls, values):
-         return reconcile_schedules(cls, values)
-
-     @classmethod
-     @sync_compatible
-     async def load_from_yaml(cls, path: str):
-         data = yaml.safe_load(await anyio.Path(path).read_bytes())
-         # load blocks from server to ensure secret values are properly hydrated
-         if data.get("storage"):
-             block_doc_name = data["storage"].get("_block_document_name")
-             # if no doc name, this block is not stored on the server
-             if block_doc_name:
-                 block_slug = data["storage"]["_block_type_slug"]
-                 block = await Block.load(f"{block_slug}/{block_doc_name}")
-                 data["storage"] = block
-
-         if data.get("infrastructure"):
-             block_doc_name = data["infrastructure"].get("_block_document_name")
-             # if no doc name, this block is not stored on the server
-             if block_doc_name:
-                 block_slug = data["infrastructure"]["_block_type_slug"]
-                 block = await Block.load(f"{block_slug}/{block_doc_name}")
-                 data["infrastructure"] = block
-
-         return cls(**data)
-
-     @sync_compatible
-     async def load(self) -> bool:
-         """
-         Queries the API for a deployment with this name for this flow, and if found,
-         prepopulates any settings that were not set at initialization.
-
-         Returns a boolean specifying whether a load was successful or not.
-
-         Raises:
-             - ValueError: if both name and flow name are not set
-         """
-         if not self.name or not self.flow_name:
-             raise ValueError("Both a deployment name and flow name must be provided.")
-         async with get_client() as client:
-             try:
-                 deployment = await client.read_deployment_by_name(
-                     f"{self.flow_name}/{self.name}"
-                 )
-                 if deployment.storage_document_id:
-                     Block._from_block_document(
-                         await client.read_block_document(deployment.storage_document_id)
-                     )
-
-                 excluded_fields = self.__fields_set__.union(
-                     {
-                         "infrastructure",
-                         "storage",
-                         "timestamp",
-                         "triggers",
-                         "enforce_parameter_schema",
-                         "schedules",
-                         "schedule",
-                         "is_schedule_active",
-                     }
-                 )
-                 for field in set(self.__fields__.keys()) - excluded_fields:
-                     new_value = getattr(deployment, field)
-                     setattr(self, field, new_value)
-
-                 if "schedules" not in self.__fields_set__:
-                     self.schedules = [
-                         MinimalDeploymentSchedule(
-                             **schedule.dict(include={"schedule", "active"})
-                         )
-                         for schedule in deployment.schedules
-                     ]
-
-                 # The API server generates the "schedule" field from the
-                 # current list of schedules, so if the user has locally set
-                 # "schedules" to anything, we should avoid sending "schedule"
-                 # and let the API server generate a new value if necessary.
-                 if "schedules" in self.__fields_set__:
-                     self.schedule = None
-                     self.is_schedule_active = None
-                 else:
-                     # The user isn't using "schedules," so we should
-                     # populate "schedule" and "is_schedule_active" from the
-                     # API's version of the deployment, unless the user gave
-                     # us these fields in __init__().
-                     if "schedule" not in self.__fields_set__:
-                         self.schedule = deployment.schedule
-                     if "is_schedule_active" not in self.__fields_set__:
-                         self.is_schedule_active = deployment.is_schedule_active
-
-                 if "infrastructure" not in self.__fields_set__:
-                     if deployment.infrastructure_document_id:
-                         self.infrastructure = Block._from_block_document(
-                             await client.read_block_document(
-                                 deployment.infrastructure_document_id
-                             )
-                         )
-                 if "storage" not in self.__fields_set__:
-                     if deployment.storage_document_id:
-                         self.storage = Block._from_block_document(
-                             await client.read_block_document(
-                                 deployment.storage_document_id
-                             )
-                         )
-             except ObjectNotFound:
-                 return False
-         return True
-
-     @sync_compatible
-     async def update(self, ignore_none: bool = False, **kwargs):
-         """
-         Performs an in-place update with the provided settings.
-
-         Args:
-             ignore_none: if True, all `None` values are ignored when performing the
-                 update
-         """
-         unknown_keys = set(kwargs.keys()) - set(self.dict().keys())
-         if unknown_keys:
-             raise ValueError(
-                 f"Received unexpected attributes: {', '.join(unknown_keys)}"
-             )
-         for key, value in kwargs.items():
-             if ignore_none and value is None:
-                 continue
-             setattr(self, key, value)
-
-     @sync_compatible
-     async def upload_to_storage(
-         self, storage_block: str = None, ignore_file: str = ".prefectignore"
-     ) -> Optional[int]:
-         """
-         Uploads the workflow this deployment represents using a provided storage block;
-         if no block is provided, defaults to configuring self for local storage.
-
-         Args:
-             storage_block: a string reference a remote storage block slug `$type/$name`;
-                 if provided, used to upload the workflow's project
-             ignore_file: an optional path to a `.prefectignore` file that specifies
-                 filename patterns to ignore when uploading to remote storage; if not
-                 provided, looks for `.prefectignore` in the current working directory
-         """
-         file_count = None
-         if storage_block:
-             storage = await Block.load(storage_block)
-
-             if "put-directory" not in storage.get_block_capabilities():
-                 raise BlockMissingCapabilities(
-                     f"Storage block {storage!r} missing 'put-directory' capability."
-                 )
-
-             self.storage = storage
-
-             # upload current directory to storage location
-             file_count = await self.storage.put_directory(
-                 ignore_file=ignore_file, to_path=self.path
-             )
-         elif self.storage:
-             if "put-directory" not in self.storage.get_block_capabilities():
-                 raise BlockMissingCapabilities(
-                     f"Storage block {self.storage!r} missing 'put-directory'"
-                     " capability."
-                 )
-
-             file_count = await self.storage.put_directory(
-                 ignore_file=ignore_file, to_path=self.path
-             )
-
-         # persists storage now in case it contains secret values
-         if self.storage and not self.storage._block_document_id:
-             await self.storage._save(is_anonymous=True)
-
-         return file_count
-
-     @sync_compatible
-     async def apply(
-         self, upload: bool = False, work_queue_concurrency: int = None
-     ) -> UUID:
-         """
-         Registers this deployment with the API and returns the deployment's ID.
-
-         Args:
-             upload: if True, deployment files are automatically uploaded to remote
-                 storage
-             work_queue_concurrency: If provided, sets the concurrency limit on the
-                 deployment's work queue
-         """
-         if not self.name or not self.flow_name:
-             raise ValueError("Both a deployment name and flow name must be set.")
-         async with get_client() as client:
-             # prep IDs
-             flow_id = await client.create_flow_from_name(self.flow_name)
-
-             infrastructure_document_id = self.infrastructure._block_document_id
-             if not infrastructure_document_id:
-                 # if not building off a block, will create an anonymous block
-                 self.infrastructure = self.infrastructure.copy()
-                 infrastructure_document_id = await self.infrastructure._save(
-                     is_anonymous=True,
-                 )
-
-             if upload:
-                 await self.upload_to_storage()
-
-             if self.work_queue_name and work_queue_concurrency is not None:
-                 try:
-                     res = await client.create_work_queue(
-                         name=self.work_queue_name, work_pool_name=self.work_pool_name
-                     )
-                 except ObjectAlreadyExists:
-                     res = await client.read_work_queue_by_name(
-                         name=self.work_queue_name, work_pool_name=self.work_pool_name
-                     )
-                 await client.update_work_queue(
-                     res.id, concurrency_limit=work_queue_concurrency
-                 )
-
-             if self.schedule:
-                 logger.info(
-                     "Interpreting the deprecated `schedule` field as an entry in "
-                     "`schedules`."
-                 )
-                 schedules = [
-                     DeploymentScheduleCreate(
-                         schedule=self.schedule, active=self.is_schedule_active
-                     )
-                 ]
-             elif self.schedules:
-                 schedules = [
-                     DeploymentScheduleCreate(**schedule.dict())
-                     for schedule in self.schedules
-                 ]
-             else:
-                 schedules = None
-
-             # we assume storage was already saved
-             storage_document_id = getattr(self.storage, "_block_document_id", None)
-             deployment_id = await client.create_deployment(
-                 flow_id=flow_id,
-                 name=self.name,
-                 work_queue_name=self.work_queue_name,
-                 work_pool_name=self.work_pool_name,
-                 version=self.version,
-                 schedules=schedules,
-                 is_schedule_active=self.is_schedule_active,
-                 parameters=self.parameters,
-                 description=self.description,
-                 tags=self.tags,
-                 manifest_path=self.manifest_path,  # allows for backwards YAML compat
-                 path=self.path,
-                 entrypoint=self.entrypoint,
-                 job_variables=self.job_variables,
-                 storage_document_id=storage_document_id,
-                 infrastructure_document_id=infrastructure_document_id,
-                 parameter_openapi_schema=self.parameter_openapi_schema.dict(),
-                 enforce_parameter_schema=self.enforce_parameter_schema,
-             )
-
-             if client.server_type.supports_automations():
-                 try:
-                     # The triggers defined in the deployment spec are, essentially,
-                     # anonymous and attempting truly sync them with cloud is not
-                     # feasible. Instead, we remove all automations that are owned
-                     # by the deployment, meaning that they were created via this
-                     # mechanism below, and then recreate them.
-                     await client.delete_resource_owned_automations(
-                         f"prefect.deployment.{deployment_id}"
-                     )
-                 except PrefectHTTPStatusError as e:
-                     if e.response.status_code == 404:
-                         # This Prefect server does not support automations, so we can safely
-                         # ignore this 404 and move on.
-                         return deployment_id
-                     raise e
-
-                 for trigger in self.triggers:
-                     trigger.set_deployment_id(deployment_id)
-                     await client.create_automation(trigger.as_automation())
-
-             return deployment_id
-
-     @classmethod
-     @sync_compatible
-     async def build_from_flow(
-         cls,
-         flow: Flow,
-         name: str,
-         output: str = None,
-         skip_upload: bool = False,
-         ignore_file: str = ".prefectignore",
-         apply: bool = False,
-         load_existing: bool = True,
-         schedules: Optional[FlexibleScheduleList] = None,
-         **kwargs,
-     ) -> "Deployment":
-         """
-         Configure a deployment for a given flow.
-
-         Args:
-             flow: A flow function to deploy
-             name: A name for the deployment
-             output (optional): if provided, the full deployment specification will be
-                 written as a YAML file in the location specified by `output`
-             skip_upload: if True, deployment files are not automatically uploaded to
-                 remote storage
-             ignore_file: an optional path to a `.prefectignore` file that specifies
-                 filename patterns to ignore when uploading to remote storage; if not
-                 provided, looks for `.prefectignore` in the current working directory
-             apply: if True, the deployment is automatically registered with the API
-             load_existing: if True, load any settings that may already be configured for
-                 the named deployment server-side (e.g., schedules, default parameter
-                 values, etc.)
-             schedules: An optional list of schedules. Each item in the list can be:
-                 - An instance of `MinimalDeploymentSchedule`.
-                 - A dictionary with a `schedule` key, and optionally, an
-                   `active` key. The `schedule` key should correspond to a
-                   schedule type, and `active` is a boolean indicating whether
-                   the schedule is active or not.
-                 - An instance of one of the predefined schedule types:
-                   `IntervalSchedule`, `CronSchedule`, or `RRuleSchedule`.
-             **kwargs: other keyword arguments to pass to the constructor for the
-                 `Deployment` class
-         """
-         if not name:
-             raise ValueError("A deployment name must be provided.")
-
-         # note that `deployment.load` only updates settings that were *not*
-         # provided at initialization
-
-         deployment_args = {
-             "name": name,
-             "flow_name": flow.name,
-             **kwargs,
-         }
-
-         if schedules is not None:
-             deployment_args["schedules"] = schedules
-
-         deployment = cls(**deployment_args)
-         deployment.flow_name = flow.name
-         if not deployment.entrypoint:
-             ## first see if an entrypoint can be determined
-             flow_file = getattr(flow, "__globals__", {}).get("__file__")
-             mod_name = getattr(flow, "__module__", None)
-             if not flow_file:
-                 if not mod_name:
-                     # todo, check if the file location was manually set already
-                     raise ValueError("Could not determine flow's file location.")
-                 module = importlib.import_module(mod_name)
-                 flow_file = getattr(module, "__file__", None)
-                 if not flow_file:
-                     raise ValueError("Could not determine flow's file location.")
-
-             # set entrypoint
-             entry_path = Path(flow_file).absolute().relative_to(Path(".").absolute())
-             deployment.entrypoint = f"{entry_path}:{flow.fn.__name__}"
-
-         if load_existing:
-             await deployment.load()
-
-         # set a few attributes for this flow object
-         deployment.parameter_openapi_schema = parameter_schema(flow)
-
-         # ensure the ignore file exists
-         if not Path(ignore_file).exists():
-             Path(ignore_file).touch()
-
-         if not deployment.version:
-             deployment.version = flow.version
-         if not deployment.description:
-             deployment.description = flow.description
-
-         # proxy for whether infra is docker-based
-         is_docker_based = hasattr(deployment.infrastructure, "image")
-
-         if not deployment.storage and not is_docker_based and not deployment.path:
-             deployment.path = str(Path(".").absolute())
-         elif not deployment.storage and is_docker_based:
-             # only update if a path is not already set
-             if not deployment.path:
-                 deployment.path = "/opt/prefect/flows"
-
-         if not skip_upload:
-             if (
-                 deployment.storage
-                 and "put-directory" in deployment.storage.get_block_capabilities()
-             ):
-                 await deployment.upload_to_storage(ignore_file=ignore_file)
-
-         if output:
-             await deployment.to_yaml(output)
-
-         if apply:
-             await deployment.apply()
-
-         return deployment
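For context on the removal above, the file's own deprecation notices name `flow.deploy` as the successor to `Deployment.build_from_flow`, and `run_deployment` remains importable from `prefect.deployments` (a new `prefect/deployments/flow_runs.py` appears earlier in this diff). A minimal migration sketch follows; the flow, work pool, and image names are hypothetical placeholders, not part of this diff:

from prefect import flow
from prefect.deployments import run_deployment


@flow(log_prints=True)
def my_flow(name: str = "world"):
    print(f"Hello, {name}!")


if __name__ == "__main__":
    # Register a deployment through the API; this replaces the removed
    # Deployment.build_from_flow(..., apply=True) pattern.
    my_flow.deploy(
        name="example",
        work_pool_name="my-work-pool",  # hypothetical work pool
        image="my-registry/my-image:dev",  # hypothetical image for worker runs
    )

    # Create a flow run from the registered deployment and return immediately
    # (timeout=0), matching the semantics documented in the removed
    # run_deployment docstring above.
    flow_run = run_deployment(name="my-flow/example", timeout=0)

Note that `run_deployment` is sync-compatible, so the call above works from synchronous code and returns the created flow run's metadata without waiting for it to finish.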