prefect-client 2.20.4__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- prefect/__init__.py +74 -110
- prefect/_internal/compatibility/deprecated.py +6 -115
- prefect/_internal/compatibility/experimental.py +4 -79
- prefect/_internal/compatibility/migration.py +166 -0
- prefect/_internal/concurrency/__init__.py +2 -2
- prefect/_internal/concurrency/api.py +1 -35
- prefect/_internal/concurrency/calls.py +0 -6
- prefect/_internal/concurrency/cancellation.py +0 -3
- prefect/_internal/concurrency/event_loop.py +0 -20
- prefect/_internal/concurrency/inspection.py +3 -3
- prefect/_internal/concurrency/primitives.py +1 -0
- prefect/_internal/concurrency/services.py +23 -0
- prefect/_internal/concurrency/threads.py +35 -0
- prefect/_internal/concurrency/waiters.py +0 -28
- prefect/_internal/integrations.py +7 -0
- prefect/_internal/pydantic/__init__.py +0 -45
- prefect/_internal/pydantic/annotations/pendulum.py +2 -2
- prefect/_internal/pydantic/v1_schema.py +21 -22
- prefect/_internal/pydantic/v2_schema.py +0 -2
- prefect/_internal/pydantic/v2_validated_func.py +18 -23
- prefect/_internal/pytz.py +1 -1
- prefect/_internal/retries.py +61 -0
- prefect/_internal/schemas/bases.py +45 -177
- prefect/_internal/schemas/fields.py +1 -43
- prefect/_internal/schemas/validators.py +47 -233
- prefect/agent.py +3 -695
- prefect/artifacts.py +173 -14
- prefect/automations.py +39 -4
- prefect/blocks/abstract.py +1 -1
- prefect/blocks/core.py +405 -153
- prefect/blocks/fields.py +2 -57
- prefect/blocks/notifications.py +43 -28
- prefect/blocks/redis.py +168 -0
- prefect/blocks/system.py +67 -20
- prefect/blocks/webhook.py +2 -9
- prefect/cache_policies.py +239 -0
- prefect/client/__init__.py +4 -0
- prefect/client/base.py +33 -27
- prefect/client/cloud.py +65 -20
- prefect/client/collections.py +1 -1
- prefect/client/orchestration.py +650 -442
- prefect/client/schemas/actions.py +115 -100
- prefect/client/schemas/filters.py +46 -52
- prefect/client/schemas/objects.py +228 -178
- prefect/client/schemas/responses.py +18 -36
- prefect/client/schemas/schedules.py +55 -36
- prefect/client/schemas/sorting.py +2 -0
- prefect/client/subscriptions.py +8 -7
- prefect/client/types/flexible_schedule_list.py +11 -0
- prefect/client/utilities.py +9 -6
- prefect/concurrency/asyncio.py +60 -11
- prefect/concurrency/context.py +24 -0
- prefect/concurrency/events.py +2 -2
- prefect/concurrency/services.py +46 -16
- prefect/concurrency/sync.py +51 -7
- prefect/concurrency/v1/asyncio.py +143 -0
- prefect/concurrency/v1/context.py +27 -0
- prefect/concurrency/v1/events.py +61 -0
- prefect/concurrency/v1/services.py +116 -0
- prefect/concurrency/v1/sync.py +92 -0
- prefect/context.py +246 -149
- prefect/deployments/__init__.py +33 -18
- prefect/deployments/base.py +10 -15
- prefect/deployments/deployments.py +2 -1048
- prefect/deployments/flow_runs.py +178 -0
- prefect/deployments/runner.py +72 -173
- prefect/deployments/schedules.py +31 -25
- prefect/deployments/steps/__init__.py +0 -1
- prefect/deployments/steps/core.py +7 -0
- prefect/deployments/steps/pull.py +15 -21
- prefect/deployments/steps/utility.py +2 -1
- prefect/docker/__init__.py +20 -0
- prefect/docker/docker_image.py +82 -0
- prefect/engine.py +15 -2475
- prefect/events/actions.py +17 -23
- prefect/events/cli/automations.py +20 -7
- prefect/events/clients.py +142 -80
- prefect/events/filters.py +14 -18
- prefect/events/related.py +74 -75
- prefect/events/schemas/__init__.py +0 -5
- prefect/events/schemas/automations.py +55 -46
- prefect/events/schemas/deployment_triggers.py +7 -197
- prefect/events/schemas/events.py +46 -65
- prefect/events/schemas/labelling.py +10 -14
- prefect/events/utilities.py +4 -5
- prefect/events/worker.py +23 -8
- prefect/exceptions.py +15 -0
- prefect/filesystems.py +30 -529
- prefect/flow_engine.py +827 -0
- prefect/flow_runs.py +379 -7
- prefect/flows.py +470 -360
- prefect/futures.py +382 -331
- prefect/infrastructure/__init__.py +5 -26
- prefect/infrastructure/base.py +3 -320
- prefect/infrastructure/provisioners/__init__.py +5 -3
- prefect/infrastructure/provisioners/cloud_run.py +13 -8
- prefect/infrastructure/provisioners/container_instance.py +14 -9
- prefect/infrastructure/provisioners/ecs.py +10 -8
- prefect/infrastructure/provisioners/modal.py +8 -5
- prefect/input/__init__.py +4 -0
- prefect/input/actions.py +2 -4
- prefect/input/run_input.py +9 -9
- prefect/logging/formatters.py +2 -4
- prefect/logging/handlers.py +9 -14
- prefect/logging/loggers.py +5 -5
- prefect/main.py +72 -0
- prefect/plugins.py +2 -64
- prefect/profiles.toml +16 -2
- prefect/records/__init__.py +1 -0
- prefect/records/base.py +223 -0
- prefect/records/filesystem.py +207 -0
- prefect/records/memory.py +178 -0
- prefect/records/result_store.py +64 -0
- prefect/results.py +577 -504
- prefect/runner/runner.py +117 -47
- prefect/runner/server.py +32 -34
- prefect/runner/storage.py +3 -12
- prefect/runner/submit.py +2 -10
- prefect/runner/utils.py +2 -2
- prefect/runtime/__init__.py +1 -0
- prefect/runtime/deployment.py +1 -0
- prefect/runtime/flow_run.py +40 -5
- prefect/runtime/task_run.py +1 -0
- prefect/serializers.py +28 -39
- prefect/server/api/collections_data/views/aggregate-worker-metadata.json +5 -14
- prefect/settings.py +209 -332
- prefect/states.py +160 -63
- prefect/task_engine.py +1478 -57
- prefect/task_runners.py +383 -287
- prefect/task_runs.py +240 -0
- prefect/task_worker.py +463 -0
- prefect/tasks.py +684 -374
- prefect/transactions.py +410 -0
- prefect/types/__init__.py +72 -86
- prefect/types/entrypoint.py +13 -0
- prefect/utilities/annotations.py +4 -3
- prefect/utilities/asyncutils.py +227 -148
- prefect/utilities/callables.py +137 -45
- prefect/utilities/collections.py +134 -86
- prefect/utilities/dispatch.py +27 -14
- prefect/utilities/dockerutils.py +11 -4
- prefect/utilities/engine.py +186 -32
- prefect/utilities/filesystem.py +4 -5
- prefect/utilities/importtools.py +26 -27
- prefect/utilities/pydantic.py +128 -38
- prefect/utilities/schema_tools/hydration.py +18 -1
- prefect/utilities/schema_tools/validation.py +30 -0
- prefect/utilities/services.py +35 -9
- prefect/utilities/templating.py +12 -2
- prefect/utilities/timeout.py +20 -5
- prefect/utilities/urls.py +195 -0
- prefect/utilities/visualization.py +1 -0
- prefect/variables.py +78 -59
- prefect/workers/__init__.py +0 -1
- prefect/workers/base.py +237 -244
- prefect/workers/block.py +5 -226
- prefect/workers/cloud.py +6 -0
- prefect/workers/process.py +265 -12
- prefect/workers/server.py +29 -11
- {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/METADATA +28 -24
- prefect_client-3.0.0.dist-info/RECORD +201 -0
- {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/WHEEL +1 -1
- prefect/_internal/pydantic/_base_model.py +0 -51
- prefect/_internal/pydantic/_compat.py +0 -82
- prefect/_internal/pydantic/_flags.py +0 -20
- prefect/_internal/pydantic/_types.py +0 -8
- prefect/_internal/pydantic/utilities/config_dict.py +0 -72
- prefect/_internal/pydantic/utilities/field_validator.py +0 -150
- prefect/_internal/pydantic/utilities/model_construct.py +0 -56
- prefect/_internal/pydantic/utilities/model_copy.py +0 -55
- prefect/_internal/pydantic/utilities/model_dump.py +0 -136
- prefect/_internal/pydantic/utilities/model_dump_json.py +0 -112
- prefect/_internal/pydantic/utilities/model_fields.py +0 -50
- prefect/_internal/pydantic/utilities/model_fields_set.py +0 -29
- prefect/_internal/pydantic/utilities/model_json_schema.py +0 -82
- prefect/_internal/pydantic/utilities/model_rebuild.py +0 -80
- prefect/_internal/pydantic/utilities/model_validate.py +0 -75
- prefect/_internal/pydantic/utilities/model_validate_json.py +0 -68
- prefect/_internal/pydantic/utilities/model_validator.py +0 -87
- prefect/_internal/pydantic/utilities/type_adapter.py +0 -71
- prefect/_vendor/fastapi/__init__.py +0 -25
- prefect/_vendor/fastapi/applications.py +0 -946
- prefect/_vendor/fastapi/background.py +0 -3
- prefect/_vendor/fastapi/concurrency.py +0 -44
- prefect/_vendor/fastapi/datastructures.py +0 -58
- prefect/_vendor/fastapi/dependencies/__init__.py +0 -0
- prefect/_vendor/fastapi/dependencies/models.py +0 -64
- prefect/_vendor/fastapi/dependencies/utils.py +0 -877
- prefect/_vendor/fastapi/encoders.py +0 -177
- prefect/_vendor/fastapi/exception_handlers.py +0 -40
- prefect/_vendor/fastapi/exceptions.py +0 -46
- prefect/_vendor/fastapi/logger.py +0 -3
- prefect/_vendor/fastapi/middleware/__init__.py +0 -1
- prefect/_vendor/fastapi/middleware/asyncexitstack.py +0 -25
- prefect/_vendor/fastapi/middleware/cors.py +0 -3
- prefect/_vendor/fastapi/middleware/gzip.py +0 -3
- prefect/_vendor/fastapi/middleware/httpsredirect.py +0 -3
- prefect/_vendor/fastapi/middleware/trustedhost.py +0 -3
- prefect/_vendor/fastapi/middleware/wsgi.py +0 -3
- prefect/_vendor/fastapi/openapi/__init__.py +0 -0
- prefect/_vendor/fastapi/openapi/constants.py +0 -2
- prefect/_vendor/fastapi/openapi/docs.py +0 -203
- prefect/_vendor/fastapi/openapi/models.py +0 -480
- prefect/_vendor/fastapi/openapi/utils.py +0 -485
- prefect/_vendor/fastapi/param_functions.py +0 -340
- prefect/_vendor/fastapi/params.py +0 -453
- prefect/_vendor/fastapi/py.typed +0 -0
- prefect/_vendor/fastapi/requests.py +0 -4
- prefect/_vendor/fastapi/responses.py +0 -40
- prefect/_vendor/fastapi/routing.py +0 -1331
- prefect/_vendor/fastapi/security/__init__.py +0 -15
- prefect/_vendor/fastapi/security/api_key.py +0 -98
- prefect/_vendor/fastapi/security/base.py +0 -6
- prefect/_vendor/fastapi/security/http.py +0 -172
- prefect/_vendor/fastapi/security/oauth2.py +0 -227
- prefect/_vendor/fastapi/security/open_id_connect_url.py +0 -34
- prefect/_vendor/fastapi/security/utils.py +0 -10
- prefect/_vendor/fastapi/staticfiles.py +0 -1
- prefect/_vendor/fastapi/templating.py +0 -3
- prefect/_vendor/fastapi/testclient.py +0 -1
- prefect/_vendor/fastapi/types.py +0 -3
- prefect/_vendor/fastapi/utils.py +0 -235
- prefect/_vendor/fastapi/websockets.py +0 -7
- prefect/_vendor/starlette/__init__.py +0 -1
- prefect/_vendor/starlette/_compat.py +0 -28
- prefect/_vendor/starlette/_exception_handler.py +0 -80
- prefect/_vendor/starlette/_utils.py +0 -88
- prefect/_vendor/starlette/applications.py +0 -261
- prefect/_vendor/starlette/authentication.py +0 -159
- prefect/_vendor/starlette/background.py +0 -43
- prefect/_vendor/starlette/concurrency.py +0 -59
- prefect/_vendor/starlette/config.py +0 -151
- prefect/_vendor/starlette/convertors.py +0 -87
- prefect/_vendor/starlette/datastructures.py +0 -707
- prefect/_vendor/starlette/endpoints.py +0 -130
- prefect/_vendor/starlette/exceptions.py +0 -60
- prefect/_vendor/starlette/formparsers.py +0 -276
- prefect/_vendor/starlette/middleware/__init__.py +0 -17
- prefect/_vendor/starlette/middleware/authentication.py +0 -52
- prefect/_vendor/starlette/middleware/base.py +0 -220
- prefect/_vendor/starlette/middleware/cors.py +0 -176
- prefect/_vendor/starlette/middleware/errors.py +0 -265
- prefect/_vendor/starlette/middleware/exceptions.py +0 -74
- prefect/_vendor/starlette/middleware/gzip.py +0 -113
- prefect/_vendor/starlette/middleware/httpsredirect.py +0 -19
- prefect/_vendor/starlette/middleware/sessions.py +0 -82
- prefect/_vendor/starlette/middleware/trustedhost.py +0 -64
- prefect/_vendor/starlette/middleware/wsgi.py +0 -147
- prefect/_vendor/starlette/py.typed +0 -0
- prefect/_vendor/starlette/requests.py +0 -328
- prefect/_vendor/starlette/responses.py +0 -347
- prefect/_vendor/starlette/routing.py +0 -933
- prefect/_vendor/starlette/schemas.py +0 -154
- prefect/_vendor/starlette/staticfiles.py +0 -248
- prefect/_vendor/starlette/status.py +0 -199
- prefect/_vendor/starlette/templating.py +0 -231
- prefect/_vendor/starlette/testclient.py +0 -804
- prefect/_vendor/starlette/types.py +0 -30
- prefect/_vendor/starlette/websockets.py +0 -193
- prefect/blocks/kubernetes.py +0 -119
- prefect/deprecated/__init__.py +0 -0
- prefect/deprecated/data_documents.py +0 -350
- prefect/deprecated/packaging/__init__.py +0 -12
- prefect/deprecated/packaging/base.py +0 -96
- prefect/deprecated/packaging/docker.py +0 -146
- prefect/deprecated/packaging/file.py +0 -92
- prefect/deprecated/packaging/orion.py +0 -80
- prefect/deprecated/packaging/serializers.py +0 -171
- prefect/events/instrument.py +0 -135
- prefect/infrastructure/container.py +0 -824
- prefect/infrastructure/kubernetes.py +0 -920
- prefect/infrastructure/process.py +0 -289
- prefect/manifests.py +0 -20
- prefect/new_flow_engine.py +0 -449
- prefect/new_task_engine.py +0 -423
- prefect/pydantic/__init__.py +0 -76
- prefect/pydantic/main.py +0 -39
- prefect/software/__init__.py +0 -2
- prefect/software/base.py +0 -50
- prefect/software/conda.py +0 -199
- prefect/software/pip.py +0 -122
- prefect/software/python.py +0 -52
- prefect/task_server.py +0 -322
- prefect_client-2.20.4.dist-info/RECORD +0 -294
- /prefect/{_internal/pydantic/utilities → client/types}/__init__.py +0 -0
- /prefect/{_vendor → concurrency/v1}/__init__.py +0 -0
- {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/LICENSE +0 -0
- {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/top_level.txt +0 -0
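Each entry above pairs a file path with its added (+) and removed (-) line counts; entries written as {old → new} are renames. For readers who want to re-derive such counts locally, below is a minimal sketch: it assumes both wheels (ordinary zip archives, downloadable from PyPI under their standard filenames) sit in the working directory, and the hosted diff service may count differently around renames and binary members.

# Minimal sketch: unpack both wheels and count changed lines per file.
# Wheel filenames are the standard PyPI artifact names; adjust as needed.
import difflib
import zipfile


def wheel_lines(path: str) -> dict[str, list[str]]:
    """Map each file inside a wheel to its decoded lines."""
    with zipfile.ZipFile(path) as wheel:
        return {
            name: wheel.read(name).decode("utf-8", errors="replace").splitlines()
            for name in wheel.namelist()
        }


old = wheel_lines("prefect_client-2.20.4-py3-none-any.whl")
new = wheel_lines("prefect_client-3.0.0-py3-none-any.whl")

for name in sorted(old.keys() | new.keys()):
    diff = list(difflib.unified_diff(old.get(name, []), new.get(name, []), lineterm=""))
    if diff:
        added = sum(1 for ln in diff if ln.startswith("+") and not ln.startswith("+++"))
        removed = sum(1 for ln in diff if ln.startswith("-") and not ln.startswith("---"))
        print(f"{name} +{added} -{removed}")  # mirrors the "+N -M" columns above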
prefect/deployments/deployments.py
@@ -1,1049 +1,3 @@
-"""
-Objects for specifying deployments and utilities for loading flows from deployments.
-"""
+from .._internal.compatibility.migration import getattr_migration
 
-import importlib
-import json
-import os
-import sys
-from datetime import datetime
-from functools import partial
-from pathlib import Path
-from typing import Any, Dict, Iterable, List, Optional, Union
-from uuid import UUID
-
-import anyio
-import pendulum
-import yaml
-
-from prefect._internal.pydantic import HAS_PYDANTIC_V2
-
-if HAS_PYDANTIC_V2:
-    from pydantic.v1 import BaseModel, Field, parse_obj_as, root_validator, validator
-else:
-    from pydantic import BaseModel, Field, parse_obj_as, root_validator, validator
-
-from prefect._internal.compatibility.deprecated import (
-    DeprecatedInfraOverridesField,
-    deprecated_callable,
-    deprecated_class,
-    deprecated_parameter,
-    handle_deprecated_infra_overrides_parameter,
-)
-from prefect._internal.schemas.validators import (
-    handle_openapi_schema,
-    infrastructure_must_have_capabilities,
-    reconcile_schedules,
-    storage_must_have_capabilities,
-    validate_automation_names,
-    validate_deprecated_schedule_fields,
-)
-from prefect.blocks.core import Block
-from prefect.blocks.fields import SecretDict
-from prefect.client.orchestration import PrefectClient, get_client
-from prefect.client.schemas.actions import DeploymentScheduleCreate
-from prefect.client.schemas.objects import (
-    FlowRun,
-    MinimalDeploymentSchedule,
-)
-from prefect.client.schemas.schedules import SCHEDULE_TYPES
-from prefect.client.utilities import inject_client
-from prefect.context import FlowRunContext, PrefectObjectRegistry, TaskRunContext
-from prefect.deployments.schedules import (
-    FlexibleScheduleList,
-)
-from prefect.deployments.steps.core import run_steps
-from prefect.events import DeploymentTriggerTypes, TriggerTypes
-from prefect.exceptions import (
-    BlockMissingCapabilities,
-    ObjectAlreadyExists,
-    ObjectNotFound,
-    PrefectHTTPStatusError,
-)
-from prefect.filesystems import LocalFileSystem
-from prefect.flows import Flow, load_flow_from_entrypoint
-from prefect.infrastructure import Infrastructure, Process
-from prefect.logging.loggers import flow_run_logger, get_logger
-from prefect.states import Scheduled
-from prefect.tasks import Task
-from prefect.utilities.asyncutils import run_sync_in_worker_thread, sync_compatible
-from prefect.utilities.callables import ParameterSchema, parameter_schema
-from prefect.utilities.filesystem import relative_path_to_current_platform, tmpchdir
-from prefect.utilities.slugify import slugify
-
-logger = get_logger("deployments")
-
-
-@sync_compatible
-@deprecated_parameter(
-    "infra_overrides",
-    start_date="Apr 2024",
-    help="Use `job_variables` instead.",
-)
-@inject_client
-async def run_deployment(
-    name: Union[str, UUID],
-    client: Optional[PrefectClient] = None,
-    parameters: Optional[dict] = None,
-    scheduled_time: Optional[datetime] = None,
-    flow_run_name: Optional[str] = None,
-    timeout: Optional[float] = None,
-    poll_interval: Optional[float] = 5,
-    tags: Optional[Iterable[str]] = None,
-    idempotency_key: Optional[str] = None,
-    work_queue_name: Optional[str] = None,
-    as_subflow: Optional[bool] = True,
-    infra_overrides: Optional[dict] = None,
-    job_variables: Optional[dict] = None,
-) -> FlowRun:
-    """
-    Create a flow run for a deployment and return it after completion or a timeout.
-
-    By default, this function blocks until the flow run finishes executing.
-    Specify a timeout (in seconds) to wait for the flow run to execute before
-    returning flow run metadata. To return immediately, without waiting for the
-    flow run to execute, set `timeout=0`.
-
-    Note that if you specify a timeout, this function will return the flow run
-    metadata whether or not the flow run finished executing.
-
-    If called within a flow or task, the flow run this function creates will
-    be linked to the current flow run as a subflow. Disable this behavior by
-    passing `as_subflow=False`.
-
-    Args:
-        name: The deployment id or deployment name in the form:
-            `"flow name/deployment name"`
-        parameters: Parameter overrides for this flow run. Merged with the deployment
-            defaults.
-        scheduled_time: The time to schedule the flow run for, defaults to scheduling
-            the flow run to start now.
-        flow_run_name: A name for the created flow run
-        timeout: The amount of time to wait (in seconds) for the flow run to
-            complete before returning. Setting `timeout` to 0 will return the flow
-            run metadata immediately. Setting `timeout` to None will allow this
-            function to poll indefinitely. Defaults to None.
-        poll_interval: The number of seconds between polls
-        tags: A list of tags to associate with this flow run; tags can be used in
-            automations and for organizational purposes.
-        idempotency_key: A unique value to recognize retries of the same run, and
-            prevent creating multiple flow runs.
-        work_queue_name: The name of a work queue to use for this run. Defaults to
-            the default work queue for the deployment.
-        as_subflow: Whether to link the flow run as a subflow of the current
-            flow or task run.
-        job_variables: A dictionary of dot delimited infrastructure overrides that
-            will be applied at runtime; for example `env.CONFIG_KEY=config_value` or
-            `namespace='prefect'`
-    """
-    if timeout is not None and timeout < 0:
-        raise ValueError("`timeout` cannot be negative")
-
-    if scheduled_time is None:
-        scheduled_time = pendulum.now("UTC")
-
-    jv = handle_deprecated_infra_overrides_parameter(job_variables, infra_overrides)
-
-    parameters = parameters or {}
-
-    deployment_id = None
-
-    if isinstance(name, UUID):
-        deployment_id = name
-    else:
-        try:
-            deployment_id = UUID(name)
-        except ValueError:
-            pass
-
-    if deployment_id:
-        deployment = await client.read_deployment(deployment_id=deployment_id)
-    else:
-        deployment = await client.read_deployment_by_name(name)
-
-    flow_run_ctx = FlowRunContext.get()
-    task_run_ctx = TaskRunContext.get()
-    if as_subflow and (flow_run_ctx or task_run_ctx):
-        # This was called from a flow. Link the flow run as a subflow.
-        from prefect.engine import (
-            Pending,
-            _dynamic_key_for_task_run,
-            collect_task_run_inputs,
-        )
-
-        task_inputs = {
-            k: await collect_task_run_inputs(v) for k, v in parameters.items()
-        }
-
-        if deployment_id:
-            flow = await client.read_flow(deployment.flow_id)
-            deployment_name = f"{flow.name}/{deployment.name}"
-        else:
-            deployment_name = name
-
-        # Generate a task in the parent flow run to represent the result of the subflow
-        dummy_task = Task(
-            name=deployment_name,
-            fn=lambda: None,
-            version=deployment.version,
-        )
-        # Override the default task key to include the deployment name
-        dummy_task.task_key = f"{__name__}.run_deployment.{slugify(deployment_name)}"
-        flow_run_id = (
-            flow_run_ctx.flow_run.id
-            if flow_run_ctx
-            else task_run_ctx.task_run.flow_run_id
-        )
-        dynamic_key = (
-            _dynamic_key_for_task_run(flow_run_ctx, dummy_task)
-            if flow_run_ctx
-            else task_run_ctx.task_run.dynamic_key
-        )
-        parent_task_run = await client.create_task_run(
-            task=dummy_task,
-            flow_run_id=flow_run_id,
-            dynamic_key=dynamic_key,
-            task_inputs=task_inputs,
-            state=Pending(),
-        )
-        parent_task_run_id = parent_task_run.id
-    else:
-        parent_task_run_id = None
-
-    flow_run = await client.create_flow_run_from_deployment(
-        deployment.id,
-        parameters=parameters,
-        state=Scheduled(scheduled_time=scheduled_time),
-        name=flow_run_name,
-        tags=tags,
-        idempotency_key=idempotency_key,
-        parent_task_run_id=parent_task_run_id,
-        work_queue_name=work_queue_name,
-        job_variables=jv,
-    )
-
-    flow_run_id = flow_run.id
-
-    if timeout == 0:
-        return flow_run
-
-    with anyio.move_on_after(timeout):
-        while True:
-            flow_run = await client.read_flow_run(flow_run_id)
-            flow_state = flow_run.state
-            if flow_state and flow_state.is_final():
-                return flow_run
-            await anyio.sleep(poll_interval)
-
-    return flow_run
-
-
-@deprecated_callable(
-    start_date="Jun 2024",
-    help="Will be moved in Prefect 3 to prefect.flows:load_flow_from_flow_run",
-)
-@inject_client
-async def load_flow_from_flow_run(
-    flow_run: FlowRun,
-    client: PrefectClient,
-    ignore_storage: bool = False,
-    storage_base_path: Optional[str] = None,
-    use_placeholder_flow: bool = True,
-) -> Flow:
-    """
-    Load a flow from the location/script provided in a deployment's storage document.
-
-    If `ignore_storage=True` is provided, no pull from remote storage occurs. This flag
-    is largely for testing, and assumes the flow is already available locally.
-    """
-    deployment = await client.read_deployment(flow_run.deployment_id)
-
-    if deployment.entrypoint is None:
-        raise ValueError(
-            f"Deployment {deployment.id} does not have an entrypoint and can not be run."
-        )
-
-    run_logger = flow_run_logger(flow_run)
-
-    runner_storage_base_path = storage_base_path or os.environ.get(
-        "PREFECT__STORAGE_BASE_PATH"
-    )
-
-    # If there's no colon, assume it's a module path
-    if ":" not in deployment.entrypoint:
-        run_logger.debug(
-            f"Importing flow code from module path {deployment.entrypoint}"
-        )
-        flow = await run_sync_in_worker_thread(
-            load_flow_from_entrypoint, deployment.entrypoint, use_placeholder_flow
-        )
-        return flow
-
-    if not ignore_storage and not deployment.pull_steps:
-        sys.path.insert(0, ".")
-        if deployment.storage_document_id:
-            storage_document = await client.read_block_document(
-                deployment.storage_document_id
-            )
-            storage_block = Block._from_block_document(storage_document)
-        else:
-            basepath = deployment.path or Path(deployment.manifest_path).parent
-            if runner_storage_base_path:
-                basepath = str(basepath).replace(
-                    "$STORAGE_BASE_PATH", runner_storage_base_path
-                )
-            storage_block = LocalFileSystem(basepath=basepath)
-
-        from_path = (
-            str(deployment.path).replace("$STORAGE_BASE_PATH", runner_storage_base_path)
-            if runner_storage_base_path and deployment.path
-            else deployment.path
-        )
-        run_logger.info(f"Downloading flow code from storage at {from_path!r}")
-        await storage_block.get_directory(from_path=from_path, local_path=".")
-
-    if deployment.pull_steps:
-        run_logger.debug(f"Running {len(deployment.pull_steps)} deployment pull steps")
-        output = await run_steps(deployment.pull_steps)
-        if output.get("directory"):
-            run_logger.debug(f"Changing working directory to {output['directory']!r}")
-            os.chdir(output["directory"])
-
-    import_path = relative_path_to_current_platform(deployment.entrypoint)
-    # for backwards compat
-    if deployment.manifest_path:
-        with open(deployment.manifest_path, "r") as f:
-            import_path = json.load(f)["import_path"]
-            import_path = (
-                Path(deployment.manifest_path).parent / import_path
-            ).absolute()
-    run_logger.debug(f"Importing flow code from '{import_path}'")
-
-    flow = await run_sync_in_worker_thread(
-        load_flow_from_entrypoint, str(import_path), use_placeholder_flow
-    )
-
-    return flow
-
-
-@deprecated_callable(start_date="Mar 2024")
-def load_deployments_from_yaml(
-    path: str,
-) -> PrefectObjectRegistry:
-    """
-    Load deployments from a yaml file.
-    """
-    with open(path, "r") as f:
-        contents = f.read()
-
-    # Parse into a yaml tree to retrieve separate documents
-    nodes = yaml.compose_all(contents)
-
-    with PrefectObjectRegistry(capture_failures=True) as registry:
-        for node in nodes:
-            with tmpchdir(path):
-                deployment_dict = yaml.safe_load(yaml.serialize(node))
-                # The return value is not necessary, just instantiating the Deployment
-                # is enough to get it recorded on the registry
-                parse_obj_as(Deployment, deployment_dict)
-
-    return registry
-
-
-@deprecated_class(
-    start_date="Mar 2024",
-    help="Use `flow.deploy` to deploy your flows instead."
-    " Refer to the upgrade guide for more information:"
-    " https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/.",
-)
-class Deployment(DeprecatedInfraOverridesField, BaseModel):
-    """
-    DEPRECATION WARNING:
-
-    This class is deprecated as of March 2024 and will not be available after September 2024.
-    It has been replaced by `flow.deploy`, which offers enhanced functionality and better a better user experience.
-    For upgrade instructions, see https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/.
-
-    A Prefect Deployment definition, used for specifying and building deployments.
-
-    Args:
-        name: A name for the deployment (required).
-        version: An optional version for the deployment; defaults to the flow's version
-        description: An optional description of the deployment; defaults to the flow's
-            description
-        tags: An optional list of tags to associate with this deployment; note that tags
-            are used only for organizational purposes. For delegating work to agents,
-            see `work_queue_name`.
-        schedule: A schedule to run this deployment on, once registered (deprecated)
-        is_schedule_active: Whether or not the schedule is active (deprecated)
-        schedules: A list of schedules to run this deployment on
-        work_queue_name: The work queue that will handle this deployment's runs
-        work_pool_name: The work pool for the deployment
-        flow_name: The name of the flow this deployment encapsulates
-        parameters: A dictionary of parameter values to pass to runs created from this
-            deployment
-        infrastructure: An optional infrastructure block used to configure
-            infrastructure for runs; if not provided, will default to running this
-            deployment in Agent subprocesses
-        job_variables: A dictionary of dot delimited infrastructure overrides that
-            will be applied at runtime; for example `env.CONFIG_KEY=config_value` or
-            `namespace='prefect'`
-        storage: An optional remote storage block used to store and retrieve this
-            workflow; if not provided, will default to referencing this flow by its
-            local path
-        path: The path to the working directory for the workflow, relative to remote
-            storage or, if stored on a local filesystem, an absolute path
-        entrypoint: The path to the entrypoint for the workflow, always relative to the
-            `path`
-        parameter_openapi_schema: The parameter schema of the flow, including defaults.
-        enforce_parameter_schema: Whether or not the Prefect API should enforce the
-            parameter schema for this deployment.
-
-    Examples:
-
-        Create a new deployment using configuration defaults for an imported flow:
-
-        >>> from my_project.flows import my_flow
-        >>> from prefect.deployments import Deployment
-        >>>
-        >>> deployment = Deployment.build_from_flow(
-        ...     flow=my_flow,
-        ...     name="example",
-        ...     version="1",
-        ...     tags=["demo"],
-        >>> )
-        >>> deployment.apply()
-
-        Create a new deployment with custom storage and an infrastructure override:
-
-        >>> from my_project.flows import my_flow
-        >>> from prefect.deployments import Deployment
-        >>> from prefect.filesystems import S3
-
-        >>> storage = S3.load("dev-bucket") # load a pre-defined block
-        >>> deployment = Deployment.build_from_flow(
-        ...     flow=my_flow,
-        ...     name="s3-example",
-        ...     version="2",
-        ...     tags=["aws"],
-        ...     storage=storage,
-        ...     job_variables=dict("env.PREFECT_LOGGING_LEVEL"="DEBUG"),
-        >>> )
-        >>> deployment.apply()
-
-    """
-
-    class Config:
-        json_encoders = {SecretDict: lambda v: v.dict()}
-        validate_assignment = True
-        extra = "forbid"
-
-    @property
-    def _editable_fields(self) -> List[str]:
-        editable_fields = [
-            "name",
-            "description",
-            "version",
-            "work_queue_name",
-            "work_pool_name",
-            "tags",
-            "parameters",
-            "schedule",
-            "schedules",
-            "is_schedule_active",
-            # The `infra_overrides` field has been renamed to `job_variables`.
-            # We will continue writing it in the YAML file as `infra_overrides`
-            # instead of `job_variables` for better backwards compat, but we'll
-            # accept either `job_variables` or `infra_overrides` when we read
-            # the file.
-            "infra_overrides",
-        ]
-
-        # if infrastructure is baked as a pre-saved block, then
-        # editing its fields will not update anything
-        if self.infrastructure._block_document_id:
-            return editable_fields
-        else:
-            return editable_fields + ["infrastructure"]
-
-    @property
-    def location(self) -> str:
-        """
-        The 'location' that this deployment points to is given by `path` alone
-        in the case of no remote storage, and otherwise by `storage.basepath / path`.
-
-        The underlying flow entrypoint is interpreted relative to this location.
-        """
-        location = ""
-        if self.storage:
-            location = (
-                self.storage.basepath + "/"
-                if not self.storage.basepath.endswith("/")
-                else ""
-            )
-        if self.path:
-            location += self.path
-        return location
-
-    @sync_compatible
-    async def to_yaml(self, path: Path) -> None:
-        yaml_dict = self._yaml_dict()
-        schema = self.schema()
-
-        with open(path, "w") as f:
-            # write header
-            f.write(
-                "###\n### A complete description of a Prefect Deployment for flow"
-                f" {self.flow_name!r}\n###\n"
-            )
-
-            # write editable fields
-            for field in self._editable_fields:
-                # write any comments
-                if schema["properties"][field].get("yaml_comment"):
-                    f.write(f"# {schema['properties'][field]['yaml_comment']}\n")
-                # write the field
-                yaml.dump({field: yaml_dict[field]}, f, sort_keys=False)
-
-            # write non-editable fields, excluding `job_variables` because we'll
-            # continue writing it as `infra_overrides` for better backwards compat
-            # with the existing file format.
-            f.write("\n###\n### DO NOT EDIT BELOW THIS LINE\n###\n")
-            yaml.dump(
-                {
-                    k: v
-                    for k, v in yaml_dict.items()
-                    if k not in self._editable_fields and k != "job_variables"
-                },
-                f,
-                sort_keys=False,
-            )
-
-    def _yaml_dict(self) -> dict:
-        """
-        Returns a YAML-compatible representation of this deployment as a dictionary.
-        """
-        # avoids issues with UUIDs showing up in YAML
-        all_fields = json.loads(
-            self.json(
-                exclude={
-                    "storage": {"_filesystem", "filesystem", "_remote_file_system"}
-                }
-            )
-        )
-        if all_fields["storage"]:
-            all_fields["storage"][
-                "_block_type_slug"
-            ] = self.storage.get_block_type_slug()
-        if all_fields["infrastructure"]:
-            all_fields["infrastructure"][
-                "_block_type_slug"
-            ] = self.infrastructure.get_block_type_slug()
-        return all_fields
-
-    @classmethod
-    def _validate_schedule(cls, value):
-        """We do not support COUNT-based (# of occurrences) RRule schedules for deployments."""
-        if value:
-            rrule_value = getattr(value, "rrule", None)
-            if rrule_value and "COUNT" in rrule_value.upper():
-                raise ValueError(
-                    "RRule schedules with `COUNT` are not supported. Please use `UNTIL`"
-                    " or the `/deployments/{id}/schedule` endpoint to schedule a fixed"
-                    " number of flow runs."
-                )
-
-    # top level metadata
-    name: str = Field(..., description="The name of the deployment.")
-    description: Optional[str] = Field(
-        default=None, description="An optional description of the deployment."
-    )
-    version: Optional[str] = Field(
-        default=None, description="An optional version for the deployment."
-    )
-    tags: List[str] = Field(
-        default_factory=list,
-        description="One of more tags to apply to this deployment.",
-    )
-    schedule: Optional[SCHEDULE_TYPES] = Field(default=None)
-    schedules: List[MinimalDeploymentSchedule] = Field(
-        default_factory=list,
-        description="The schedules to run this deployment on.",
-    )
-    is_schedule_active: Optional[bool] = Field(
-        default=None, description="Whether or not the schedule is active."
-    )
-    flow_name: Optional[str] = Field(default=None, description="The name of the flow.")
-    work_queue_name: Optional[str] = Field(
-        "default",
-        description="The work queue for the deployment.",
-        yaml_comment="The work queue that will handle this deployment's runs",
-    )
-    work_pool_name: Optional[str] = Field(
-        default=None, description="The work pool for the deployment"
-    )
-    # flow data
-    parameters: Dict[str, Any] = Field(default_factory=dict)
-    manifest_path: Optional[str] = Field(
-        default=None,
-        description=(
-            "The path to the flow's manifest file, relative to the chosen storage."
-        ),
-    )
-    infrastructure: Infrastructure = Field(default_factory=Process)
-    job_variables: Dict[str, Any] = Field(
-        default_factory=dict,
-        description="Overrides to apply to the base infrastructure block at runtime.",
-    )
-    storage: Optional[Block] = Field(
-        None,
-        help="The remote storage to use for this workflow.",
-    )
-    path: Optional[str] = Field(
-        default=None,
-        description=(
-            "The path to the working directory for the workflow, relative to remote"
-            " storage or an absolute path."
-        ),
-    )
-    entrypoint: Optional[str] = Field(
-        default=None,
-        description=(
-            "The path to the entrypoint for the workflow, relative to the `path`."
-        ),
-    )
-    parameter_openapi_schema: ParameterSchema = Field(
-        default_factory=ParameterSchema,
-        description="The parameter schema of the flow, including defaults.",
-    )
-    timestamp: datetime = Field(default_factory=partial(pendulum.now, "UTC"))
-    triggers: List[Union[DeploymentTriggerTypes, TriggerTypes]] = Field(
-        default_factory=list,
-        description="The triggers that should cause this deployment to run.",
-    )
-    # defaults to None to allow for backwards compatibility
-    enforce_parameter_schema: Optional[bool] = Field(
-        default=None,
-        description=(
-            "Whether or not the Prefect API should enforce the parameter schema for"
-            " this deployment."
-        ),
-    )
-
-    @validator("infrastructure", pre=True)
-    def validate_infrastructure_capabilities(cls, value):
-        return infrastructure_must_have_capabilities(value)
-
-    @validator("storage", pre=True)
-    def validate_storage(cls, value):
-        return storage_must_have_capabilities(value)
-
-    @validator("parameter_openapi_schema", pre=True)
-    def validate_parameter_openapi_schema(cls, value):
-        return handle_openapi_schema(value)
-
-    @validator("triggers")
-    def validate_triggers(cls, field_value, values):
-        return validate_automation_names(field_value, values)
-
-    @root_validator(pre=True)
-    def validate_schedule(cls, values):
-        return validate_deprecated_schedule_fields(values, logger)
-
-    @root_validator(pre=True)
-    def validate_backwards_compatibility_for_schedule(cls, values):
-        return reconcile_schedules(cls, values)
-
-    @classmethod
-    @sync_compatible
-    async def load_from_yaml(cls, path: str):
-        data = yaml.safe_load(await anyio.Path(path).read_bytes())
-        # load blocks from server to ensure secret values are properly hydrated
-        if data.get("storage"):
-            block_doc_name = data["storage"].get("_block_document_name")
-            # if no doc name, this block is not stored on the server
-            if block_doc_name:
-                block_slug = data["storage"]["_block_type_slug"]
-                block = await Block.load(f"{block_slug}/{block_doc_name}")
-                data["storage"] = block
-
-        if data.get("infrastructure"):
-            block_doc_name = data["infrastructure"].get("_block_document_name")
-            # if no doc name, this block is not stored on the server
-            if block_doc_name:
-                block_slug = data["infrastructure"]["_block_type_slug"]
-                block = await Block.load(f"{block_slug}/{block_doc_name}")
-                data["infrastructure"] = block
-
-        return cls(**data)
-
-    @sync_compatible
-    async def load(self) -> bool:
-        """
-        Queries the API for a deployment with this name for this flow, and if found,
-        prepopulates any settings that were not set at initialization.
-
-        Returns a boolean specifying whether a load was successful or not.
-
-        Raises:
-            - ValueError: if both name and flow name are not set
-        """
-        if not self.name or not self.flow_name:
-            raise ValueError("Both a deployment name and flow name must be provided.")
-        async with get_client() as client:
-            try:
-                deployment = await client.read_deployment_by_name(
-                    f"{self.flow_name}/{self.name}"
-                )
-                if deployment.storage_document_id:
-                    Block._from_block_document(
-                        await client.read_block_document(deployment.storage_document_id)
-                    )
-
-                excluded_fields = self.__fields_set__.union(
-                    {
-                        "infrastructure",
-                        "storage",
-                        "timestamp",
-                        "triggers",
-                        "enforce_parameter_schema",
-                        "schedules",
-                        "schedule",
-                        "is_schedule_active",
-                    }
-                )
-                for field in set(self.__fields__.keys()) - excluded_fields:
-                    new_value = getattr(deployment, field)
-                    setattr(self, field, new_value)
-
-                if "schedules" not in self.__fields_set__:
-                    self.schedules = [
-                        MinimalDeploymentSchedule(
-                            **schedule.dict(include={"schedule", "active"})
-                        )
-                        for schedule in deployment.schedules
-                    ]
-
-                # The API server generates the "schedule" field from the
-                # current list of schedules, so if the user has locally set
-                # "schedules" to anything, we should avoid sending "schedule"
-                # and let the API server generate a new value if necessary.
-                if "schedules" in self.__fields_set__:
-                    self.schedule = None
-                    self.is_schedule_active = None
-                else:
-                    # The user isn't using "schedules," so we should
-                    # populate "schedule" and "is_schedule_active" from the
-                    # API's version of the deployment, unless the user gave
-                    # us these fields in __init__().
-                    if "schedule" not in self.__fields_set__:
-                        self.schedule = deployment.schedule
-                    if "is_schedule_active" not in self.__fields_set__:
-                        self.is_schedule_active = deployment.is_schedule_active
-
-                if "infrastructure" not in self.__fields_set__:
-                    if deployment.infrastructure_document_id:
-                        self.infrastructure = Block._from_block_document(
-                            await client.read_block_document(
-                                deployment.infrastructure_document_id
-                            )
-                        )
-                if "storage" not in self.__fields_set__:
-                    if deployment.storage_document_id:
-                        self.storage = Block._from_block_document(
-                            await client.read_block_document(
-                                deployment.storage_document_id
-                            )
-                        )
-            except ObjectNotFound:
-                return False
-            return True
-
-    @sync_compatible
-    async def update(self, ignore_none: bool = False, **kwargs):
-        """
-        Performs an in-place update with the provided settings.
-
-        Args:
-            ignore_none: if True, all `None` values are ignored when performing the
-                update
-        """
-        unknown_keys = set(kwargs.keys()) - set(self.dict().keys())
-        if unknown_keys:
-            raise ValueError(
-                f"Received unexpected attributes: {', '.join(unknown_keys)}"
-            )
-        for key, value in kwargs.items():
-            if ignore_none and value is None:
-                continue
-            setattr(self, key, value)
-
-    @sync_compatible
-    async def upload_to_storage(
-        self, storage_block: str = None, ignore_file: str = ".prefectignore"
-    ) -> Optional[int]:
-        """
-        Uploads the workflow this deployment represents using a provided storage block;
-        if no block is provided, defaults to configuring self for local storage.
-
-        Args:
-            storage_block: a string reference a remote storage block slug `$type/$name`;
-                if provided, used to upload the workflow's project
-            ignore_file: an optional path to a `.prefectignore` file that specifies
-                filename patterns to ignore when uploading to remote storage; if not
-                provided, looks for `.prefectignore` in the current working directory
-        """
-        file_count = None
-        if storage_block:
-            storage = await Block.load(storage_block)
-
-            if "put-directory" not in storage.get_block_capabilities():
-                raise BlockMissingCapabilities(
-                    f"Storage block {storage!r} missing 'put-directory' capability."
-                )
-
-            self.storage = storage
-
-            # upload current directory to storage location
-            file_count = await self.storage.put_directory(
-                ignore_file=ignore_file, to_path=self.path
-            )
-        elif self.storage:
-            if "put-directory" not in self.storage.get_block_capabilities():
-                raise BlockMissingCapabilities(
-                    f"Storage block {self.storage!r} missing 'put-directory'"
-                    " capability."
-                )
-
-            file_count = await self.storage.put_directory(
-                ignore_file=ignore_file, to_path=self.path
-            )
-
-        # persists storage now in case it contains secret values
-        if self.storage and not self.storage._block_document_id:
-            await self.storage._save(is_anonymous=True)
-
-        return file_count
-
-    @sync_compatible
-    async def apply(
-        self, upload: bool = False, work_queue_concurrency: int = None
-    ) -> UUID:
-        """
-        Registers this deployment with the API and returns the deployment's ID.
-
-        Args:
-            upload: if True, deployment files are automatically uploaded to remote
-                storage
-            work_queue_concurrency: If provided, sets the concurrency limit on the
-                deployment's work queue
-        """
-        if not self.name or not self.flow_name:
-            raise ValueError("Both a deployment name and flow name must be set.")
-        async with get_client() as client:
-            # prep IDs
-            flow_id = await client.create_flow_from_name(self.flow_name)
-
-            infrastructure_document_id = self.infrastructure._block_document_id
-            if not infrastructure_document_id:
-                # if not building off a block, will create an anonymous block
-                self.infrastructure = self.infrastructure.copy()
-                infrastructure_document_id = await self.infrastructure._save(
-                    is_anonymous=True,
-                )
-
-            if upload:
-                await self.upload_to_storage()
-
-            if self.work_queue_name and work_queue_concurrency is not None:
-                try:
-                    res = await client.create_work_queue(
-                        name=self.work_queue_name, work_pool_name=self.work_pool_name
-                    )
-                except ObjectAlreadyExists:
-                    res = await client.read_work_queue_by_name(
-                        name=self.work_queue_name, work_pool_name=self.work_pool_name
-                    )
-                await client.update_work_queue(
-                    res.id, concurrency_limit=work_queue_concurrency
-                )
-
-            if self.schedule:
-                logger.info(
-                    "Interpreting the deprecated `schedule` field as an entry in "
-                    "`schedules`."
-                )
-                schedules = [
-                    DeploymentScheduleCreate(
-                        schedule=self.schedule, active=self.is_schedule_active
-                    )
-                ]
-            elif self.schedules:
-                schedules = [
-                    DeploymentScheduleCreate(**schedule.dict())
-                    for schedule in self.schedules
-                ]
-            else:
-                schedules = None
-
-            # we assume storage was already saved
-            storage_document_id = getattr(self.storage, "_block_document_id", None)
-            deployment_id = await client.create_deployment(
-                flow_id=flow_id,
-                name=self.name,
-                work_queue_name=self.work_queue_name,
-                work_pool_name=self.work_pool_name,
-                version=self.version,
-                schedules=schedules,
-                is_schedule_active=self.is_schedule_active,
-                parameters=self.parameters,
-                description=self.description,
-                tags=self.tags,
-                manifest_path=self.manifest_path,  # allows for backwards YAML compat
-                path=self.path,
-                entrypoint=self.entrypoint,
-                job_variables=self.job_variables,
-                storage_document_id=storage_document_id,
-                infrastructure_document_id=infrastructure_document_id,
-                parameter_openapi_schema=self.parameter_openapi_schema.dict(),
-                enforce_parameter_schema=self.enforce_parameter_schema,
-            )
-
-            if client.server_type.supports_automations():
-                try:
-                    # The triggers defined in the deployment spec are, essentially,
-                    # anonymous and attempting truly sync them with cloud is not
-                    # feasible. Instead, we remove all automations that are owned
-                    # by the deployment, meaning that they were created via this
-                    # mechanism below, and then recreate them.
-                    await client.delete_resource_owned_automations(
-                        f"prefect.deployment.{deployment_id}"
-                    )
-                except PrefectHTTPStatusError as e:
-                    if e.response.status_code == 404:
-                        # This Prefect server does not support automations, so we can safely
-                        # ignore this 404 and move on.
-                        return deployment_id
-                    raise e
-
-                for trigger in self.triggers:
-                    trigger.set_deployment_id(deployment_id)
-                    await client.create_automation(trigger.as_automation())
-
-            return deployment_id
-
-    @classmethod
-    @sync_compatible
-    async def build_from_flow(
-        cls,
-        flow: Flow,
-        name: str,
-        output: str = None,
-        skip_upload: bool = False,
-        ignore_file: str = ".prefectignore",
-        apply: bool = False,
-        load_existing: bool = True,
-        schedules: Optional[FlexibleScheduleList] = None,
-        **kwargs,
-    ) -> "Deployment":
-        """
-        Configure a deployment for a given flow.
-
-        Args:
-            flow: A flow function to deploy
-            name: A name for the deployment
-            output (optional): if provided, the full deployment specification will be
-                written as a YAML file in the location specified by `output`
-            skip_upload: if True, deployment files are not automatically uploaded to
-                remote storage
-            ignore_file: an optional path to a `.prefectignore` file that specifies
-                filename patterns to ignore when uploading to remote storage; if not
-                provided, looks for `.prefectignore` in the current working directory
-            apply: if True, the deployment is automatically registered with the API
-            load_existing: if True, load any settings that may already be configured for
-                the named deployment server-side (e.g., schedules, default parameter
-                values, etc.)
-            schedules: An optional list of schedules. Each item in the list can be:
-                - An instance of `MinimalDeploymentSchedule`.
-                - A dictionary with a `schedule` key, and optionally, an
-                  `active` key. The `schedule` key should correspond to a
-                  schedule type, and `active` is a boolean indicating whether
-                  the schedule is active or not.
-                - An instance of one of the predefined schedule types:
-                  `IntervalSchedule`, `CronSchedule`, or `RRuleSchedule`.
-            **kwargs: other keyword arguments to pass to the constructor for the
-                `Deployment` class
-        """
-        if not name:
-            raise ValueError("A deployment name must be provided.")
-
-        # note that `deployment.load` only updates settings that were *not*
-        # provided at initialization
-
-        deployment_args = {
-            "name": name,
-            "flow_name": flow.name,
-            **kwargs,
-        }
-
-        if schedules is not None:
-            deployment_args["schedules"] = schedules
-
-        deployment = cls(**deployment_args)
-        deployment.flow_name = flow.name
-        if not deployment.entrypoint:
-            ## first see if an entrypoint can be determined
-            flow_file = getattr(flow, "__globals__", {}).get("__file__")
-            mod_name = getattr(flow, "__module__", None)
-            if not flow_file:
-                if not mod_name:
-                    # todo, check if the file location was manually set already
-                    raise ValueError("Could not determine flow's file location.")
-                module = importlib.import_module(mod_name)
-                flow_file = getattr(module, "__file__", None)
-                if not flow_file:
-                    raise ValueError("Could not determine flow's file location.")
-
-            # set entrypoint
-            entry_path = Path(flow_file).absolute().relative_to(Path(".").absolute())
-            deployment.entrypoint = f"{entry_path}:{flow.fn.__name__}"
-
-        if load_existing:
-            await deployment.load()
-
-        # set a few attributes for this flow object
-        deployment.parameter_openapi_schema = parameter_schema(flow)
-
-        # ensure the ignore file exists
-        if not Path(ignore_file).exists():
-            Path(ignore_file).touch()
-
-        if not deployment.version:
-            deployment.version = flow.version
-        if not deployment.description:
-            deployment.description = flow.description
-
-        # proxy for whether infra is docker-based
-        is_docker_based = hasattr(deployment.infrastructure, "image")
-
-        if not deployment.storage and not is_docker_based and not deployment.path:
-            deployment.path = str(Path(".").absolute())
-        elif not deployment.storage and is_docker_based:
-            # only update if a path is not already set
-            if not deployment.path:
-                deployment.path = "/opt/prefect/flows"
-
-        if not skip_upload:
-            if (
-                deployment.storage
-                and "put-directory" in deployment.storage.get_block_capabilities()
-            ):
-                await deployment.upload_to_storage(ignore_file=ignore_file)
-
-        if output:
-            await deployment.to_yaml(output)
-
-        if apply:
-            await deployment.apply()
-
-        return deployment
+__getattr__ = getattr_migration(__name__)
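The two added lines work because of PEP 562, which lets a module define a module-level __getattr__ that Python calls whenever an attribute lookup on the module fails. That is how the gutted deployments module can keep old imports resolving (or failing with a pointed error) after its contents moved elsewhere in 3.0.0. Below is a minimal sketch of the pattern, not Prefect's actual prefect/_internal/compatibility/migration.py: the MOVED table is an illustrative assumption, though the listing above does show prefect/deployments/flow_runs.py being added in 3.0.0.

# Minimal sketch of a PEP 562 migration shim in the spirit of
# prefect._internal.compatibility.migration. The MOVED table and its
# target path are illustrative assumptions, not Prefect's routing table.
import importlib
from typing import Any, Callable

MOVED = {
    # old attribute -> "new.module:attribute" (hypothetical example entry)
    "run_deployment": "prefect.deployments.flow_runs:run_deployment",
}


def getattr_migration(module_name: str) -> Callable[[str], Any]:
    """Build a module-level __getattr__ that forwards relocated names."""

    def __getattr__(name: str) -> Any:
        target = MOVED.get(name)
        if target is None:
            raise AttributeError(
                f"module {module_name!r} has no attribute {name!r}"
            )
        new_module, _, attribute = target.partition(":")
        # Import lazily so the shim costs nothing until an old name is used.
        return getattr(importlib.import_module(new_module), attribute)

    return __getattr__


# The stripped-down module then installs the shim, exactly as in the diff:
# __getattr__ = getattr_migration(__name__)

With a shim like this in place, a 2.x-era statement such as "from prefect.deployments.deployments import run_deployment" keeps working: the name misses the module's own namespace, PEP 562 routes the lookup through __getattr__, and the attribute is fetched from its new home on demand.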