prefect-client 3.2.12__py3-none-any.whl → 3.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_build_info.py +3 -3
- prefect/flows.py +3 -4
- prefect/logging/loggers.py +5 -4
- prefect/server/api/server.py +17 -0
- prefect/task_engine.py +27 -0
- prefect/tasks.py +2 -0
- prefect/telemetry/run_telemetry.py +11 -3
- prefect/workers/base.py +184 -103
- prefect/workers/process.py +7 -6
- {prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/METADATA +1 -1
- {prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/RECORD +13 -13
- {prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/WHEEL +0 -0
- {prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/licenses/LICENSE +0 -0
prefect/_build_info.py
CHANGED
```diff
@@ -1,5 +1,5 @@
 # Generated by versioningit
-__version__ = "3.2.12"
-__build_date__ = "2025-03-…"
-__git_commit__ = "…"
+__version__ = "3.2.13"
+__build_date__ = "2025-03-14 20:36:04.139050+00:00"
+__git_commit__ = "1280029757bdefaca553d85a008f9b82da0c697a"
 __dirty__ = False
```
prefect/flows.py
CHANGED
```diff
@@ -1046,10 +1046,9 @@ class Flow(Generic[P, R]):
         if not name:
             name = self.name
         else:
-            # …
-            …
-            …
-            name = Path(name).stem
+            # Only strip extension if it is a file path
+            if (p := Path(name)).is_file():
+                name = p.stem
 
         runner = Runner(name=name, pause_on_shutdown=pause_on_shutdown, limit=limit)
         deployment_id = runner.add_flow(
```
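The practical effect: `flow.serve(name=...)` no longer truncates dotted deployment names that merely look like filenames, while `my_flow.serve(__file__)` still yields the script's stem. A minimal sketch of the new logic (the helper name `resolve_serve_name` is illustrative, not a Prefect API):

```python
from pathlib import Path


def resolve_serve_name(name: str) -> str:
    # Before: unconditionally `Path(name).stem`, so "daily-etl-v1.2"
    # was silently truncated to "daily-etl-v1".
    # After: the extension is stripped only when the name is an existing file.
    if (p := Path(name)).is_file():
        return p.stem
    return name


print(resolve_serve_name("daily-etl-v1.2"))  # daily-etl-v1.2 (kept intact)
print(resolve_serve_name(__file__))          # script name without ".py"
```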
prefect/logging/loggers.py
CHANGED
```diff
@@ -146,10 +146,11 @@ def get_run_logger(
             **kwargs,
         )
     elif (
-        get_logger("prefect.flow_run").disabled
-        and get_logger("prefect.task_run").disabled
+        get_logger("prefect.flow_runs").disabled
+        and get_logger("prefect.task_runs").disabled
     ):
         logger = logging.getLogger("null")
+        logger.disabled = True
     else:
         raise MissingContextError("There is no active flow or task run context.")
 
@@ -280,9 +281,9 @@ def disable_run_logger():
     """
    Gets both `prefect.flow_run` and `prefect.task_run` and disables them
    within the context manager. Upon exiting the context manager, both loggers
-    are returned to …
+    are returned to their original state.
    """
-    with disable_logger("prefect.flow_run"), disable_logger("prefect.task_run"):
+    with disable_logger("prefect.flow_runs"), disable_logger("prefect.task_runs"):
        yield
 
 
```
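A short usage sketch of the corrected behavior, assuming only the names visible in this diff (`disable_run_logger`, `get_logger`):

```python
from prefect.logging.loggers import disable_run_logger, get_logger

# Before this fix the context manager targeted singular names, so the real
# "prefect.flow_runs"/"prefect.task_runs" loggers kept emitting; it now
# disables the actual run loggers and restores them on exit.
with disable_run_logger():
    assert get_logger("prefect.flow_runs").disabled
    assert get_logger("prefect.task_runs").disabled

assert not get_logger("prefect.flow_runs").disabled
```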
prefect/server/api/server.py
CHANGED
```diff
@@ -294,12 +294,16 @@ async def prefect_object_not_found_exception_handler(
     )
 
 
+API_APP_CACHE: dict[tuple[str, str | None], FastAPI] = {}
+
+
 def create_api_app(
     dependencies: list[Any] | None = None,
     health_check_path: str = "/health",
     version_check_path: str = "/version",
     fast_api_app_kwargs: dict[str, Any] | None = None,
     final: bool = False,
+    ignore_cache: bool = False,
 ) -> FastAPI:
     """
     Create a FastAPI app that includes the Prefect REST API
@@ -310,10 +314,20 @@ def create_api_app(
         fast_api_app_kwargs: kwargs to pass to the FastAPI constructor
         final: whether this will be the last instance of the Prefect server to be
             created in this process, so that additional optimizations may be applied
+        ignore_cache: if set, a new app will be created even if the settings and fast_api_app_kwargs match
+            an existing app in the cache
 
     Returns:
         a FastAPI app that serves the Prefect REST API
     """
+    cache_key = (
+        prefect.settings.get_current_settings().hash_key(),
+        hash_objects(fast_api_app_kwargs) if fast_api_app_kwargs else None,
+    )
+
+    if cache_key in API_APP_CACHE and not ignore_cache:
+        return API_APP_CACHE[cache_key]
+
     fast_api_app_kwargs = fast_api_app_kwargs or {}
     api_app = FastAPI(title=API_TITLE, **fast_api_app_kwargs)
     api_app.add_middleware(GZipMiddleware)
@@ -389,6 +403,8 @@ def create_api_app(
         )
         return await call_next(request)
 
+    API_APP_CACHE[cache_key] = api_app
+
     return api_app
 
 
@@ -655,6 +671,7 @@ def create_app(
             }
         },
         final=final,
+        ignore_cache=ignore_cache,
     )
     ui_app = create_ui_app(ephemeral)
 
```
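The new module-level cache memoizes API apps per process, keyed on the current settings hash plus a hash of `fast_api_app_kwargs`. A sketch of the observable behavior, assuming an environment where `prefect.server.api.server` is importable:

```python
from prefect.server.api.server import create_api_app

app_one = create_api_app()
app_two = create_api_app()
assert app_two is app_one  # identical settings and kwargs hit the cache

fresh = create_api_app(ignore_cache=True)
assert fresh is not app_one  # cache bypassed; the new app replaces the entry
```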
prefect/task_engine.py
CHANGED
```diff
@@ -24,6 +24,7 @@ from typing import (
     Type,
     TypeVar,
     Union,
+    overload,
 )
 from uuid import UUID
 
@@ -1527,6 +1528,32 @@ async def run_generator_task_async(
         await engine.result()
 
 
+@overload
+def run_task(
+    task: "Task[P, R]",
+    task_run_id: Optional[UUID] = None,
+    task_run: Optional[TaskRun] = None,
+    parameters: Optional[dict[str, Any]] = None,
+    wait_for: Optional["OneOrManyFutureOrResult[Any]"] = None,
+    return_type: Literal["state"] = "state",
+    dependencies: Optional[dict[str, set[TaskRunInput]]] = None,
+    context: Optional[dict[str, Any]] = None,
+) -> State[R]: ...
+
+
+@overload
+def run_task(
+    task: "Task[P, R]",
+    task_run_id: Optional[UUID] = None,
+    task_run: Optional[TaskRun] = None,
+    parameters: Optional[dict[str, Any]] = None,
+    wait_for: Optional["OneOrManyFutureOrResult[Any]"] = None,
+    return_type: Literal["result"] = "result",
+    dependencies: Optional[dict[str, set[TaskRunInput]]] = None,
+    context: Optional[dict[str, Any]] = None,
+) -> R: ...
+
+
 def run_task(
     task: "Task[P, Union[R, Coroutine[Any, Any, R]]]",
     task_run_id: Optional[UUID] = None,
```
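The two overloads encode the `return_type` contract for type checkers: `"state"` yields `State[R]` while `"result"` yields `R`, with the implementation unchanged at runtime. A self-contained sketch of the same pattern:

```python
from typing import Literal, Union, overload


@overload
def fetch(mode: Literal["text"] = "text") -> str: ...
@overload
def fetch(mode: Literal["bytes"]) -> bytes: ...
def fetch(mode: str = "text") -> Union[str, bytes]:
    data = b"payload"
    return data.decode() if mode == "text" else data


text: str = fetch()          # checker selects the first overload -> str
raw: bytes = fetch("bytes")  # checker selects the second overload -> bytes
```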
prefect/telemetry/run_telemetry.py
CHANGED
```diff
@@ -16,6 +16,7 @@ from opentelemetry.trace import (
 from typing_extensions import TypeAlias
 
 import prefect
+import prefect.settings
 from prefect.client.orchestration import PrefectClient, SyncPrefectClient
 from prefect.client.schemas import FlowRun, TaskRun
 from prefect.client.schemas.objects import State
@@ -50,15 +51,19 @@ class RunTelemetry:
         default_factory=lambda: get_tracer("prefect", prefect.__version__)
     )
     span: Span | None = None
+    _enabled: bool = field(
+        default_factory=lambda: prefect.settings.get_current_settings().cloud.enable_orchestration_telemetry
+    )
 
     async def async_start_span(
         self,
         run: FlowOrTaskRun,
         client: PrefectClient,
         parameters: dict[str, Any] | None = None,
-    ) -> Span:
+    ) -> Span | None:
+        if not self._enabled:
+            return None
         traceparent, span = self._start_span(run, parameters)
-
         if self._run_type(run) == "flow" and traceparent:
             # Only explicitly update labels if the run is a flow as task runs
             # are updated via events.
@@ -73,7 +78,10 @@ class RunTelemetry:
         run: FlowOrTaskRun,
         client: SyncPrefectClient,
         parameters: dict[str, Any] | None = None,
-    ) -> Span:
+    ) -> Span | None:
+        if not self._enabled:
+            return None
+
         traceparent, span = self._start_span(run, parameters)
 
         if self._run_type(run) == "flow" and traceparent:
```
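Callers now receive `None` instead of a `Span` whenever `cloud.enable_orchestration_telemetry` is off, so span usage must be guarded. A hedged sketch (`run` and `sync_client` are hypothetical placeholders, and the synchronous method is assumed to be the one shown in the second hunk):

```python
from prefect.telemetry.run_telemetry import RunTelemetry

telemetry = RunTelemetry()  # _enabled is captured from the current settings

span = telemetry.start_span(run=run, client=sync_client)  # placeholders
if span is not None:
    # Only touch the span when telemetry is enabled
    span.add_event("flow run submitted")
```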
prefect/workers/base.py
CHANGED
```diff
@@ -3,31 +3,31 @@ from __future__ import annotations
 import abc
 import asyncio
 import threading
+import warnings
 from contextlib import AsyncExitStack
 from functools import partial
 from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
-    Dict,
     Generic,
-    List,
     Optional,
-    Set,
     Type,
-    Union,
 )
 from uuid import UUID, uuid4
 
 import anyio
 import anyio.abc
 import httpx
-from importlib_metadata import distributions
+from importlib_metadata import (
+    distributions,  # type: ignore[reportUnknownVariableType] incomplete typing
+)
 from pydantic import BaseModel, Field, PrivateAttr, field_validator
 from pydantic.json_schema import GenerateJsonSchema
 from typing_extensions import Literal, Self, TypeVar
 
 import prefect
+from prefect._internal.compatibility.deprecated import PrefectDeprecationWarning
 from prefect._internal.schemas.validators import return_v_or_none
 from prefect.client.base import ServerType
 from prefect.client.orchestration import PrefectClient, get_client
@@ -94,12 +94,12 @@ class BaseJobConfiguration(BaseModel):
             "will be automatically generated by the worker."
         ),
     )
-    env: Dict[str, Optional[str]] = Field(
+    env: dict[str, Optional[str]] = Field(
         default_factory=dict,
         title="Environment Variables",
         description="Environment variables to set when starting a flow run.",
     )
-    labels: Dict[str, str] = Field(
+    labels: dict[str, str] = Field(
         default_factory=dict,
         description=(
             "Labels applied to infrastructure created by the worker using "
@@ -114,7 +114,7 @@ class BaseJobConfiguration(BaseModel):
         ),
     )
 
-    _related_objects: Dict[str, Any] = PrivateAttr(default_factory=dict)
+    _related_objects: dict[str, Any] = PrivateAttr(default_factory=dict)
 
     @property
     def is_using_a_runner(self) -> bool:
@@ -122,18 +122,18 @@ class BaseJobConfiguration(BaseModel):
 
     @field_validator("command")
     @classmethod
-    def _coerce_command(cls, v):
+    def _coerce_command(cls, v: str | None) -> str | None:
         return return_v_or_none(v)
 
     @field_validator("env", mode="before")
     @classmethod
-    def _coerce_env(cls, v):
+    def _coerce_env(cls, v: dict[str, Any]) -> dict[str, str | None]:
         return {k: str(v) if v is not None else None for k, v in v.items()}
 
     @staticmethod
-    def _get_base_config_defaults(variables: dict) -> dict:
+    def _get_base_config_defaults(variables: dict[str, Any]) -> dict[str, Any]:
         """Get default values from base config for all variables that have them."""
-        defaults = {}
+        defaults: dict[str, Any] = {}
         for variable_name, attrs in variables.items():
             # We remote `None` values because we don't want to use them in templating.
             # The currently logic depends on keys not existing to populate the correct value
@@ -149,9 +149,9 @@ class BaseJobConfiguration(BaseModel):
     @inject_client
     async def from_template_and_values(
         cls,
-        base_job_template: dict,
-        values: dict,
-        client: …
+        base_job_template: dict[str, Any],
+        values: dict[str, Any],
+        client: "PrefectClient | None" = None,
     ):
         """Creates a valid worker configuration object from the provided base
         configuration and overrides.
@@ -159,7 +159,7 @@ class BaseJobConfiguration(BaseModel):
         Important: this method expects that the base_job_template was already
         validated server-side.
         """
-        base_config: …
+        base_config: dict[str, Any] = base_job_template["job_configuration"]
         variables_schema = base_job_template["variables"]
         variables = cls._get_base_config_defaults(
             variables_schema.get("properties", {})
@@ -213,8 +213,10 @@ class BaseJobConfiguration(BaseModel):
     def prepare_for_flow_run(
         self,
         flow_run: "FlowRun",
-        deployment: Optional["DeploymentResponse"] = None,
-        flow: Optional["Flow"] = None,
+        deployment: "DeploymentResponse | None" = None,
+        flow: "Flow | None" = None,
+        work_pool: "WorkPool | None" = None,
+        worker_name: str | None = None,
     ) -> None:
         """
         Prepare the job configuration for a flow run.
@@ -227,6 +229,8 @@ class BaseJobConfiguration(BaseModel):
             flow_run: The flow run to be executed.
             deployment: The deployment that the flow run is associated with.
             flow: The flow that the flow run is associated with.
+            work_pool: The work pool that the flow run is running in.
+            worker_name: The name of the worker that is submitting the flow run.
         """
 
         self._related_objects = {
@@ -234,26 +238,19 @@ class BaseJobConfiguration(BaseModel):
             "flow": flow,
             "flow-run": flow_run,
         }
-        if deployment is not None:
-            deployment_labels = self._base_deployment_labels(deployment)
-        else:
-            deployment_labels = {}
-
-        if flow is not None:
-            flow_labels = self._base_flow_labels(flow)
-        else:
-            flow_labels = {}
 
         env = {
             **self._base_environment(),
             **self._base_flow_run_environment(flow_run),
-            **(self.env if isinstance(self.env, dict) else {}),
+            **(self.env if isinstance(self.env, dict) else {}),  # pyright: ignore[reportUnnecessaryIsInstance]
         }
         self.env = {key: value for key, value in env.items() if value is not None}
         self.labels = {
             **self._base_flow_run_labels(flow_run),
-            **deployment_labels,
-            **flow_labels,
+            **self._base_work_pool_labels(work_pool),
+            **self._base_worker_name_label(worker_name),
+            **self._base_flow_labels(flow),
+            **self._base_deployment_labels(deployment),
             **self.labels,
         }
         self.name = self.name or flow_run.name
@@ -267,7 +264,7 @@ class BaseJobConfiguration(BaseModel):
         return "prefect flow-run execute"
 
     @staticmethod
-    def _base_flow_run_labels(flow_run: "FlowRun") -> Dict[str, str]:
+    def _base_flow_run_labels(flow_run: "FlowRun") -> dict[str, str]:
         """
         Generate a dictionary of labels for a flow run job.
         """
@@ -278,7 +275,7 @@ class BaseJobConfiguration(BaseModel):
         }
 
     @classmethod
-    def _base_environment(cls) -> Dict[str, str]:
+    def _base_environment(cls) -> dict[str, str]:
         """
         Environment variables that should be passed to all created infrastructure.
 
@@ -287,14 +284,22 @@ class BaseJobConfiguration(BaseModel):
         return get_current_settings().to_environment_variables(exclude_unset=True)
 
     @staticmethod
-    def _base_flow_run_environment(flow_run: "FlowRun") -> Dict[str, str]:
+    def _base_flow_run_environment(flow_run: "FlowRun | None") -> dict[str, str]:
         """
         Generate a dictionary of environment variables for a flow run job.
         """
+        if flow_run is None:
+            return {}
+
         return {"PREFECT__FLOW_RUN_ID": str(flow_run.id)}
 
     @staticmethod
-    def _base_deployment_labels(deployment: "DeploymentResponse") -> Dict[str, str]:
+    def _base_deployment_labels(
+        deployment: "DeploymentResponse | None",
+    ) -> dict[str, str]:
+        if deployment is None:
+            return {}
+
         labels = {
             "prefect.io/deployment-id": str(deployment.id),
             "prefect.io/deployment-name": deployment.name,
@@ -306,15 +311,37 @@ class BaseJobConfiguration(BaseModel):
         return labels
 
     @staticmethod
-    def _base_flow_labels(flow: "Flow") -> Dict[str, str]:
+    def _base_flow_labels(flow: "Flow | None") -> dict[str, str]:
+        if flow is None:
+            return {}
+
         return {
             "prefect.io/flow-id": str(flow.id),
             "prefect.io/flow-name": flow.name,
         }
 
-    def _related_resources(self) -> List[RelatedResource]:
-        tags = set()
-        related = []
+    @staticmethod
+    def _base_work_pool_labels(work_pool: "WorkPool | None") -> dict[str, str]:
+        """Adds the work pool labels to the job manifest."""
+        if work_pool is None:
+            return {}
+
+        return {
+            "prefect.io/work-pool-name": work_pool.name,
+            "prefect.io/work-pool-id": str(work_pool.id),
+        }
+
+    @staticmethod
+    def _base_worker_name_label(worker_name: str | None) -> dict[str, str]:
+        """Adds the worker name label to the job manifest."""
+        if worker_name is None:
+            return {}
+
+        return {"prefect.io/worker-name": worker_name}
+
+    def _related_resources(self) -> list[RelatedResource]:
+        tags: set[str] = set()
+        related: list[RelatedResource] = []
 
         for kind, obj in self._related_objects.items():
             if obj is None:
@@ -331,12 +358,12 @@ class BaseVariables(BaseModel):
         default=None,
         description="Name given to infrastructure created by a worker.",
     )
-    env: Dict[str, Optional[str]] = Field(
+    env: dict[str, Optional[str]] = Field(
         default_factory=dict,
         title="Environment Variables",
         description="Environment variables to set when starting a flow run.",
     )
-    labels: Dict[str, str] = Field(
+    labels: dict[str, str] = Field(
         default_factory=dict,
         description="Labels applied to infrastructure created by a worker.",
     )
@@ -356,7 +383,7 @@ class BaseVariables(BaseModel):
         ref_template: str = "#/definitions/{model}",
         schema_generator: Type[GenerateJsonSchema] = GenerateJsonSchema,
         mode: Literal["validation", "serialization"] = "validation",
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """TODO: stop overriding this method - use GenerateSchema in ConfigDict instead?"""
         schema = super().model_json_schema(
             by_alias, ref_template, schema_generator, mode
@@ -403,14 +430,14 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
     def __init__(
         self,
         work_pool_name: str,
-        work_queues: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        prefetch_seconds: Optional[float] = None,
+        work_queues: list[str] | None = None,
+        name: str | None = None,
+        prefetch_seconds: float | None = None,
         create_pool_if_not_found: bool = True,
-        limit: Optional[int] = None,
-        heartbeat_interval_seconds: Optional[int] = None,
+        limit: int | None = None,
+        heartbeat_interval_seconds: int | None = None,
         *,
-        base_job_template: Optional[Dict[str, Any]] = None,
+        base_job_template: dict[str, Any] | None = None,
     ):
         """
         Base class for all Prefect workers.
@@ -445,7 +472,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         self._create_pool_if_not_found = create_pool_if_not_found
         self._base_job_template = base_job_template
         self._work_pool_name = work_pool_name
-        self._work_queues: Set[str] = set(work_queues) if work_queues else set()
+        self._work_queues: set[str] = set(work_queues) if work_queues else set()
 
         self._prefetch_seconds: float = (
             prefetch_seconds or PREFECT_WORKER_PREFETCH_SECONDS.value()
@@ -461,11 +488,35 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         self._last_polled_time: DateTime = DateTime.now("utc")
         self._limit = limit
         self._limiter: Optional[anyio.CapacityLimiter] = None
-        self._submitting_flow_run_ids = set()
-        self._cancelling_flow_run_ids = set()
-        self._scheduled_task_scopes = set()
+        self._submitting_flow_run_ids: set[UUID] = set()
+        self._cancelling_flow_run_ids: set[UUID] = set()
+        self._scheduled_task_scopes: set[anyio.CancelScope] = set()
         self._worker_metadata_sent = False
 
+    @property
+    def client(self) -> PrefectClient:
+        if self._client is None:
+            raise RuntimeError(
+                "Worker has not been correctly initialized. Please use the worker class as an async context manager."
+            )
+        return self._client
+
+    @property
+    def work_pool(self) -> WorkPool:
+        if self._work_pool is None:
+            raise RuntimeError(
+                "Worker has not been correctly initialized. Please use the worker class as an async context manager."
+            )
+        return self._work_pool
+
+    @property
+    def limiter(self) -> anyio.CapacityLimiter:
+        if self._limiter is None:
+            raise RuntimeError(
+                "Worker has not been correctly initialized. Please use the worker class as an async context manager."
+            )
+        return self._limiter
+
     @classmethod
     def get_documentation_url(cls) -> str:
         return cls._documentation_url
@@ -510,7 +561,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         return worker_registry.get(type)
 
     @staticmethod
-    def get_all_available_worker_types() -> List[str]:
+    def get_all_available_worker_types() -> list[str]:
         """
         Returns all worker types available in the local registry.
         """
@@ -790,7 +841,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
         should_get_worker_id = self._should_get_worker_id()
 
-        params = {
+        params: dict[str, Any] = {
             "work_pool_name": self._work_pool_name,
             "worker_name": self.name,
             "heartbeat_interval_seconds": self.heartbeat_interval_seconds,
@@ -852,7 +903,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
     async def _get_scheduled_flow_runs(
         self,
-    ) -> List["WorkerFlowRunResponse"]:
+    ) -> list["WorkerFlowRunResponse"]:
         """
         Retrieve scheduled flow runs from the work pool's queues.
         """
@@ -862,7 +913,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         )
         try:
             scheduled_flow_runs = (
-                await self._client.get_scheduled_flow_runs_for_work_pool(
+                await self.client.get_scheduled_flow_runs_for_work_pool(
                     work_pool_name=self._work_pool_name,
                     scheduled_before=scheduled_before,
                     work_queue_names=list(self._work_queues),
@@ -878,8 +929,8 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             return []
 
     async def _submit_scheduled_flow_runs(
-        self, flow_run_response: List["WorkerFlowRunResponse"]
-    ) -> List["FlowRun"]:
+        self, flow_run_response: list["WorkerFlowRunResponse"]
+    ) -> list["FlowRun"]:
         """
         Takes a list of WorkerFlowRunResponses and submits the referenced flow runs
         for execution by the worker.
@@ -897,7 +948,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
                     self._limiter.acquire_on_behalf_of_nowait(flow_run.id)
                 except anyio.WouldBlock:
                     self._logger.info(
-                        f"Flow run limit reached; {self._limiter.borrowed_tokens} flow runs"
+                        f"Flow run limit reached; {self.limiter.borrowed_tokens} flow runs"
                         " in progress."
                     )
                     break
@@ -921,6 +972,8 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
                         run_logger.warning(f"Failed to generate worker URL: {ve}")
 
                     self._submitting_flow_run_ids.add(flow_run.id)
+                    if TYPE_CHECKING:
+                        assert self._runs_task_group is not None
                     self._runs_task_group.start_soon(
                         self._submit_run,
                         flow_run,
@@ -939,14 +992,9 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         """
         run_logger = self.get_flow_run_logger(flow_run)
 
-        if flow_run.deployment_id:
-            assert self._client and self._client._started, (
-                "Client must be started to check flow run deployment."
-            )
-
         try:
-            await self._client.read_deployment(flow_run.deployment_id)
-        except ObjectNotFound:
+            await self.client.read_deployment(getattr(flow_run, "deployment_id"))
+        except (ObjectNotFound, AttributeError):
             self._logger.exception(
                 f"Deployment {flow_run.deployment_id} no longer exists. "
                 f"Flow run {flow_run.id} will not be submitted for"
@@ -964,13 +1012,15 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         ready_to_submit = await self._propose_pending_state(flow_run)
         self._logger.debug(f"Ready to submit {flow_run.id}: {ready_to_submit}")
         if ready_to_submit:
+            if TYPE_CHECKING:
+                assert self._runs_task_group is not None
             readiness_result = await self._runs_task_group.start(
                 self._submit_run_and_capture_errors, flow_run
            )
 
            if readiness_result and not isinstance(readiness_result, Exception):
                try:
-                    await self._client.update_flow_run(
+                    await self.client.update_flow_run(
                        flow_run_id=flow_run.id,
                        infrastructure_pid=str(readiness_result),
                    )
@@ -991,8 +1041,10 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             self._submitting_flow_run_ids.remove(flow_run.id)
 
     async def _submit_run_and_capture_errors(
-        self, flow_run: "FlowRun", task_status: Optional[anyio.abc.TaskStatus] = None
-    ) -> Union[BaseWorkerResult, Exception]:
+        self,
+        flow_run: "FlowRun",
+        task_status: anyio.abc.TaskStatus[int | Exception] | None = None,
+    ) -> BaseWorkerResult | Exception:
         run_logger = self.get_flow_run_logger(flow_run)
 
         try:
@@ -1006,7 +1058,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
                 configuration=configuration,
             )
         except Exception as exc:
-            if not task_status._future.done():
+            if task_status and not getattr(task_status, "_future").done():
                 # This flow run was being submitted and did not start successfully
                 run_logger.exception(
                     f"Failed to submit flow run '{flow_run.id}' to infrastructure."
@@ -1025,7 +1077,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         finally:
             self._release_limit_slot(flow_run.id)
 
-        if not task_status._future.done():
+        if task_status and not getattr(task_status, "_future").done():
             run_logger.error(
                 f"Infrastructure returned without reporting flow run '{flow_run.id}' "
                 "as started or raising an error. This behavior is not expected and "
@@ -1033,7 +1085,11 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
                 "flow run will not be marked as failed, but an issue may have occurred."
             )
             # Mark the task as started to prevent agent crash
-            task_status.started(…)
+            task_status.started(
+                RuntimeError(
+                    "Infrastructure returned without reporting flow run as started or raising an error."
+                )
+            )
 
         if result.status_code != 0:
             await self._propose_crashed_state(
@@ -1044,11 +1100,12 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
                 ),
             )
 
-        self._emit_flow_run_executed_event(result, configuration, submitted_event)
+        if submitted_event:
+            self._emit_flow_run_executed_event(result, configuration, submitted_event)
 
         return result
 
-    def _release_limit_slot(self, flow_run_id: str) -> None:
+    def _release_limit_slot(self, flow_run_id: UUID) -> None:
         """
         Frees up a slot taken by the given flow run id.
         """
@@ -1078,14 +1135,12 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         flow_run: "FlowRun",
         deployment: Optional["DeploymentResponse"] = None,
     ) -> C:
-        deployment = (
-            deployment
-            if deployment
-            else await self._client.read_deployment(flow_run.deployment_id)
-        )
-        flow = await self._client.read_flow(flow_run.flow_id)
+        if not deployment and flow_run.deployment_id:
+            deployment = await self.client.read_deployment(flow_run.deployment_id)
 
-        deployment_vars = deployment.job_variables or {}
+        flow = await self.client.read_flow(flow_run.flow_id)
+
+        deployment_vars = getattr(deployment, "job_variables", {}) or {}
         flow_run_vars = flow_run.job_variables or {}
         job_variables = {**deployment_vars}
 
@@ -1095,22 +1150,37 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             job_variables.update(flow_run_vars)
 
         configuration = await self.job_configuration.from_template_and_values(
-            base_job_template=self._work_pool.base_job_template,
+            base_job_template=self.work_pool.base_job_template,
             values=job_variables,
-            client=self._client,
-        )
-        configuration.prepare_for_flow_run(
-            flow_run=flow_run, deployment=deployment, flow=flow
+            client=self.client,
         )
+        try:
+            configuration.prepare_for_flow_run(
+                flow_run=flow_run,
+                deployment=deployment,
+                flow=flow,
+                work_pool=self.work_pool,
+                worker_name=self.name,
+            )
+        except TypeError:
+            warnings.warn(
+                "This worker is missing the `work_pool` and `worker_name` arguments "
+                "in its JobConfiguration.prepare_for_flow_run method. Please update "
+                "the worker's JobConfiguration class to accept these arguments to "
+                "avoid this warning.",
+                category=PrefectDeprecationWarning,
+            )
+            # Handle older subclasses that don't accept work_pool and worker_name
+            configuration.prepare_for_flow_run(
+                flow_run=flow_run, deployment=deployment, flow=flow
+            )
         return configuration
 
     async def _propose_pending_state(self, flow_run: "FlowRun") -> bool:
         run_logger = self.get_flow_run_logger(flow_run)
         state = flow_run.state
         try:
-            state = await propose_state(
-                self._client, Pending(), flow_run_id=flow_run.id
-            )
+            state = await propose_state(self.client, Pending(), flow_run_id=flow_run.id)
         except Abort as exc:
             run_logger.info(
                 (
@@ -1141,7 +1211,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         run_logger = self.get_flow_run_logger(flow_run)
         try:
             await propose_state(
-                self._client,
+                self.client,
                 await exception_to_failed_state(message="Submission failed.", exc=exc),
                 flow_run_id=flow_run.id,
             )
@@ -1159,7 +1229,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         run_logger = self.get_flow_run_logger(flow_run)
         try:
             state = await propose_state(
-                self._client,
+                self.client,
                 Crashed(message=message),
                 flow_run_id=flow_run.id,
             )
@@ -1175,14 +1245,16 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         )
 
     async def _mark_flow_run_as_cancelled(
-        self, flow_run: "FlowRun", state_updates: …
+        self, flow_run: "FlowRun", state_updates: dict[str, Any] | None = None
     ) -> None:
         state_updates = state_updates or {}
         state_updates.setdefault("name", "Cancelled")
         state_updates.setdefault("type", StateType.CANCELLED)
+        if TYPE_CHECKING:
+            assert flow_run.state
         state = flow_run.state.model_copy(update=state_updates)
 
-        await self._client.set_flow_run_state(flow_run.id, state, force=True)
+        await self.client.set_flow_run_state(flow_run.id, state, force=True)
 
         # Do not remove the flow run from the cancelling set immediately because
         # the API caches responses for the `read_flow_runs` and we do not want to
@@ -1191,16 +1263,21 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             60 * 10, self._cancelling_flow_run_ids.remove, flow_run.id
         )
 
-    async def _set_work_pool_template(self, work_pool, job_template):
+    async def _set_work_pool_template(
+        self, work_pool: "WorkPool", job_template: dict[str, Any]
+    ):
         """Updates the `base_job_template` for the worker's work pool server side."""
-        await self._client.update_work_pool(
+
+        await self.client.update_work_pool(
             work_pool_name=work_pool.name,
             work_pool=WorkPoolUpdate(
                 base_job_template=job_template,
             ),
         )
 
-    async def _schedule_task(self, __in_seconds: int, fn, *args, **kwargs):
+    async def _schedule_task(
+        self, __in_seconds: int, fn: Callable[..., Any], *args: Any, **kwargs: Any
+    ):
         """
         Schedule a background task to start after some time.
 
@@ -1208,8 +1285,12 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
         The function may be async or sync. Async functions will be awaited.
         """
+        if not self._runs_task_group:
+            raise RuntimeError(
+                "Worker has not been correctly initialized. Please use the worker class as an async context manager."
+            )
 
-        async def wrapper(task_status):
+        async def wrapper(task_status: anyio.abc.TaskStatus[Any]):
             # If we are shutting down, do not sleep; otherwise sleep until the scheduled
             # time or shutdown
             if self.is_setup:
@@ -1271,12 +1352,12 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
     def _event_related_resources(
         self,
-        configuration: Optional[BaseJobConfiguration] = None,
+        configuration: BaseJobConfiguration | None = None,
         include_self: bool = False,
-    ) -> List[RelatedResource]:
-        related = []
+    ) -> list[RelatedResource]:
+        related: list[RelatedResource] = []
         if configuration:
-            related += configuration._related_resources()
+            related += getattr(configuration, "_related_resources")()
 
         if self._work_pool:
             related.append(
@@ -1294,7 +1375,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
     def _emit_flow_run_submitted_event(
         self, configuration: BaseJobConfiguration
-    ) -> Event:
+    ) -> Event | None:
         return emit_event(
             event="prefect.worker.submitted-flow-run",
             resource=self._event_resource(),
@@ -1305,7 +1386,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         self,
         result: BaseWorkerResult,
         configuration: BaseJobConfiguration,
-        submitted_event: Event,
+        submitted_event: Event | None = None,
     ):
         related = self._event_related_resources(configuration=configuration)
 
@@ -1321,7 +1402,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             follows=submitted_event,
         )
 
-    async def _emit_worker_started_event(self) -> Event:
+    async def _emit_worker_started_event(self) -> Event | None:
         return emit_event(
             "prefect.worker.started",
             resource=self._event_resource(),
```
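For custom worker authors, the `TypeError` fallback above means `BaseJobConfiguration` subclasses that override `prepare_for_flow_run` should grow the two new keyword parameters to avoid the `PrefectDeprecationWarning`. A sketch of the updated override (class name illustrative; imports mirror the process worker diff below):

```python
from typing import Optional

from prefect.client.schemas.objects import Flow, FlowRun, WorkPool
from prefect.client.schemas.responses import DeploymentResponse
from prefect.workers.base import BaseJobConfiguration


class MyJobConfiguration(BaseJobConfiguration):
    def prepare_for_flow_run(
        self,
        flow_run: "FlowRun",
        deployment: Optional["DeploymentResponse"] = None,
        flow: Optional["Flow"] = None,
        work_pool: Optional["WorkPool"] = None,
        worker_name: Optional[str] = None,
    ) -> None:
        # Forward the new arguments so work pool and worker name labels
        # are merged into the job's labels by the base implementation.
        super().prepare_for_flow_run(
            flow_run, deployment, flow, work_pool, worker_name
        )
```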
prefect/workers/process.py
CHANGED
```diff
@@ -29,7 +29,6 @@ import anyio.abc
 from pydantic import Field, field_validator
 
 from prefect._internal.schemas.validators import validate_working_dir
-from prefect.client.schemas import FlowRun
 from prefect.runner.runner import Runner
 from prefect.settings import PREFECT_WORKER_QUERY_SECONDS
 from prefect.utilities.processutils import get_sys_executable
@@ -42,7 +41,7 @@ from prefect.workers.base import (
 )
 
 if TYPE_CHECKING:
-    from prefect.client.schemas.objects import Flow
+    from prefect.client.schemas.objects import Flow, FlowRun, WorkPool
     from prefect.client.schemas.responses import DeploymentResponse
 
 
@@ -60,10 +59,12 @@ class ProcessJobConfiguration(BaseJobConfiguration):
     def prepare_for_flow_run(
         self,
         flow_run: "FlowRun",
-        deployment: Optional["DeploymentResponse"] = None,
-        flow: Optional["Flow"] = None,
+        deployment: "DeploymentResponse | None" = None,
+        flow: "Flow | None" = None,
+        work_pool: "WorkPool | None" = None,
+        worker_name: str | None = None,
     ) -> None:
-        super().prepare_for_flow_run(flow_run, deployment, flow)
+        super().prepare_for_flow_run(flow_run, deployment, flow, work_pool, worker_name)
 
         self.env: dict[str, str | None] = {**os.environ, **self.env}
         self.command: str | None = (
@@ -201,7 +202,7 @@ class ProcessWorker(
 
     async def run(
         self,
-        flow_run: FlowRun,
+        flow_run: "FlowRun",
         configuration: ProcessJobConfiguration,
         task_status: Optional[anyio.abc.TaskStatus[int]] = None,
     ) -> ProcessWorkerResult:
```
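Relatedly, the new `client`, `work_pool`, and `limiter` properties on `BaseWorker` fail fast when a worker is used outside its async context manager; a sketch of that guard (the pool name is illustrative):

```python
from prefect.workers.process import ProcessWorker

worker = ProcessWorker(work_pool_name="my-process-pool")
try:
    worker.client  # accessed before `async with worker:` has set up a client
except RuntimeError as exc:
    print(exc)  # "Worker has not been correctly initialized. Please use the
                # worker class as an async context manager."
```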
{prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/RECORD
CHANGED
```diff
@@ -1,7 +1,7 @@
 prefect/.prefectignore,sha256=awSprvKT0vI8a64mEOLrMxhxqcO-b0ERQeYpA2rNKVQ,390
 prefect/__init__.py,sha256=iCdcC5ZmeewikCdnPEP6YBAjPNV5dvfxpYCTpw30Hkw,3685
 prefect/__main__.py,sha256=WFjw3kaYJY6pOTA7WDOgqjsz8zUEUZHCcj3P5wyVa-g,66
-prefect/_build_info.py,sha256=…
+prefect/_build_info.py,sha256=fPct5cKMbz-qqS_zMeI5SJEFgZXcf41UFkUscPv_ikM,181
 prefect/_result_records.py,sha256=S6QmsODkehGVSzbMm6ig022PYbI6gNKz671p_8kBYx4,7789
 prefect/_waiters.py,sha256=Ia2ITaXdHzevtyWIgJoOg95lrEXQqNEOquHvw3T33UQ,9026
 prefect/agent.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
@@ -14,7 +14,7 @@ prefect/exceptions.py,sha256=-nih8qqdxRm6CX-4yrqwePVh8Mcpvla_V6N_KbdJsIU,11593
 prefect/filesystems.py,sha256=v5YqGB4uXf9Ew2VuB9VCSkawvYMMVvEtZf7w1VmAmr8,18036
 prefect/flow_engine.py,sha256=gR44YU7aCAbHEqoMDdxL1SDrtS5Xx1Kzg3M7FWjHcvY,58967
 prefect/flow_runs.py,sha256=ocbV3ioSBIFoLqExpI2YLteyHdWpHB3t0lrbfl-Ni1E,17256
-prefect/flows.py,sha256=…
+prefect/flows.py,sha256=dsrV-qNP_2eUsdG409XbtNtfMkTIzcAnBbXrD9OBUgQ,108950
 prefect/futures.py,sha256=ADd8ceFqX7A8Kw8aXaqvbYRG03uU82OEY30xrP5vrwY,23599
 prefect/main.py,sha256=hFeTTrr01qWKcRwZVEHVipyHEybS0VLTscFV6zG6GtY,2306
 prefect/plugins.py,sha256=FPRLR2mWVBMuOnlzeiTD9krlHONZH2rtYLD753JQDNQ,2516
@@ -23,11 +23,11 @@ prefect/results.py,sha256=gAcYivq5CN8oL5CWu8cJe2560i0M8I5GL-8RcBTJ6VI,36385
 prefect/schedules.py,sha256=9ufG4jhIA_R7vS9uXqnnZEgB7Ts922KMhNacWcveVgA,7291
 prefect/serializers.py,sha256=QI0oEal_BO4HQaWSjr6ReSwT55Hn4sbSOXxGgQI1-y0,9249
 prefect/states.py,sha256=tTZrN-IZKvmFcN8FR_4L-X-ZrmXi6z-cPXl6KdOy-XI,26920
-prefect/task_engine.py,sha256=…
+prefect/task_engine.py,sha256=nbiaDyTNN89zP_ibJbe_Zrw2kLGZ4QqoeN221iIc5y0,61526
 prefect/task_runners.py,sha256=Ce_ngocfq_X-NA5zhPj13IdVmzZ5h6gXlmfxYWs2AXA,15828
 prefect/task_runs.py,sha256=7LIzfo3fondCyEUpU05sYFN5IfpZigBDXrhG5yc-8t0,9039
 prefect/task_worker.py,sha256=mihWOZ3IpZCupqBboB_T1XhLm-0ApwwptTgUH-I3nKo,17794
-prefect/tasks.py,sha256=…
+prefect/tasks.py,sha256=KkqUaGYArxOSf8dRCr6HKtrqWrMfUhq8U4fXC93m4AE,74080
 prefect/transactions.py,sha256=kOXwghBW3jM71gg49MkjJPTnImEzXWeTCUE_zpq2MlI,16068
 prefect/variables.py,sha256=dCK3vX7TbkqXZhnNT_v7rcGh3ISRqoR6pJVLpoll3Js,8342
 prefect/_experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -181,7 +181,7 @@ prefect/logging/filters.py,sha256=NnRYubh9dMmWcCAjuW32cIVQ37rLxdn8ci26wTtQMyU,11
 prefect/logging/formatters.py,sha256=BkPykVyOFKdnhDj_1vhhOoWiHiiBeRnWXPcaRIWK3aI,4125
 prefect/logging/handlers.py,sha256=pIeS6gvuVnuh3lZ-kIC4ijRMSbVPkHo-rYeLMj5P8NA,12240
 prefect/logging/highlighters.py,sha256=BCf_LNhFInIfGPqwuu8YVrGa4wVxNc4YXo2pYgftpg4,1811
-prefect/logging/loggers.py,sha256=…
+prefect/logging/loggers.py,sha256=rwFJv0i3dhdKr25XX-xUkQy4Vv4dy18bTy366jrC0OQ,12741
 prefect/logging/logging.yml,sha256=tT7gTyC4NmngFSqFkCdHaw7R0GPNPDDsTCGZQByiJAQ,3169
 prefect/runner/__init__.py,sha256=7U-vAOXFkzMfRz1q8Uv6Otsvc0OrPYLLP44srwkJ_8s,89
 prefect/runner/runner.py,sha256=8JixM-hU4Oo4AMaXhWGEISUdZCg0AZdPGkP4lXxbMu0,65436
@@ -218,7 +218,7 @@ prefect/server/api/middleware.py,sha256=WkyuyeJIfo9Q0GAIVU5gO6yIGNVwoHwuBah5AB5o
 prefect/server/api/root.py,sha256=CeumFYIM_BDvPicJH9ry5PO_02PZTLeMqbLMGGTh90o,942
 prefect/server/api/run_history.py,sha256=FHepAgo1AYFeuh7rrAVzo_o3hu8Uc8-4DeH5aD5VGgw,5995
 prefect/server/api/saved_searches.py,sha256=UjoqLLe245QVIs6q5Vk4vdODCOoYzciEEjhi7B8sYCE,3233
-prefect/server/api/server.py,sha256=…
+prefect/server/api/server.py,sha256=GHbHTm8qd6i0mbqhnplFNHStUXwYnOuOFc4F9OAYk1c,32940
 prefect/server/api/task_run_states.py,sha256=e63OPpxPudv_CIB5oKr8Z8rfQ-Osjm9Zq0iHe8obnMo,1647
 prefect/server/api/task_runs.py,sha256=VY6MrolTi_vmiaE5my3WyRl5r256WKC7sfxxBE4Wnpw,12239
 prefect/server/api/task_workers.py,sha256=cFP9M8tsApDL_JpySn-x6fOYy9RnOeOgKiqOl_UVVQM,1042
@@ -274,7 +274,7 @@ prefect/telemetry/bootstrap.py,sha256=tD6CXgMU3skgpKNscrRYZudwCWEEt6wjwMsulYpHeA
 prefect/telemetry/instrumentation.py,sha256=9JElKwEqKhErohjHNS4oAIXJRYCWVY0rfSnk4DUBna0,4454
 prefect/telemetry/logging.py,sha256=yn5D4D2GGRrAv0y8wlHPN7PZDmQucGjQT_YauK9M9Yo,727
 prefect/telemetry/processors.py,sha256=jw6j6LviOVxw3IBJe7cSjsxFk0zzY43jUmy6C9pcfCE,2272
-prefect/telemetry/run_telemetry.py,sha256=…
+prefect/telemetry/run_telemetry.py,sha256=_FbjiPqPemu4xvZuI2YBPwXeRJ2BcKRJ6qgO4UMzKKE,8571
 prefect/telemetry/services.py,sha256=DxgNNDTeWNtHBtioX8cjua4IrCbTiJJdYecx-gugg-w,2358
 prefect/types/__init__.py,sha256=yBjKxiQmSC7jXoo0UNmM3KZil1NBFS-BWGPfwSEaoJo,4621
 prefect/types/_datetime.py,sha256=eOsg5gkm4bATLWvK4lmLqHByxQdER6gfTFyafzj-DLk,3343
@@ -312,13 +312,13 @@ prefect/utilities/schema_tools/__init__.py,sha256=At3rMHd2g_Em2P3_dFQlFgqR_EpBwr
 prefect/utilities/schema_tools/hydration.py,sha256=NkRhWkNfxxFmVGhNDfmxdK_xeKaEhs3a42q83Sg9cT4,9436
 prefect/utilities/schema_tools/validation.py,sha256=Wix26IVR-ZJ32-6MX2pHhrwm3reB-Q4iB6_phn85OKE,10743
 prefect/workers/__init__.py,sha256=EaM1F0RZ-XIJaGeTKLsXDnfOPHzVWk5bk0_c4BVS44M,64
-prefect/workers/base.py,sha256=…
+prefect/workers/base.py,sha256=E0fZeR0zS_1y7Z7pNx8JyrRn9QLd9gHhA6kuG582Hl4,53040
 prefect/workers/block.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/workers/cloud.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
-prefect/workers/process.py,sha256=…
+prefect/workers/process.py,sha256=uxOwcqA2Ps-V-W6WeSdKCQMINrCxBEVx1K1Un8pb7vs,8973
 prefect/workers/server.py,sha256=SEuyScZ5nGm2OotdtbHjpvqJlTRVWCh29ND7FeL_fZA,1974
 prefect/workers/utilities.py,sha256=VfPfAlGtTuDj0-Kb8WlMgAuOfgXCdrGAnKMapPSBrwc,2483
-prefect_client-3.2.12.dist-info/METADATA,sha256=…
-prefect_client-3.2.12.dist-info/WHEEL,sha256=…
-prefect_client-3.2.12.dist-info/licenses/LICENSE,sha256=…
-prefect_client-3.2.12.dist-info/RECORD,,
+prefect_client-3.2.13.dist-info/METADATA,sha256=_597uZ6zBWXufm6Zc5TkIsJKb41D53QL2DJ-qUnjpX4,7193
+prefect_client-3.2.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+prefect_client-3.2.13.dist-info/licenses/LICENSE,sha256=MCxsn8osAkzfxKC4CC_dLcUkU8DZLkyihZ8mGs3Ah3Q,11357
+prefect_client-3.2.13.dist-info/RECORD,,
```

{prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/WHEEL
File without changes

{prefect_client-3.2.12.dist-info → prefect_client-3.2.13.dist-info}/licenses/LICENSE
File without changes