prefect-client 2.16.9__py3-none-any.whl → 2.17.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +0 -18
- prefect/_internal/compatibility/deprecated.py +108 -5
- prefect/_internal/pydantic/__init__.py +4 -0
- prefect/_internal/pydantic/_base_model.py +36 -4
- prefect/_internal/pydantic/_compat.py +33 -2
- prefect/_internal/pydantic/_flags.py +3 -0
- prefect/_internal/pydantic/utilities/config_dict.py +72 -0
- prefect/_internal/pydantic/utilities/field_validator.py +135 -0
- prefect/_internal/pydantic/utilities/model_fields_set.py +29 -0
- prefect/_internal/pydantic/utilities/model_validator.py +79 -0
- prefect/agent.py +1 -1
- prefect/blocks/notifications.py +18 -18
- prefect/blocks/webhook.py +1 -1
- prefect/client/base.py +7 -0
- prefect/client/orchestration.py +44 -4
- prefect/client/schemas/actions.py +27 -20
- prefect/client/schemas/filters.py +28 -28
- prefect/client/schemas/objects.py +31 -21
- prefect/client/schemas/responses.py +17 -11
- prefect/client/schemas/schedules.py +6 -8
- prefect/context.py +2 -1
- prefect/deployments/base.py +2 -10
- prefect/deployments/deployments.py +34 -9
- prefect/deployments/runner.py +2 -2
- prefect/engine.py +32 -596
- prefect/events/clients.py +45 -13
- prefect/events/filters.py +19 -2
- prefect/events/utilities.py +12 -4
- prefect/events/worker.py +26 -8
- prefect/exceptions.py +3 -8
- prefect/filesystems.py +7 -7
- prefect/flows.py +4 -3
- prefect/manifests.py +1 -8
- prefect/profiles.toml +1 -1
- prefect/pydantic/__init__.py +27 -1
- prefect/pydantic/main.py +26 -2
- prefect/settings.py +33 -10
- prefect/task_server.py +2 -2
- prefect/utilities/dispatch.py +1 -0
- prefect/utilities/engine.py +629 -0
- prefect/utilities/pydantic.py +1 -1
- prefect/utilities/visualization.py +1 -1
- prefect/variables.py +88 -12
- prefect/workers/base.py +1 -1
- prefect/workers/block.py +1 -1
- {prefect_client-2.16.9.dist-info → prefect_client-2.17.1.dist-info}/METADATA +3 -3
- {prefect_client-2.16.9.dist-info → prefect_client-2.17.1.dist-info}/RECORD +50 -45
- {prefect_client-2.16.9.dist-info → prefect_client-2.17.1.dist-info}/LICENSE +0 -0
- {prefect_client-2.16.9.dist-info → prefect_client-2.17.1.dist-info}/WHEEL +0 -0
- {prefect_client-2.16.9.dist-info → prefect_client-2.17.1.dist-info}/top_level.txt +0 -0
prefect/utilities/engine.py
ADDED
@@ -0,0 +1,629 @@
+import asyncio
+import contextlib
+import os
+import signal
+import time
+from functools import partial
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    Optional,
+    Set,
+    Union,
+)
+from uuid import UUID, uuid4
+
+import anyio
+from typing_extensions import Literal
+
+import prefect
+import prefect.context
+import prefect.plugins
+from prefect._internal.concurrency.cancellation import get_deadline
+from prefect.client.orchestration import PrefectClient
+from prefect.client.schemas import OrchestrationResult, TaskRun
+from prefect.client.schemas.objects import (
+    StateType,
+    TaskRunInput,
+    TaskRunResult,
+)
+from prefect.client.schemas.responses import SetStateStatus
+from prefect.context import (
+    FlowRunContext,
+)
+from prefect.events import Event, emit_event
+from prefect.exceptions import (
+    Pause,
+    PrefectException,
+    TerminationSignal,
+    UpstreamTaskError,
+)
+from prefect.flows import Flow
+from prefect.futures import PrefectFuture
+from prefect.logging.loggers import (
+    get_logger,
+    task_run_logger,
+)
+from prefect.results import BaseResult
+from prefect.settings import (
+    PREFECT_LOGGING_LOG_PRINTS,
+)
+from prefect.states import (
+    State,
+    get_state_exception,
+    is_state,
+)
+from prefect.tasks import Task
+from prefect.utilities.annotations import allow_failure, quote
+from prefect.utilities.asyncutils import (
+    gather,
+)
+from prefect.utilities.collections import StopVisiting, visit_collection
+from prefect.utilities.text import truncated_to
+
+API_HEALTHCHECKS = {}
+UNTRACKABLE_TYPES = {bool, type(None), type(...), type(NotImplemented)}
+engine_logger = get_logger("engine")
+
+
+async def collect_task_run_inputs(expr: Any, max_depth: int = -1) -> Set[TaskRunInput]:
+    """
+    This function recurses through an expression to generate a set of any discernible
+    task run inputs it finds in the data structure. It produces a set of all inputs
+    found.
+
+    Examples:
+        >>> task_inputs = {
+        >>>    k: await collect_task_run_inputs(v) for k, v in parameters.items()
+        >>> }
+    """
+    # TODO: This function needs to be updated to detect parameters and constants
+
+    inputs = set()
+    futures = set()
+
+    def add_futures_and_states_to_inputs(obj):
+        if isinstance(obj, PrefectFuture):
+            # We need to wait for futures to be submitted before we can get the task
+            # run id but we want to do so asynchronously
+            futures.add(obj)
+        elif is_state(obj):
+            if obj.state_details.task_run_id:
+                inputs.add(TaskRunResult(id=obj.state_details.task_run_id))
+        # Expressions inside quotes should not be traversed
+        elif isinstance(obj, quote):
+            raise StopVisiting
+        else:
+            state = get_state_for_result(obj)
+            if state and state.state_details.task_run_id:
+                inputs.add(TaskRunResult(id=state.state_details.task_run_id))
+
+    visit_collection(
+        expr,
+        visit_fn=add_futures_and_states_to_inputs,
+        return_data=False,
+        max_depth=max_depth,
+    )
+
+    await asyncio.gather(*[future._wait_for_submission() for future in futures])
+    for future in futures:
+        inputs.add(TaskRunResult(id=future.task_run.id))
+
+    return inputs
+
+
+async def wait_for_task_runs_and_report_crashes(
+    task_run_futures: Iterable[PrefectFuture], client: PrefectClient
+) -> Literal[True]:
+    crash_exceptions = []
+
+    # Gather states concurrently first
+    states = await gather(*(future._wait for future in task_run_futures))
+
+    for future, state in zip(task_run_futures, states):
+        logger = task_run_logger(future.task_run)
+
+        if not state.type == StateType.CRASHED:
+            continue
+
+        # We use this utility instead of `state.result` for type checking
+        exception = await get_state_exception(state)
+
+        task_run = await client.read_task_run(future.task_run.id)
+        if not task_run.state.is_crashed():
+            logger.info(f"Crash detected! {state.message}")
+            logger.debug("Crash details:", exc_info=exception)
+
+            # Update the state of the task run
+            result = await client.set_task_run_state(
+                task_run_id=future.task_run.id, state=state, force=True
+            )
+            if result.status == SetStateStatus.ACCEPT:
+                engine_logger.debug(
+                    f"Reported crashed task run {future.name!r} successfully."
+                )
+            else:
+                engine_logger.warning(
+                    f"Failed to report crashed task run {future.name!r}. "
+                    f"Orchestrator did not accept state: {result!r}"
+                )
+        else:
+            # Populate the state details on the local state
+            future._final_state.state_details = task_run.state.state_details
+
+        crash_exceptions.append(exception)
+
+    # Now that we've finished reporting crashed tasks, reraise any exit exceptions
+    for exception in crash_exceptions:
+        if isinstance(exception, (KeyboardInterrupt, SystemExit)):
+            raise exception
+
+    return True
+
+
+@contextlib.contextmanager
+def capture_sigterm():
+    def cancel_flow_run(*args):
+        raise TerminationSignal(signal=signal.SIGTERM)
+
+    original_term_handler = None
+    try:
+        original_term_handler = signal.signal(signal.SIGTERM, cancel_flow_run)
+    except ValueError:
+        # Signals only work in the main thread
+        pass
+
+    try:
+        yield
+    except TerminationSignal as exc:
+        # Termination signals are swapped out during a flow run to perform
+        # a graceful shutdown and raise this exception. This `os.kill` call
+        # ensures that the previous handler, likely the Python default,
+        # gets called as well.
+        if original_term_handler is not None:
+            signal.signal(exc.signal, original_term_handler)
+            os.kill(os.getpid(), exc.signal)
+
+        raise
+
+    finally:
+        if original_term_handler is not None:
+            signal.signal(signal.SIGTERM, original_term_handler)
+
+
+async def resolve_inputs(
+    parameters: Dict[str, Any], return_data: bool = True, max_depth: int = -1
+) -> Dict[str, Any]:
+    """
+    Resolve any `Quote`, `PrefectFuture`, or `State` types nested in parameters into
+    data.
+
+    Returns:
+        A copy of the parameters with resolved data
+
+    Raises:
+        UpstreamTaskError: If any of the upstream states are not `COMPLETED`
+    """
+
+    futures = set()
+    states = set()
+    result_by_state = {}
+
+    if not parameters:
+        return {}
+
+    def collect_futures_and_states(expr, context):
+        # Expressions inside quotes should not be traversed
+        if isinstance(context.get("annotation"), quote):
+            raise StopVisiting()
+
+        if isinstance(expr, PrefectFuture):
+            futures.add(expr)
+        if is_state(expr):
+            states.add(expr)
+
+        return expr
+
+    visit_collection(
+        parameters,
+        visit_fn=collect_futures_and_states,
+        return_data=False,
+        max_depth=max_depth,
+        context={},
+    )
+
+    # Wait for all futures so we do not block when we retrieve the state in `resolve_input`
+    states.update(await asyncio.gather(*[future._wait() for future in futures]))
+
+    # Only retrieve the result if requested as it may be expensive
+    if return_data:
+        finished_states = [state for state in states if state.is_final()]
+
+        state_results = await asyncio.gather(
+            *[
+                state.result(raise_on_failure=False, fetch=True)
+                for state in finished_states
+            ]
+        )
+
+        for state, result in zip(finished_states, state_results):
+            result_by_state[state] = result
+
+    def resolve_input(expr, context):
+        state = None
+
+        # Expressions inside quotes should not be modified
+        if isinstance(context.get("annotation"), quote):
+            raise StopVisiting()
+
+        if isinstance(expr, PrefectFuture):
+            state = expr._final_state
+        elif is_state(expr):
+            state = expr
+        else:
+            return expr
+
+        # Do not allow uncompleted upstreams except failures when `allow_failure` has
+        # been used
+        if not state.is_completed() and not (
+            # TODO: Note that the contextual annotation here is only at the current level
+            # if `allow_failure` is used then another annotation is used, this will
+            # incorrectly evaluate to false — to resolve this, we must track all
+            # annotations wrapping the current expression but this is not yet
+            # implemented.
+            isinstance(context.get("annotation"), allow_failure) and state.is_failed()
+        ):
+            raise UpstreamTaskError(
+                f"Upstream task run '{state.state_details.task_run_id}' did not reach a"
+                " 'COMPLETED' state."
+            )
+
+        return result_by_state.get(state)
+
+    resolved_parameters = {}
+    for parameter, value in parameters.items():
+        try:
+            resolved_parameters[parameter] = visit_collection(
+                value,
+                visit_fn=resolve_input,
+                return_data=return_data,
+                # we're manually going 1 layer deeper here
+                max_depth=max_depth - 1,
+                remove_annotations=True,
+                context={},
+            )
+        except UpstreamTaskError:
+            raise
+        except Exception as exc:
+            raise PrefectException(
+                f"Failed to resolve inputs in parameter {parameter!r}. If your"
+                " parameter type is not supported, consider using the `quote`"
+                " annotation to skip resolution of inputs."
+            ) from exc
+
+    return resolved_parameters
+
+
+async def propose_state(
+    client: PrefectClient,
+    state: State,
+    force: bool = False,
+    task_run_id: UUID = None,
+    flow_run_id: UUID = None,
+) -> State:
+    """
+    Propose a new state for a flow run or task run, invoking Prefect orchestration logic.
+
+    If the proposed state is accepted, the provided `state` will be augmented with
+    details and returned.
+
+    If the proposed state is rejected, a new state returned by the Prefect API will be
+    returned.
+
+    If the proposed state results in a WAIT instruction from the Prefect API, the
+    function will sleep and attempt to propose the state again.
+
+    If the proposed state results in an ABORT instruction from the Prefect API, an
+    error will be raised.
+
+    Args:
+        state: a new state for the task or flow run
+        task_run_id: an optional task run id, used when proposing task run states
+        flow_run_id: an optional flow run id, used when proposing flow run states
+
+    Returns:
+        a [State model][prefect.client.schemas.objects.State] representation of the
+            flow or task run state
+
+    Raises:
+        ValueError: if neither task_run_id or flow_run_id is provided
+        prefect.exceptions.Abort: if an ABORT instruction is received from
+            the Prefect API
+    """
+
+    # Determine if working with a task run or flow run
+    if not task_run_id and not flow_run_id:
+        raise ValueError("You must provide either a `task_run_id` or `flow_run_id`")
+
+    # Handle task and sub-flow tracing
+    if state.is_final():
+        if isinstance(state.data, BaseResult) and state.data.has_cached_object():
+            # Avoid fetching the result unless it is cached, otherwise we defeat
+            # the purpose of disabling `cache_result_in_memory`
+            result = await state.result(raise_on_failure=False, fetch=True)
+        else:
+            result = state.data
+
+        link_state_to_result(state, result)
+
+    # Handle repeated WAITs in a loop instead of recursively, to avoid
+    # reaching max recursion depth in extreme cases.
+    async def set_state_and_handle_waits(set_state_func) -> OrchestrationResult:
+        response = await set_state_func()
+        while response.status == SetStateStatus.WAIT:
+            engine_logger.debug(
+                f"Received wait instruction for {response.details.delay_seconds}s: "
+                f"{response.details.reason}"
+            )
+            await anyio.sleep(response.details.delay_seconds)
+            response = await set_state_func()
+        return response
+
+    # Attempt to set the state
+    if task_run_id:
+        set_state = partial(client.set_task_run_state, task_run_id, state, force=force)
+        response = await set_state_and_handle_waits(set_state)
+    elif flow_run_id:
+        set_state = partial(client.set_flow_run_state, flow_run_id, state, force=force)
+        response = await set_state_and_handle_waits(set_state)
+    else:
+        raise ValueError(
+            "Neither flow run id or task run id were provided. At least one must "
+            "be given."
+        )
+
+    # Parse the response to return the new state
+    if response.status == SetStateStatus.ACCEPT:
+        # Update the state with the details if provided
+        state.id = response.state.id
+        state.timestamp = response.state.timestamp
+        if response.state.state_details:
+            state.state_details = response.state.state_details
+        return state
+
+    elif response.status == SetStateStatus.ABORT:
+        raise prefect.exceptions.Abort(response.details.reason)
+
+    elif response.status == SetStateStatus.REJECT:
+        if response.state.is_paused():
+            raise Pause(response.details.reason, state=response.state)
+        return response.state
+
+    else:
+        raise ValueError(
+            f"Received unexpected `SetStateStatus` from server: {response.status!r}"
+        )
+
+
+def _dynamic_key_for_task_run(context: FlowRunContext, task: Task) -> int:
+    if context.flow_run is None:  # this is an autonomous task run
+        context.task_run_dynamic_keys[task.task_key] = getattr(
+            task, "dynamic_key", str(uuid4())
+        )
+
+    elif task.task_key not in context.task_run_dynamic_keys:
+        context.task_run_dynamic_keys[task.task_key] = 0
+    else:
+        context.task_run_dynamic_keys[task.task_key] += 1
+
+    return context.task_run_dynamic_keys[task.task_key]
+
+
+def _observed_flow_pauses(context: FlowRunContext) -> int:
+    if "counter" not in context.observed_flow_pauses:
+        context.observed_flow_pauses["counter"] = 1
+    else:
+        context.observed_flow_pauses["counter"] += 1
+    return context.observed_flow_pauses["counter"]
+
+
+def get_state_for_result(obj: Any) -> Optional[State]:
+    """
+    Get the state related to a result object.
+
+    `link_state_to_result` must have been called first.
+    """
+    flow_run_context = FlowRunContext.get()
+    if flow_run_context:
+        return flow_run_context.task_run_results.get(id(obj))
+
+
+def link_state_to_result(state: State, result: Any) -> None:
+    """
+    Caches a link between a state and a result and its components using
+    the `id` of the components to map to the state. The cache is persisted to the
+    current flow run context since task relationships are limited to within a flow run.
+
+    This allows dependency tracking to occur when results are passed around.
+    Note: Because `id` is used, we cannot cache links between singleton objects.
+
+    We only cache the relationship between components 1-layer deep.
+    Example:
+        Given the result [1, ["a","b"], ("c",)], the following elements will be
+        mapped to the state:
+        - [1, ["a","b"], ("c",)]
+        - ["a","b"]
+        - ("c",)
+
+        Note: the int `1` will not be mapped to the state because it is a singleton.
+
+    Other Notes:
+        We do not hash the result because:
+        - If changes are made to the object in the flow between task calls, we can still
+            track that they are related.
+        - Hashing can be expensive.
+        - Not all objects are hashable.
+
+        We do not set an attribute, e.g. `__prefect_state__`, on the result because:
+
+        - Mutating user's objects is dangerous.
+        - Unrelated equality comparisons can break unexpectedly.
+        - The field can be preserved on copy.
+        - We cannot set this attribute on Python built-ins.
+    """
+
+    flow_run_context = FlowRunContext.get()
+
+    def link_if_trackable(obj: Any) -> None:
+        """Track connection between a task run result and its associated state if it has a unique ID.
+
+        We cannot track booleans, Ellipsis, None, NotImplemented, or the integers from -5 to 256
+        because they are singletons.
+
+        This function will mutate the State if the object is an untrackable type by setting the value
+        for `State.state_details.untrackable_result` to `True`.
+
+        """
+        if (type(obj) in UNTRACKABLE_TYPES) or (
+            isinstance(obj, int) and (-5 <= obj <= 256)
+        ):
+            state.state_details.untrackable_result = True
+            return
+        flow_run_context.task_run_results[id(obj)] = state
+
+    if flow_run_context:
+        visit_collection(expr=result, visit_fn=link_if_trackable, max_depth=1)
+
+
+def should_log_prints(flow_or_task: Union[Flow, Task]) -> bool:
+    flow_run_context = FlowRunContext.get()
+
+    if flow_or_task.log_prints is None:
+        if flow_run_context:
+            return flow_run_context.log_prints
+        else:
+            return PREFECT_LOGGING_LOG_PRINTS.value()
+
+    return flow_or_task.log_prints
+
+
+def _resolve_custom_flow_run_name(flow: Flow, parameters: Dict[str, Any]) -> str:
+    if callable(flow.flow_run_name):
+        flow_run_name = flow.flow_run_name()
+        if not isinstance(flow_run_name, str):
+            raise TypeError(
+                f"Callable {flow.flow_run_name} for 'flow_run_name' returned type"
+                f" {type(flow_run_name).__name__} but a string is required."
+            )
+    elif isinstance(flow.flow_run_name, str):
+        flow_run_name = flow.flow_run_name.format(**parameters)
+    else:
+        raise TypeError(
+            "Expected string or callable for 'flow_run_name'; got"
+            f" {type(flow.flow_run_name).__name__} instead."
+        )
+
+    return flow_run_name
+
+
+def _resolve_custom_task_run_name(task: Task, parameters: Dict[str, Any]) -> str:
+    if callable(task.task_run_name):
+        task_run_name = task.task_run_name()
+        if not isinstance(task_run_name, str):
+            raise TypeError(
+                f"Callable {task.task_run_name} for 'task_run_name' returned type"
+                f" {type(task_run_name).__name__} but a string is required."
+            )
+    elif isinstance(task.task_run_name, str):
+        task_run_name = task.task_run_name.format(**parameters)
+    else:
+        raise TypeError(
+            "Expected string or callable for 'task_run_name'; got"
+            f" {type(task.task_run_name).__name__} instead."
+        )
+
+    return task_run_name
+
+
+def _get_hook_name(hook: Callable) -> str:
+    return (
+        hook.__name__
+        if hasattr(hook, "__name__")
+        else (
+            hook.func.__name__ if isinstance(hook, partial) else hook.__class__.__name__
+        )
+    )
+
+
+async def check_api_reachable(client: PrefectClient, fail_message: str):
+    # Do not perform a healthcheck if it exists and is not expired
+    api_url = str(client.api_url)
+    if api_url in API_HEALTHCHECKS:
+        expires = API_HEALTHCHECKS[api_url]
+        if expires > time.monotonic():
+            return
+
+    connect_error = await client.api_healthcheck()
+    if connect_error:
+        raise RuntimeError(
+            f"{fail_message}. Failed to reach API at {api_url}."
+        ) from connect_error
+
+    # Create a 10 minute cache for the healthy response
+    API_HEALTHCHECKS[api_url] = get_deadline(60 * 10)
+
+
+def emit_task_run_state_change_event(
+    task_run: TaskRun,
+    initial_state: Optional[State],
+    validated_state: State,
+    follows: Optional[Event] = None,
+) -> Event:
+    state_message_truncation_length = 100_000
+
+    return emit_event(
+        id=validated_state.id,
+        occurred=validated_state.timestamp,
+        event=f"prefect.task-run.{validated_state.name}",
+        payload={
+            "intended": {
+                "from": str(initial_state.type.value) if initial_state else None,
+                "to": str(validated_state.type.value) if validated_state else None,
+            },
+            "initial_state": (
+                {
+                    "type": str(initial_state.type.value),
+                    "name": initial_state.name,
+                    "message": truncated_to(
+                        state_message_truncation_length, initial_state.message
+                    ),
+                }
+                if initial_state
+                else None
+            ),
+            "validated_state": {
+                "type": str(validated_state.type.value),
+                "name": validated_state.name,
+                "message": truncated_to(
+                    state_message_truncation_length, validated_state.message
+                ),
+            },
+        },
+        resource={
+            "prefect.resource.id": f"prefect.task-run.{task_run.id}",
+            "prefect.resource.name": task_run.name,
+            "prefect.state-message": truncated_to(
+                state_message_truncation_length, validated_state.message
+            ),
+            "prefect.state-name": validated_state.name or "",
+            "prefect.state-timestamp": (
+                validated_state.timestamp.isoformat()
+                if validated_state and validated_state.timestamp
+                else ""
+            ),
+            "prefect.state-type": str(validated_state.type.value),
+        },
+        follows=follows,
+    )
prefect/utilities/pydantic.py
CHANGED
@@ -101,7 +101,7 @@ def get_class_fields_only(model: Type[pydantic.BaseModel]) -> set:
 
 def add_type_dispatch(model_cls: Type[M]) -> Type[M]:
     """
-    Extend a Pydantic model to add a 'type' field that is used a discriminator field
+    Extend a Pydantic model to add a 'type' field that is used as a discriminator field
     to dynamically determine the subtype that when deserializing models.
 
     This allows automatic resolution to subtypes of the decorated model.
prefect/utilities/visualization.py
CHANGED
@@ -119,7 +119,7 @@ class TaskVizTracker:
     We cannot track booleans, Ellipsis, None, NotImplemented, or the integers from -5 to 256
     because they are singletons.
     """
-    from prefect.engine import UNTRACKABLE_TYPES
+    from prefect.utilities.engine import UNTRACKABLE_TYPES
 
     if (type(viz_return_value) in UNTRACKABLE_TYPES) or (
         isinstance(viz_return_value, int) and (-5 <= viz_return_value <= 256)