prefect-client 2.18.0__py3-none-any.whl → 2.18.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. prefect/_internal/schemas/fields.py +31 -12
  2. prefect/blocks/core.py +1 -1
  3. prefect/blocks/notifications.py +2 -2
  4. prefect/blocks/system.py +2 -3
  5. prefect/client/orchestration.py +283 -22
  6. prefect/client/schemas/sorting.py +9 -0
  7. prefect/client/utilities.py +25 -3
  8. prefect/concurrency/asyncio.py +11 -5
  9. prefect/concurrency/events.py +3 -3
  10. prefect/concurrency/services.py +1 -1
  11. prefect/concurrency/sync.py +9 -5
  12. prefect/deployments/deployments.py +27 -18
  13. prefect/deployments/runner.py +34 -26
  14. prefect/engine.py +3 -1
  15. prefect/events/actions.py +2 -1
  16. prefect/events/cli/automations.py +47 -9
  17. prefect/events/clients.py +50 -18
  18. prefect/events/filters.py +30 -3
  19. prefect/events/instrument.py +40 -40
  20. prefect/events/related.py +2 -1
  21. prefect/events/schemas/automations.py +50 -5
  22. prefect/events/schemas/deployment_triggers.py +15 -227
  23. prefect/events/schemas/events.py +7 -7
  24. prefect/events/utilities.py +1 -1
  25. prefect/events/worker.py +10 -7
  26. prefect/flows.py +33 -18
  27. prefect/input/actions.py +9 -9
  28. prefect/input/run_input.py +49 -37
  29. prefect/new_flow_engine.py +293 -0
  30. prefect/new_task_engine.py +374 -0
  31. prefect/results.py +3 -2
  32. prefect/runner/runner.py +3 -2
  33. prefect/server/api/collections_data/views/aggregate-worker-metadata.json +44 -3
  34. prefect/settings.py +26 -0
  35. prefect/states.py +25 -19
  36. prefect/tasks.py +17 -0
  37. prefect/utilities/asyncutils.py +37 -0
  38. prefect/utilities/engine.py +6 -4
  39. prefect/utilities/schema_tools/validation.py +1 -1
  40. {prefect_client-2.18.0.dist-info → prefect_client-2.18.1.dist-info}/METADATA +1 -1
  41. {prefect_client-2.18.0.dist-info → prefect_client-2.18.1.dist-info}/RECORD +44 -43
  42. prefect/concurrency/common.py +0 -0
  43. {prefect_client-2.18.0.dist-info → prefect_client-2.18.1.dist-info}/LICENSE +0 -0
  44. {prefect_client-2.18.0.dist-info → prefect_client-2.18.1.dist-info}/WHEEL +0 -0
  45. {prefect_client-2.18.0.dist-info → prefect_client-2.18.1.dist-info}/top_level.txt +0 -0
prefect/input/run_input.py
@@ -58,7 +58,7 @@ async def receiver_flow():
     ```
 """
 
-
+from inspect import isclass
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -96,7 +96,7 @@ if HAS_PYDANTIC_V2:
     from prefect._internal.pydantic.v2_schema import create_v2_schema
 
 R = TypeVar("R", bound="RunInput")
-T = TypeVar("T")
+T = TypeVar("T", bound="object")
 
 Keyset = Dict[
     Union[Literal["description"], Literal["response"], Literal["schema"]], str
@@ -114,7 +114,8 @@ def keyset_from_paused_state(state: "State") -> Keyset:
     if not state.is_paused():
        raise RuntimeError(f"{state.type.value!r} is unsupported.")

-    base_key = f"{state.name.lower()}-{str(state.state_details.pause_key)}"
+    state_name = state.name or ""
+    base_key = f"{state_name.lower()}-{str(state.state_details.pause_key)}"
     return keyset_from_base_key(base_key)
 
 
@@ -234,7 +235,7 @@ class RunInput(pydantic.BaseModel):
                 a flow run that requires input
             - kwargs (Any): the initial data to populate the subclass
         """
-        fields = {}
+        fields: Dict[str, Any] = {}
         for key, value in kwargs.items():
             fields[key] = (type(value), value)
         model = pydantic.create_model(cls.__name__, **fields, __base__=cls)
@@ -340,31 +341,34 @@ class AutomaticRunInput(RunInput, Generic[T]):
     def subclass_from_type(cls, _type: Type[T]) -> Type["AutomaticRunInput[T]"]:
         """
         Create a new `AutomaticRunInput` subclass from the given type.
+
+        This method uses the type's name as a key prefix to identify related
+        flow run inputs. This helps in ensuring that values saved under a type
+        (like List[int]) are retrievable under the generic type name (like "list").
         """
-        fields = {"value": (_type, ...)}
-
-        # Sending a value to a flow run that relies on an AutomaticRunInput will
-        # produce a key prefix that includes the type name. For example, if the
-        # value is a list, the key will include "list" as the type. If the user
-        # then tries to receive the value with a type annotation like List[int],
-        # we need to find the key we saved with "list" as the type (not
-        # "List[int]"). Calling __name__.lower() on a type annotation like
-        # List[int] produces the string "list", which is what we need.
-        if hasattr(_type, "__name__"):
-            type_prefix = _type.__name__.lower()
-        elif hasattr(_type, "_name"):
-            # On Python 3.9 and earlier, type annotation values don't have a
-            # __name__ attribute, but they do have a _name.
-            type_prefix = _type._name.lower()
-        else:
-            # If we can't identify a type name that we can use as a key
-            # prefix that will match an input, we'll have to use
-            # "AutomaticRunInput" as the generic name. This will match all
-            # automatic inputs sent to the flow run, rather than a specific
-            # type.
-            type_prefix = ""
+        fields: Dict[str, Any] = {"value": (_type, ...)}
+
+        # Explanation for using getattr for type name extraction:
+        # - "__name__": This is the usual attribute for getting the name of
+        #   most types.
+        # - "_name": Used as a fallback, some type annotations in Python 3.9
+        #   and earlier might only have this attribute instead of __name__.
+        # - If neither is available, defaults to an empty string to prevent
+        #   errors, but typically we should find at least one valid name
+        #   attribute. This will match all automatic inputs sent to the flow
+        #   run, rather than a specific type.
+        #
+        # This approach ensures compatibility across Python versions and
+        # handles various edge cases in type annotations.
+
+        type_prefix: str = getattr(
+            _type, "__name__", getattr(_type, "_name", "")
+        ).lower()
+
         class_name = f"{type_prefix}AutomaticRunInput"
 
+        # Creating a new Pydantic model class dynamically with the name based
+        # on the type prefix.
         new_cls: Type["AutomaticRunInput"] = pydantic.create_model(
             class_name, **fields, __base__=AutomaticRunInput
         )
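A quick illustration of the fallback chain the new comment describes. This is a standalone sketch, not Prefect code; exactly which attribute an annotation exposes varies by Python version, as the comment notes, but the lowercased result is the same either way:

    from typing import List

    def type_prefix_for(tp) -> str:
        # Prefer __name__ (plain classes such as int or list), fall back to _name
        # (typing aliases on older Pythons), else "" so the prefix matches any
        # automatic input rather than a specific type.
        return getattr(tp, "__name__", getattr(tp, "_name", "")).lower()

    type_prefix_for(int)        # "int"
    type_prefix_for(list)       # "list"
    type_prefix_for(List[int])  # "list" (via __name__ or _name, depending on Python version)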
@@ -384,18 +388,19 @@ def run_input_subclass_from_type(
     """
     Create a new `RunInput` subclass from the given type.
     """
-    try:
+    if isclass(_type):
         if issubclass(_type, RunInput):
             return cast(Type[R], _type)
         elif issubclass(_type, pydantic.BaseModel):
             return cast(Type[R], RunInput.subclass_from_base_model_type(_type))
-    except TypeError:
-        pass
 
     # Could be something like a typing._GenericAlias or any other type that
     # isn't a `RunInput` subclass or `pydantic.BaseModel` subclass. Try passing
     # it to AutomaticRunInput to see if we can create a model from it.
-    return cast(Type[AutomaticRunInput[T]], AutomaticRunInput.subclass_from_type(_type))
+    return cast(
+        Type[AutomaticRunInput[T]],
+        AutomaticRunInput.subclass_from_type(cast(Type[T], _type)),
+    )
 
 
 class GetInputHandler(Generic[R]):
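The `isclass` guard replaces the earlier try/except around `issubclass`, which raises `TypeError` when handed a non-class annotation. A small standalone illustration of why the guard routes such annotations to `AutomaticRunInput`:

    from inspect import isclass
    from typing import List

    isclass(int)        # True  -> safe to pass to issubclass()
    isclass(List[int])  # False -> issubclass(List[int], ...) would raise TypeError,
                        #          so generic annotations fall through to AutomaticRunInput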
@@ -425,7 +430,7 @@ class GetInputHandler(Generic[R]):
 
     def __next__(self) -> R:
         try:
-            return self.next()
+            return cast(R, self.next())
         except TimeoutError:
             if self.raise_timeout_error:
                 raise
@@ -502,9 +507,11 @@ async def _send_input(
     key_prefix: Optional[str] = None,
 ):
     if isinstance(run_input, RunInput):
-        _run_input = run_input
+        _run_input: RunInput = run_input
     else:
-        input_cls = run_input_subclass_from_type(type(run_input))
+        input_cls: Type[AutomaticRunInput] = run_input_subclass_from_type(
+            type(run_input)
+        )
         _run_input = input_cls(value=run_input)
 
     if key_prefix is None:
@@ -533,8 +540,8 @@ async def send_input(
 
 
 @overload
-def receive_input(
-    input_type: Type[R],
+def receive_input(  # type: ignore[overload-overlap]
+    input_type: Union[Type[R], pydantic.BaseModel],
     timeout: Optional[float] = 3600,
     poll_interval: float = 10,
     raise_timeout_error: bool = False,
@@ -561,7 +568,7 @@ def receive_input(
 
 
 def receive_input(
-    input_type: Union[Type[R], Type[T]],
+    input_type: Union[Type[R], Type[T], pydantic.BaseModel],
     timeout: Optional[float] = 3600,
     poll_interval: float = 10,
     raise_timeout_error: bool = False,
@@ -570,7 +577,12 @@ def receive_input(
     flow_run_id: Optional[UUID] = None,
     with_metadata: bool = False,
 ) -> Union[GetAutomaticInputHandler[T], GetInputHandler[R]]:
-    input_cls = run_input_subclass_from_type(input_type)
+    # The typing in this module is a bit complex, and at this point `mypy`
+    # thinks that `run_input_subclass_from_type` accepts a `Type[Never]` but
+    # the signature is the same as here:
+    #     Union[Type[R], Type[T], pydantic.BaseModel],
+    # Seems like a possible mypy bug, so we'll ignore the type check here.
+    input_cls = run_input_subclass_from_type(input_type)  # type: ignore[arg-type]
 
     if issubclass(input_cls, AutomaticRunInput):
         return input_cls.receive(
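These overload changes affect typing only; runtime behavior of the run-input API is unchanged. A hedged usage sketch of `send_input` and `receive_input` from this module (the `greeter` and `sender` flows and the way the flow run id is passed around are illustrative, not part of this diff):

    from prefect import flow
    from prefect.input.run_input import receive_input, send_input

    @flow
    async def greeter():
        # Iterate over string inputs sent to this flow run; wraps each value in
        # an AutomaticRunInput behind the scenes.
        async for name in receive_input(str, timeout=None, poll_interval=10):
            print(f"Hello, {name}!")

    @flow
    async def sender(greeter_flow_run_id):
        # Send a plain value to the receiving flow run by id.
        await send_input("Marvin", flow_run_id=greeter_flow_run_id)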
prefect/new_flow_engine.py (new file)
@@ -0,0 +1,293 @@
+import asyncio
+from contextlib import asynccontextmanager
+from dataclasses import dataclass
+from typing import (
+    Any,
+    Coroutine,
+    Dict,
+    Generic,
+    Iterable,
+    Literal,
+    Optional,
+    TypeVar,
+    Union,
+    cast,
+)
+
+import anyio
+from typing_extensions import ParamSpec
+
+from prefect import Flow, Task, get_client
+from prefect.client.orchestration import PrefectClient
+from prefect.client.schemas import FlowRun, TaskRun
+from prefect.client.schemas.filters import FlowRunFilter
+from prefect.client.schemas.sorting import FlowRunSort
+from prefect.context import FlowRunContext
+from prefect.futures import PrefectFuture, resolve_futures_to_states
+from prefect.logging.loggers import flow_run_logger
+from prefect.results import ResultFactory
+from prefect.states import (
+    Pending,
+    Running,
+    State,
+    exception_to_failed_state,
+    return_value_to_state,
+)
+from prefect.utilities.asyncutils import A, Async
+from prefect.utilities.engine import (
+    _dynamic_key_for_task_run,
+    _resolve_custom_flow_run_name,
+    collect_task_run_inputs,
+    propose_state,
+)
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+@dataclass
+class FlowRunEngine(Generic[P, R]):
+    flow: Flow[P, Coroutine[Any, Any, R]]
+    parameters: Optional[Dict[str, Any]] = None
+    flow_run: Optional[FlowRun] = None
+    _is_started: bool = False
+    _client: Optional[PrefectClient] = None
+    short_circuit: bool = False
+
+    def __post_init__(self):
+        if self.parameters is None:
+            self.parameters = {}
+
+    @property
+    def client(self) -> PrefectClient:
+        if not self._is_started or self._client is None:
+            raise RuntimeError("Engine has not started.")
+        return self._client
+
+    @property
+    def state(self) -> State:
+        return self.flow_run.state  # type: ignore
+
+    async def begin_run(self) -> State:
+        new_state = Running()
+        state = await self.set_state(new_state)
+        while state.is_pending():
+            await asyncio.sleep(1)
+            state = await self.set_state(new_state)
+        return state
+
+    async def set_state(self, state: State) -> State:
+        """ """
+        # prevents any state-setting activity
+        if self.short_circuit:
+            return self.state
+
+        state = await propose_state(self.client, state, flow_run_id=self.flow_run.id)  # type: ignore
+        self.flow_run.state = state  # type: ignore
+        self.flow_run.state_name = state.name  # type: ignore
+        self.flow_run.state_type = state.type  # type: ignore
+        return state
+
+    async def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]":
+        return await self.state.result(raise_on_failure=raise_on_failure, fetch=True)
+
+    async def handle_success(self, result: R) -> R:
+        result_factory = getattr(FlowRunContext.get(), "result_factory", None)
+        terminal_state = await return_value_to_state(
+            await resolve_futures_to_states(result),
+            result_factory=result_factory,
+        )
+        await self.set_state(terminal_state)
+        return result
+
+    async def handle_exception(
+        self,
+        exc: Exception,
+        msg: Optional[str] = None,
+        result_factory: Optional[ResultFactory] = None,
+    ) -> State:
+        context = FlowRunContext.get()
+        state = await exception_to_failed_state(
+            exc,
+            message=msg or "Flow run encountered an exception:",
+            result_factory=result_factory or getattr(context, "result_factory", None),
+        )
+        state = await self.set_state(state)
+        if self.state.is_scheduled():
+            state = await self.set_state(Running())
+        return state
+
+    async def create_subflow_task_run(
+        self, client: PrefectClient, context: FlowRunContext
+    ) -> TaskRun:
+        dummy_task = Task(
+            name=self.flow.name, fn=self.flow.fn, version=self.flow.version
+        )
+        task_inputs = {
+            k: await collect_task_run_inputs(v) for k, v in self.parameters.items()
+        }
+        parent_task_run = await client.create_task_run(
+            task=dummy_task,
+            flow_run_id=(
+                context.flow_run.id if getattr(context, "flow_run", None) else None
+            ),
+            dynamic_key=_dynamic_key_for_task_run(context, dummy_task),
+            task_inputs=task_inputs,
+            state=Pending(),
+        )
+        return parent_task_run
+
+    async def get_most_recent_flow_run_for_parent_task_run(
+        self, client: PrefectClient, parent_task_run: TaskRun
+    ) -> "Union[FlowRun, None]":
+        """
+        Get the most recent flow run associated with the provided parent task run.
+
+        Args:
+            - An orchestration client
+            - The parent task run to get the most recent flow run for
+
+        Returns:
+            The most recent flow run associated with the parent task run or `None` if
+            no flow runs are found
+        """
+        flow_runs = await client.read_flow_runs(
+            flow_run_filter=FlowRunFilter(
+                parent_task_run_id={"any_": [parent_task_run.id]}
+            ),
+            sort=FlowRunSort.EXPECTED_START_TIME_ASC,
+        )
+        return flow_runs[-1] if flow_runs else None
+
+    async def create_flow_run(self, client: PrefectClient) -> FlowRun:
+        flow_run_ctx = FlowRunContext.get()
+
+        parent_task_run = None
+        # this is a subflow run
+        if flow_run_ctx:
+            parent_task_run = await self.create_subflow_task_run(
+                client=client, context=flow_run_ctx
+            )
+            # If the parent task run already completed, return the last flow run
+            # associated with the parent task run. This prevents rerunning a completed
+            # flow run when the parent task run is rerun.
+            most_recent_flow_run = (
+                await self.get_most_recent_flow_run_for_parent_task_run(
+                    client=client, parent_task_run=parent_task_run
+                )
+            )
+            if most_recent_flow_run:
+                return most_recent_flow_run
+
+        try:
+            flow_run_name = _resolve_custom_flow_run_name(
+                flow=self.flow, parameters=self.parameters
+            )
+        except TypeError:
+            flow_run_name = None
+
+        flow_run = await client.create_flow_run(
+            flow=self.flow,
+            name=flow_run_name,
+            parameters=self.flow.serialize_parameters(self.parameters),
+            state=Pending(),
+            parent_task_run_id=getattr(parent_task_run, "id", None),
+        )
+        return flow_run
+
+    @asynccontextmanager
+    async def enter_run_context(self, client: Optional[PrefectClient] = None):
+        if client is None:
+            client = self.client
+
+        self.flow_run = await client.read_flow_run(self.flow_run.id)
+
+        with FlowRunContext(
+            flow=self.flow,
+            log_prints=self.flow.log_prints or False,
+            flow_run=self.flow_run,
+            parameters=self.parameters,
+            client=client,
+            background_tasks=anyio.create_task_group(),
+            result_factory=await ResultFactory.from_flow(self.flow),
+            task_runner=self.flow.task_runner,
+        ):
+            self.logger = flow_run_logger(flow_run=self.flow_run, flow=self.flow)
+            yield
+
+    @asynccontextmanager
+    async def start(self):
+        """
+        Enters a client context and creates a flow run if needed.
+        """
+        async with get_client() as client:
+            self._client = client
+            self._is_started = True
+
+            if not self.flow_run:
+                self.flow_run = await self.create_flow_run(client)
+
+            # validate prior to context so that context receives validated params
+            if self.flow.should_validate_parameters:
+                try:
+                    self.parameters = self.flow.validate_parameters(self.parameters)
+                except Exception as exc:
+                    await self.handle_exception(
+                        exc,
+                        msg="Validation of flow parameters failed with error",
+                        result_factory=await ResultFactory.from_flow(self.flow),
+                    )
+                    self.short_circuit = True
+
+            yield self
+
+        self._is_started = False
+        self._client = None
+
+    def is_running(self) -> bool:
+        if getattr(self, "flow_run", None) is None:
+            return False
+        return getattr(self, "flow_run").state.is_running()
+
+    def is_pending(self) -> bool:
+        if getattr(self, "flow_run", None) is None:
+            return False  # TODO: handle this differently?
+        return getattr(self, "flow_run").state.is_pending()
+
+
+async def run_flow(
+    flow: Task[P, Coroutine[Any, Any, R]],
+    flow_run: Optional[FlowRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture[A, Async]]] = None,
+    return_type: Literal["state", "result"] = "result",
+) -> "Union[R, None]":
+    """
+    Runs a flow against the API.
+
+    We will most likely want to use this logic as a wrapper and return a coroutine for type inference.
+    """
+
+    engine = FlowRunEngine[P, R](flow, parameters, flow_run)
+    async with engine.start() as run:
+        # This is a context manager that keeps track of the state of the flow run.
+        await run.begin_run()
+
+        while run.is_running():
+            async with run.enter_run_context():
+                try:
+                    # This is where the flow is actually run.
+                    if flow.isasync:
+                        result = cast(R, await flow.fn(**(run.parameters or {})))  # type: ignore
+                    else:
+                        result = cast(R, flow.fn(**(run.parameters or {})))  # type: ignore
+                    # If the flow run is successful, finalize it.
+                    await run.handle_success(result)
+
+                except Exception as exc:
+                    # If the flow fails, and we have retries left, set the flow to retrying.
+                    await run.handle_exception(exc)
+
+        if return_type == "state":
+            return run.state
+        return await run.result()
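For orientation, a hedged sketch of how this new engine might be driven directly, based only on the `run_flow` signature added above. It assumes a reachable Prefect API, uses an illustrative `add` flow, and treats the module as experimental rather than part of the stable public interface:

    import asyncio

    from prefect import flow
    from prefect.new_flow_engine import run_flow

    @flow
    async def add(x: int, y: int) -> int:
        return x + y

    # Creates a flow run via the API, proposes Pending -> Running -> terminal states,
    # and returns either the flow's result or its final State.
    result = asyncio.run(run_flow(add, parameters={"x": 1, "y": 2}))
    state = asyncio.run(run_flow(add, parameters={"x": 1, "y": 2}, return_type="state"))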