prefect-client 3.0.0rc2__py3-none-any.whl → 3.0.0rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. prefect/__init__.py +0 -1
  2. prefect/_internal/compatibility/migration.py +124 -0
  3. prefect/_internal/concurrency/__init__.py +2 -2
  4. prefect/_internal/concurrency/primitives.py +1 -0
  5. prefect/_internal/pydantic/annotations/pendulum.py +2 -2
  6. prefect/_internal/pytz.py +1 -1
  7. prefect/blocks/core.py +1 -1
  8. prefect/client/orchestration.py +96 -22
  9. prefect/client/schemas/actions.py +1 -1
  10. prefect/client/schemas/filters.py +6 -0
  11. prefect/client/schemas/objects.py +10 -3
  12. prefect/client/subscriptions.py +6 -5
  13. prefect/context.py +1 -27
  14. prefect/deployments/__init__.py +3 -0
  15. prefect/deployments/base.py +4 -2
  16. prefect/deployments/deployments.py +3 -0
  17. prefect/deployments/steps/pull.py +1 -0
  18. prefect/deployments/steps/utility.py +2 -1
  19. prefect/engine.py +3 -0
  20. prefect/events/cli/automations.py +1 -1
  21. prefect/events/clients.py +7 -1
  22. prefect/exceptions.py +9 -0
  23. prefect/filesystems.py +22 -11
  24. prefect/flow_engine.py +195 -153
  25. prefect/flows.py +95 -36
  26. prefect/futures.py +9 -1
  27. prefect/infrastructure/provisioners/container_instance.py +1 -0
  28. prefect/infrastructure/provisioners/ecs.py +2 -2
  29. prefect/input/__init__.py +4 -0
  30. prefect/logging/formatters.py +2 -2
  31. prefect/logging/handlers.py +2 -2
  32. prefect/logging/loggers.py +1 -1
  33. prefect/plugins.py +1 -0
  34. prefect/records/cache_policies.py +3 -3
  35. prefect/records/result_store.py +10 -3
  36. prefect/results.py +47 -73
  37. prefect/runner/runner.py +1 -1
  38. prefect/runner/server.py +1 -1
  39. prefect/runtime/__init__.py +1 -0
  40. prefect/runtime/deployment.py +1 -0
  41. prefect/runtime/flow_run.py +1 -0
  42. prefect/runtime/task_run.py +1 -0
  43. prefect/settings.py +16 -3
  44. prefect/states.py +15 -4
  45. prefect/task_engine.py +195 -39
  46. prefect/task_runners.py +9 -3
  47. prefect/task_runs.py +26 -12
  48. prefect/task_worker.py +149 -20
  49. prefect/tasks.py +153 -71
  50. prefect/transactions.py +85 -15
  51. prefect/types/__init__.py +10 -3
  52. prefect/utilities/asyncutils.py +3 -3
  53. prefect/utilities/callables.py +16 -4
  54. prefect/utilities/collections.py +120 -57
  55. prefect/utilities/dockerutils.py +5 -3
  56. prefect/utilities/engine.py +11 -0
  57. prefect/utilities/filesystem.py +4 -5
  58. prefect/utilities/importtools.py +29 -0
  59. prefect/utilities/services.py +2 -2
  60. prefect/utilities/urls.py +195 -0
  61. prefect/utilities/visualization.py +1 -0
  62. prefect/variables.py +4 -0
  63. prefect/workers/base.py +35 -0
  64. {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/METADATA +2 -2
  65. {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/RECORD +68 -66
  66. prefect/blocks/kubernetes.py +0 -115
  67. {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/LICENSE +0 -0
  68. {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/WHEEL +0 -0
  69. {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/top_level.txt +0 -0
prefect/task_worker.py CHANGED
@@ -8,10 +8,14 @@ from concurrent.futures import ThreadPoolExecutor
 from contextlib import AsyncExitStack
 from contextvars import copy_context
 from typing import List, Optional
+from uuid import UUID
 
 import anyio
 import anyio.abc
+import pendulum
+import uvicorn
 from exceptiongroup import BaseExceptionGroup  # novermin
+from fastapi import FastAPI
 from websockets.exceptions import InvalidStatusCode
 
 from prefect import Task
@@ -73,8 +77,9 @@ class TaskWorker:
         limit: Optional[int] = 10,
     ):
         self.tasks: List[Task] = list(tasks)
+        self.task_keys = set(t.task_key for t in tasks if isinstance(t, Task))
 
-        self.started: bool = False
+        self._started_at: Optional[pendulum.DateTime] = None
         self.stopping: bool = False
 
         self._client = get_client()
@@ -86,13 +91,44 @@ class TaskWorker:
         )
 
         self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-        self._executor = ThreadPoolExecutor()
+        self._executor = ThreadPoolExecutor(max_workers=limit if limit else None)
         self._limiter = anyio.CapacityLimiter(limit) if limit else None
 
+        self.in_flight_task_runs: dict[str, dict[UUID, pendulum.DateTime]] = {
+            task_key: {} for task_key in self.task_keys
+        }
+        self.finished_task_runs: dict[str, int] = {
+            task_key: 0 for task_key in self.task_keys
+        }
+
     @property
-    def _client_id(self) -> str:
+    def client_id(self) -> str:
         return f"{socket.gethostname()}-{os.getpid()}"
 
+    @property
+    def started_at(self) -> Optional[pendulum.DateTime]:
+        return self._started_at
+
+    @property
+    def started(self) -> bool:
+        return self._started_at is not None
+
+    @property
+    def limit(self) -> Optional[int]:
+        return int(self._limiter.total_tokens) if self._limiter else None
+
+    @property
+    def current_tasks(self) -> Optional[int]:
+        return (
+            int(self._limiter.borrowed_tokens)
+            if self._limiter
+            else sum(len(runs) for runs in self.in_flight_task_runs.values())
+        )
+
+    @property
+    def available_tasks(self) -> Optional[int]:
+        return int(self._limiter.available_tokens) if self._limiter else None
+
     def handle_sigterm(self, signum, frame):
         """
         Shuts down the task worker when a SIGTERM is received.
@@ -116,7 +152,7 @@ class TaskWorker:
         except InvalidStatusCode as exc:
             if exc.status_code == 403:
                 logger.error(
-                    "Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
+                    "403: Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
                     f" endpoint found at:\n\n {PREFECT_API_URL.value()}"
                     "\n\nPlease double-check the values of your"
                     " `PREFECT_API_URL` and `PREFECT_API_KEY` environment variables."
@@ -133,25 +169,70 @@ class TaskWorker:
             " calling .start()"
         )
 
-        self.started = False
+        self._started_at = None
         self.stopping = True
 
         raise StopTaskWorker
 
+    async def _acquire_token(self, task_run_id: UUID) -> bool:
+        try:
+            if self._limiter:
+                await self._limiter.acquire_on_behalf_of(task_run_id)
+        except RuntimeError:
+            logger.debug(f"Token already acquired for task run: {task_run_id!r}")
+            return False
+
+        return True
+
+    def _release_token(self, task_run_id: UUID) -> bool:
+        try:
+            if self._limiter:
+                self._limiter.release_on_behalf_of(task_run_id)
+        except RuntimeError:
+            logger.debug(f"No token to release for task run: {task_run_id!r}")
+            return False
+
+        return True
+
     async def _subscribe_to_task_scheduling(self):
-        logger.info(
-            f"Subscribing to tasks: {' | '.join(t.task_key.split('.')[-1] for t in self.tasks)}"
+        base_url = PREFECT_API_URL.value()
+        if base_url is None:
+            raise ValueError(
+                "`PREFECT_API_URL` must be set to use the task worker. "
+                "Task workers are not compatible with the ephemeral API."
+            )
+        task_keys_repr = " | ".join(
+            task_key.split(".")[-1].split("-")[0] for task_key in sorted(self.task_keys)
         )
+        logger.info(f"Subscribing to runs of task(s): {task_keys_repr}")
         async for task_run in Subscription(
             model=TaskRun,
             path="/task_runs/subscriptions/scheduled",
-            keys=[task.task_key for task in self.tasks],
-            client_id=self._client_id,
+            keys=self.task_keys,
+            client_id=self.client_id,
+            base_url=base_url,
         ):
-            if self._limiter:
-                await self._limiter.acquire_on_behalf_of(task_run.id)
             logger.info(f"Received task run: {task_run.id} - {task_run.name}")
-            self._runs_task_group.start_soon(self._submit_scheduled_task_run, task_run)
+
+            token_acquired = await self._acquire_token(task_run.id)
+            if token_acquired:
+                self._runs_task_group.start_soon(
+                    self._safe_submit_scheduled_task_run, task_run
+                )
+
+    async def _safe_submit_scheduled_task_run(self, task_run: TaskRun):
+        self.in_flight_task_runs[task_run.task_key][task_run.id] = pendulum.now()
+        try:
+            await self._submit_scheduled_task_run(task_run)
+        except BaseException as exc:
+            logger.exception(
+                f"Failed to submit task run {task_run.id!r}",
+                exc_info=exc,
+            )
+        finally:
+            self.in_flight_task_runs[task_run.task_key].pop(task_run.id, None)
+            self.finished_task_runs[task_run.task_key] += 1
+            self._release_token(task_run.id)
 
     async def _submit_scheduled_task_run(self, task_run: TaskRun):
         logger.debug(
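Reviewer note: `_acquire_token`/`_release_token` wrap anyio's `CapacityLimiter`, whose per-borrower calls raise `RuntimeError` on a duplicate acquire or an unmatched release for the same key. A minimal standalone sketch of that pattern (the names and sleep are illustrative, not from this diff):

```python
import anyio

async def main():
    limiter = anyio.CapacityLimiter(2)  # at most two borrowers at once

    async def work(run_id: int):
        # borrow a token keyed by the run id, as the worker does per task run
        await limiter.acquire_on_behalf_of(run_id)
        try:
            await anyio.sleep(0.1)  # stand-in for submitting the task run
        finally:
            # release with the same key; a second release would raise RuntimeError
            limiter.release_on_behalf_of(run_id)

    async with anyio.create_task_group() as tg:
        for run_id in range(5):
            tg.start_soon(work, run_id)

anyio.run(main)
```

Moving the release into `_safe_submit_scheduled_task_run`'s `finally` block is what guarantees a token (and the in-flight bookkeeping) is cleaned up even when submission fails.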
@@ -258,15 +339,13 @@ class TaskWorker:
             context=run_context,
         )
         await asyncio.wrap_future(future)
-        if self._limiter:
-            self._limiter.release_on_behalf_of(task_run.id)
 
     async def execute_task_run(self, task_run: TaskRun):
         """Execute a task run in the task worker."""
         async with self if not self.started else asyncnullcontext():
-            if self._limiter:
-                await self._limiter.acquire_on_behalf_of(task_run.id)
-            await self._submit_scheduled_task_run(task_run)
+            token_acquired = await self._acquire_token(task_run.id)
+            if token_acquired:
+                await self._safe_submit_scheduled_task_run(task_run)
 
     async def __aenter__(self):
         logger.debug("Starting task worker...")
@@ -278,17 +357,42 @@ class TaskWorker:
         await self._exit_stack.enter_async_context(self._runs_task_group)
         self._exit_stack.enter_context(self._executor)
 
-        self.started = True
+        self._started_at = pendulum.now()
         return self
 
     async def __aexit__(self, *exc_info):
         logger.debug("Stopping task worker...")
-        self.started = False
+        self._started_at = None
         await self._exit_stack.__aexit__(*exc_info)
 
 
+def create_status_server(task_worker: TaskWorker) -> FastAPI:
+    status_app = FastAPI()
+
+    @status_app.get("/status")
+    def status():
+        return {
+            "client_id": task_worker.client_id,
+            "started_at": task_worker.started_at.isoformat(),
+            "stopping": task_worker.stopping,
+            "limit": task_worker.limit,
+            "current": task_worker.current_tasks,
+            "available": task_worker.available_tasks,
+            "tasks": sorted(task_worker.task_keys),
+            "finished": task_worker.finished_task_runs,
+            "in_flight": {
+                key: {str(run): start.isoformat() for run, start in tasks.items()}
+                for key, tasks in task_worker.in_flight_task_runs.items()
+            },
+        }
+
+    return status_app
+
+
 @sync_compatible
-async def serve(*tasks: Task, limit: Optional[int] = 10):
+async def serve(
+    *tasks: Task, limit: Optional[int] = 10, status_server_port: Optional[int] = None
+):
     """Serve the provided tasks so that their runs may be submitted to and executed
     in the engine. Tasks do not need to be within a flow run context to be submitted.
     You must `.submit` the same task object that you pass to `serve`.
@@ -298,6 +402,9 @@ async def serve(*tasks: Task, limit: Optional[int] = 10):
             given task, the task run will be submitted to the engine for execution.
         - limit: The maximum number of tasks that can be run concurrently. Defaults to 10.
             Pass `None` to remove the limit.
+        - status_server_port: An optional port on which to start an HTTP server
+            exposing status information about the task worker. If not provided, no
+            status server will run.
 
     Example:
        ```python
@@ -319,6 +426,20 @@ async def serve(*tasks: Task, limit: Optional[int] = 10):
     """
     task_worker = TaskWorker(*tasks, limit=limit)
 
+    status_server_task = None
+    if status_server_port is not None:
+        server = uvicorn.Server(
+            uvicorn.Config(
+                app=create_status_server(task_worker),
+                host="127.0.0.1",
+                port=status_server_port,
+                access_log=False,
+                log_level="warning",
+            )
+        )
+        loop = asyncio.get_event_loop()
+        status_server_task = loop.create_task(server.serve())
+
     try:
         await task_worker.start()
 
@@ -335,3 +456,11 @@ async def serve(*tasks: Task, limit: Optional[int] = 10):
 
     except (asyncio.CancelledError, KeyboardInterrupt):
         logger.info("Task worker interrupted, stopping...")
+
+    finally:
+        if status_server_task:
+            status_server_task.cancel()
+            try:
+                await status_server_task
+            except asyncio.CancelledError:
+                pass
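Reviewer note: taken together, the task_worker.py changes let `serve` expose an optional loopback-only status endpoint. A sketch of how the new parameter would be used, assuming only what the diff shows (the task and port number are arbitrary):

```python
from prefect import task
from prefect.task_worker import serve

@task
def add(x: int, y: int) -> int:
    return x + y

if __name__ == "__main__":
    # Requires PREFECT_API_URL to be set: per this diff, the worker now
    # raises a ValueError rather than falling back to the ephemeral API.
    # The status server binds to 127.0.0.1 only.
    serve(add, limit=10, status_server_port=4422)
```

While the worker runs, `curl http://127.0.0.1:4422/status` should return the JSON assembled in `create_status_server`: client id, start time, limiter totals, and per-task-key in-flight and finished tallies.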
prefect/tasks.py CHANGED
@@ -22,6 +22,7 @@ from typing import (
     Optional,
     Set,
     Tuple,
+    Type,
     TypeVar,
     Union,
     cast,
@@ -43,7 +44,7 @@ from prefect.context import (
 )
 from prefect.futures import PrefectDistributedFuture, PrefectFuture
 from prefect.logging.loggers import get_logger
-from prefect.records.cache_policies import DEFAULT, CachePolicy
+from prefect.records.cache_policies import DEFAULT, NONE, CachePolicy
 from prefect.results import ResultFactory, ResultSerializer, ResultStorage
 from prefect.settings import (
     PREFECT_TASK_DEFAULT_RETRIES,
@@ -122,6 +123,57 @@ def exponential_backoff(backoff_factor: float) -> Callable[[int], List[float]]:
     return retry_backoff_callable
 
 
+def _infer_parent_task_runs(
+    flow_run_context: Optional[FlowRunContext],
+    task_run_context: Optional[TaskRunContext],
+    parameters: Dict[str, Any],
+):
+    """
+    Attempt to infer the parent task runs for this task run based on the
+    provided flow run and task run contexts, as well as any parameters. It is
+    assumed that the task run is running within those contexts.
+    If any parameter comes from a running task run, that task run is considered
+    a parent. This is expected to happen when task inputs are yielded from
+    generator tasks.
+    """
+    parents = []
+
+    # check if this task has a parent task run based on running in another
+    # task run's existing context. A task run is only considered a parent if
+    # it is in the same flow run (because otherwise presumably the child is
+    # in a subflow, so the subflow serves as the parent) or if there is no
+    # flow run
+    if task_run_context:
+        # there is no flow run
+        if not flow_run_context:
+            parents.append(TaskRunResult(id=task_run_context.task_run.id))
+        # there is a flow run and the task run is in the same flow run
+        elif flow_run_context and task_run_context.task_run.flow_run_id == getattr(
+            flow_run_context.flow_run, "id", None
+        ):
+            parents.append(TaskRunResult(id=task_run_context.task_run.id))
+
+    # parent dependency tracking: for every provided parameter value, try to
+    # load the corresponding task run state. If the task run state is still
+    # running, we consider it a parent task run. Note this is only done if
+    # there is an active flow run context because dependencies are only
+    # tracked within the same flow run.
+    if flow_run_context:
+        for v in parameters.values():
+            if isinstance(v, State):
+                upstream_state = v
+            elif isinstance(v, PrefectFuture):
+                upstream_state = v.state
+            else:
+                upstream_state = flow_run_context.task_run_results.get(id(v))
+            if upstream_state and upstream_state.is_running():
+                parents.append(
+                    TaskRunResult(id=upstream_state.state_details.task_run_id)
+                )
+
+    return parents
+
+
 @PrefectObjectRegistry.register_instances
 class Task(Generic[P, R]):
     """
@@ -166,10 +218,8 @@ class Task(Generic[P, R]):
             cannot exceed 50.
         retry_jitter_factor: An optional factor that defines the factor to which a retry
             can be jittered in order to avoid a "thundering herd".
-        persist_result: An optional toggle indicating whether the result of this task
-            should be persisted to result storage. Defaults to `None`, which indicates
-            that Prefect should choose whether the result should be persisted depending on
-            the features being used.
+        persist_result: A toggle indicating whether the result of this task
+            should be persisted to result storage. Defaults to `True`.
         result_storage: An optional block to use to persist the result of this task.
             Defaults to the value set in the flow the task is called in.
         result_storage_key: An optional key to store the result in storage at when persisted.
@@ -221,7 +271,7 @@ class Task(Generic[P, R]):
             ]
         ] = None,
         retry_jitter_factor: Optional[float] = None,
-        persist_result: Optional[bool] = None,
+        persist_result: bool = True,
         result_storage: Optional[ResultStorage] = None,
         result_serializer: Optional[ResultSerializer] = None,
         result_storage_key: Optional[str] = None,
@@ -268,7 +318,18 @@ class Task(Generic[P, R]):
         self.description = description or inspect.getdoc(fn)
         update_wrapper(self, fn)
         self.fn = fn
-        self.isasync = inspect.iscoroutinefunction(self.fn)
+
+        # the task is considered async if its function is async or an async
+        # generator
+        self.isasync = inspect.iscoroutinefunction(
+            self.fn
+        ) or inspect.isasyncgenfunction(self.fn)
+
+        # the task is considered a generator if its function is a generator or
+        # an async generator
+        self.isgenerator = inspect.isgeneratorfunction(
+            self.fn
+        ) or inspect.isasyncgenfunction(self.fn)
 
         if not name:
             if not hasattr(self.fn, "__name__"):
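Reviewer note: both flags need the extra `inspect.isasyncgenfunction` check because the stdlib's narrower predicates return `False` for async generator functions:

```python
import inspect

async def agen():
    yield 1

# an async generator function is neither a plain coroutine function
# nor a plain generator function, so it needs its own check
print(inspect.iscoroutinefunction(agen))  # False
print(inspect.isasyncgenfunction(agen))   # True
print(inspect.isgeneratorfunction(agen))  # False
```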
@@ -305,7 +366,11 @@ class Task(Generic[P, R]):
 
         self.task_key = f"{self.fn.__qualname__}-{task_origin_hash}"
 
-        # TODO: warn of precedence of cache policies and cache key fn if both provided?
+        if cache_policy is not NotSet and cache_key_fn is not None:
+            logger.warning(
+                f"Both `cache_policy` and `cache_key_fn` are set on task {self}. `cache_key_fn` will be used."
+            )
+
         if cache_key_fn:
             cache_policy = CachePolicy.from_cache_key_fn(cache_key_fn)
 
@@ -314,7 +379,13 @@ class Task(Generic[P, R]):
         self.cache_expiration = cache_expiration
         self.refresh_cache = refresh_cache
 
-        if cache_policy is NotSet and result_storage_key is None:
+        if not persist_result:
+            self.cache_policy = None if cache_policy is None else NONE
+            if cache_policy and cache_policy is not NotSet and cache_policy != NONE:
+                logger.warning(
+                    "Ignoring `cache_policy` because `persist_result` is False"
+                )
+        elif cache_policy is NotSet and result_storage_key is None:
             self.cache_policy = DEFAULT
         elif result_storage_key:
             # TODO: handle this situation with double storage
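Reviewer note: with `persist_result` now defaulting to `True`, opting out also disables caching, since a result that is never persisted cannot back a cache hit. A sketch of the resulting behavior (hypothetical tasks, based on the branch above):

```python
from prefect import task
from prefect.records.cache_policies import DEFAULT

@task(persist_result=False)
def scratch():
    ...
# scratch.cache_policy is NONE; no warning, since no policy was requested

@task(persist_result=False, cache_policy=DEFAULT)
def conflicting():
    ...
# also ends up with NONE, and logs:
# "Ignoring `cache_policy` because `persist_result` is False"
```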
@@ -367,34 +438,57 @@ class Task(Generic[P, R]):
         self.retry_condition_fn = retry_condition_fn
         self.viz_return_value = viz_return_value
 
+    @property
+    def ismethod(self) -> bool:
+        return hasattr(self.fn, "__prefect_self__")
+
+    def __get__(self, instance, owner):
+        """
+        Implement the descriptor protocol so that the task can be used as an instance method.
+        When an instance method is loaded, this method is called with the "self" instance as
+        an argument. We return a copy of the task with that instance bound to the task's function.
+        """
+
+        # if no instance is provided, it's being accessed on the class
+        if instance is None:
+            return self
+
+        # if the task is being accessed on an instance, bind the instance to the __prefect_self__ attribute
+        # of the task's function. This will allow it to be automatically added to the task's parameters
+        else:
+            bound_task = copy(self)
+            bound_task.fn.__prefect_self__ = instance
+            return bound_task
+
     def with_options(
         self,
         *,
-        name: str = None,
-        description: str = None,
-        tags: Iterable[str] = None,
-        cache_policy: CachePolicy = NotSet,
-        cache_key_fn: Callable[
-            ["TaskRunContext", Dict[str, Any]], Optional[str]
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        tags: Optional[Iterable[str]] = None,
+        cache_policy: Union[CachePolicy, Type[NotSet]] = NotSet,
+        cache_key_fn: Optional[
+            Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
         ] = None,
         task_run_name: Optional[Union[Callable[[], str], str]] = None,
-        cache_expiration: datetime.timedelta = None,
-        retries: Optional[int] = NotSet,
+        cache_expiration: Optional[datetime.timedelta] = None,
+        retries: Union[int, Type[NotSet]] = NotSet,
         retry_delay_seconds: Union[
             float,
             int,
             List[float],
             Callable[[int], List[float]],
+            Type[NotSet],
         ] = NotSet,
-        retry_jitter_factor: Optional[float] = NotSet,
-        persist_result: Optional[bool] = NotSet,
-        result_storage: Optional[ResultStorage] = NotSet,
-        result_serializer: Optional[ResultSerializer] = NotSet,
-        result_storage_key: Optional[str] = NotSet,
+        retry_jitter_factor: Union[float, Type[NotSet]] = NotSet,
+        persist_result: Union[bool, Type[NotSet]] = NotSet,
+        result_storage: Union[ResultStorage, Type[NotSet]] = NotSet,
+        result_serializer: Union[ResultSerializer, Type[NotSet]] = NotSet,
+        result_storage_key: Union[str, Type[NotSet]] = NotSet,
         cache_result_in_memory: Optional[bool] = None,
-        timeout_seconds: Union[int, float] = None,
-        log_prints: Optional[bool] = NotSet,
-        refresh_cache: Optional[bool] = NotSet,
+        timeout_seconds: Union[int, float, None] = None,
+        log_prints: Union[bool, Type[NotSet]] = NotSet,
+        refresh_cache: Union[bool, Type[NotSet]] = NotSet,
         on_completion: Optional[
             List[Callable[["Task", TaskRun, State], Union[Awaitable[None], None]]]
         ] = None,
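Reviewer note: `__get__` is what makes `@task` usable on instance methods; attribute access through an instance returns a bound copy. A sketch of what the descriptor enables (hypothetical class; note the class-access check comes first because binding mutates the shared function object):

```python
from prefect import task

class Dataset:
    def __init__(self, path: str):
        self.path = path

    @task
    def load(self) -> str:
        return f"loading {self.path}"

print(Dataset.load.ismethod)  # False: class access returns the task itself

ds = Dataset("data.csv")
bound = ds.load               # Task.__get__ returns a copy bound to `ds`
print(bound.ismethod)         # True: the fn now carries __prefect_self__
```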
@@ -588,7 +682,7 @@ class Task(Generic[P, R]):
         async with client:
             if not flow_run_context:
                 dynamic_key = f"{self.task_key}-{str(uuid4().hex)}"
-                task_run_name = f"{self.name}-{dynamic_key[:NUM_CHARS_DYNAMIC_KEY]}"
+                task_run_name = self.name
             else:
                 dynamic_key = _dynamic_key_for_task_run(
                     context=flow_run_context, task=self
@@ -624,27 +718,15 @@ class Task(Generic[P, R]):
                 k: collect_task_run_inputs_sync(v) for k, v in parameters.items()
             }
 
-            # check if this task has a parent task run based on running in another
-            # task run's existing context. A task run is only considered a parent if
-            # it is in the same flow run (because otherwise presumably the child is
-            # in a subflow, so the subflow serves as the parent) or if there is no
-            # flow run
-            if parent_task_run_context:
-                # there is no flow run
-                if not flow_run_context:
-                    task_inputs["__parents__"] = [
-                        TaskRunResult(id=parent_task_run_context.task_run.id)
-                    ]
-                # there is a flow run and the task run is in the same flow run
-                elif (
-                    flow_run_context
-                    and parent_task_run_context.task_run.flow_run_id
-                    == getattr(flow_run_context.flow_run, "id", None)
-                ):
-                    task_inputs["__parents__"] = [
-                        TaskRunResult(id=parent_task_run_context.task_run.id)
-                    ]
+            # collect all parent dependencies
+            if task_parents := _infer_parent_task_runs(
+                flow_run_context=flow_run_context,
+                task_run_context=parent_task_run_context,
+                parameters=parameters,
+            ):
+                task_inputs["__parents__"] = task_parents
 
+            # check wait for dependencies
             if wait_for:
                 task_inputs["wait_for"] = collect_task_run_inputs_sync(wait_for)
@@ -1234,13 +1316,15 @@ def task(__fn: Callable[P, R]) -> Task[P, R]:
 @overload
 def task(
     *,
-    name: str = None,
-    description: str = None,
-    tags: Iterable[str] = None,
-    version: str = None,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    tags: Optional[Iterable[str]] = None,
+    version: Optional[str] = None,
     cache_policy: CachePolicy = NotSet,
-    cache_key_fn: Callable[["TaskRunContext", Dict[str, Any]], Optional[str]] = None,
-    cache_expiration: datetime.timedelta = None,
+    cache_key_fn: Optional[
+        Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
+    ] = None,
+    cache_expiration: Optional[datetime.timedelta] = None,
     task_run_name: Optional[Union[Callable[[], str], str]] = None,
     retries: int = 0,
     retry_delay_seconds: Union[
@@ -1250,12 +1334,12 @@ def task(
         Callable[[int], List[float]],
     ] = 0,
     retry_jitter_factor: Optional[float] = None,
-    persist_result: Optional[bool] = None,
+    persist_result: bool = True,
     result_storage: Optional[ResultStorage] = None,
     result_storage_key: Optional[str] = None,
     result_serializer: Optional[ResultSerializer] = None,
     cache_result_in_memory: bool = True,
-    timeout_seconds: Union[int, float] = None,
+    timeout_seconds: Union[int, float, None] = None,
     log_prints: Optional[bool] = None,
     refresh_cache: Optional[bool] = None,
     on_completion: Optional[List[Callable[["Task", TaskRun, State], None]]] = None,
@@ -1269,28 +1353,25 @@ def task(
 def task(
     __fn=None,
     *,
-    name: str = None,
-    description: str = None,
-    tags: Iterable[str] = None,
-    version: str = None,
-    cache_policy: CachePolicy = NotSet,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    tags: Optional[Iterable[str]] = None,
+    version: Optional[str] = None,
+    cache_policy: Union[CachePolicy, Type[NotSet]] = NotSet,
     cache_key_fn: Callable[["TaskRunContext", Dict[str, Any]], Optional[str]] = None,
-    cache_expiration: datetime.timedelta = None,
+    cache_expiration: Optional[datetime.timedelta] = None,
     task_run_name: Optional[Union[Callable[[], str], str]] = None,
-    retries: int = None,
+    retries: Optional[int] = None,
     retry_delay_seconds: Union[
-        float,
-        int,
-        List[float],
-        Callable[[int], List[float]],
+        float, int, List[float], Callable[[int], List[float]], None
     ] = None,
     retry_jitter_factor: Optional[float] = None,
-    persist_result: Optional[bool] = None,
+    persist_result: bool = True,
     result_storage: Optional[ResultStorage] = None,
     result_storage_key: Optional[str] = None,
     result_serializer: Optional[ResultSerializer] = None,
     cache_result_in_memory: bool = True,
-    timeout_seconds: Union[int, float] = None,
+    timeout_seconds: Union[int, float, None] = None,
     log_prints: Optional[bool] = None,
     refresh_cache: Optional[bool] = None,
     on_completion: Optional[List[Callable[["Task", TaskRun, State], None]]] = None,
@@ -1331,10 +1412,8 @@ def task(
             cannot exceed 50.
         retry_jitter_factor: An optional factor that defines the factor to which a retry
             can be jittered in order to avoid a "thundering herd".
-        persist_result: An optional toggle indicating whether the result of this task
-            should be persisted to result storage. Defaults to `None`, which indicates
-            that Prefect should choose whether the result should be persisted depending on
-            the features being used.
+        persist_result: A toggle indicating whether the result of this task
+            should be persisted to result storage. Defaults to `True`.
         result_storage: An optional block to use to persist the result of this task.
             Defaults to the value set in the flow the task is called in.
         result_storage_key: An optional key to store the result in storage at when persisted.
@@ -1408,6 +1487,9 @@ def task(
     """
 
     if __fn:
+        if isinstance(__fn, (classmethod, staticmethod)):
+            method_decorator = type(__fn).__name__
+            raise TypeError(f"@{method_decorator} should be applied on top of @task")
         return cast(
             Task[P, R],
             Task(
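Reviewer note: the new guard rejects a `classmethod` or `staticmethod` object passed directly to `@task`, so the method decorator must sit on top, as the error message says. A sketch of the two orderings (hypothetical class):

```python
from prefect import task

class Jobs:
    @classmethod   # the ordering the guard asks for: @classmethod on top of @task
    @task
    def build(cls) -> str:
        return cls.__name__

# Reversing the order hands @task a classmethod object and now raises:
#
#     @task          # wrong order
#     @classmethod
#     def build(cls): ...
#
# TypeError: @classmethod should be applied on top of @task
```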