prefect-client 2.16.1__py3-none-any.whl → 2.16.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registry.
@@ -91,6 +91,11 @@ class QueueService(abc.ABC, Generic[T]):
         self._remove_instance()
         self._stopped = True
 
+        # Allow asyncio task to be garbage-collected. Its context may contain
+        # references to all Prefect Task calls made during a flow run, through
+        # EngineContext. Issue #10338.
+        self._task = None
+
         # Signal completion to the loop
         self._queue.put_nowait(None)
 
@@ -214,6 +214,9 @@ class EventLoopThread(Portal):
         for call in self._on_shutdown:
             await self._run_call(call)
 
+        # Empty the list to allow calls to be garbage collected. Issue #10338.
+        self._on_shutdown = []
+
     async def _run_call(self, call: Call) -> None:
         task = call.run()
         if task is not None:
@@ -2215,6 +2215,24 @@ class PrefectClient:
         response = await self._client.post("/task_runs/filter", json=body)
         return pydantic.parse_obj_as(List[TaskRun], response.json())
 
+    async def delete_task_run(self, task_run_id: UUID) -> None:
+        """
+        Delete a task run by id.
+
+        Args:
+            task_run_id: the task run ID of interest
+        Raises:
+            prefect.exceptions.ObjectNotFound: If request returns 404
+            httpx.RequestError: If the request fails
+        """
+        try:
+            await self._client.delete(f"/task_runs/{task_run_id}")
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
+            else:
+                raise
+
     async def set_task_run_state(
         self,
         task_run_id: UUID,
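For context, a minimal sketch of how a caller might use the new `delete_task_run` helper; the wrapper function and its name are illustrative, not part of the package:

```python
from uuid import UUID

from prefect import get_client
from prefect.exceptions import ObjectNotFound


async def remove_task_run(task_run_id: UUID) -> None:
    # get_client() returns a PrefectClient configured from the active profile
    async with get_client() as client:
        try:
            await client.delete_task_run(task_run_id)
        except ObjectNotFound:
            # The run was already deleted; the API returned a 404
            pass
```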
@@ -12,4 +12,9 @@ from prefect.deployments.deployments import (
     load_deployments_from_yaml,
     Deployment,
 )
-from prefect.deployments.runner import RunnerDeployment, deploy, DeploymentImage
+from prefect.deployments.runner import (
+    RunnerDeployment,
+    deploy,
+    DeploymentImage,
+    EntrypointType,
+)
@@ -36,6 +36,10 @@ from prefect.client.schemas.objects import (
 from prefect.client.schemas.schedules import SCHEDULE_TYPES
 from prefect.client.utilities import inject_client
 from prefect.context import FlowRunContext, PrefectObjectRegistry, TaskRunContext
+from prefect.deployments.schedules import (
+    FlexibleScheduleList,
+    normalize_to_minimal_deployment_schedules,
+)
 from prefect.deployments.steps.core import run_steps
 from prefect.events.schemas import DeploymentTrigger
 from prefect.exceptions import (
@@ -222,12 +226,28 @@ async def load_flow_from_flow_run(
     is largely for testing, and assumes the flow is already available locally.
     """
     deployment = await client.read_deployment(flow_run.deployment_id)
+
+    if deployment.entrypoint is None:
+        raise ValueError(
+            f"Deployment {deployment.id} does not have an entrypoint and can not be run."
+        )
+
     run_logger = flow_run_logger(flow_run)
 
     runner_storage_base_path = storage_base_path or os.environ.get(
         "PREFECT__STORAGE_BASE_PATH"
     )
 
+    # If there's no colon, assume it's a module path
+    if ":" not in deployment.entrypoint:
+        run_logger.debug(
+            f"Importing flow code from module path {deployment.entrypoint}"
+        )
+        flow = await run_sync_in_worker_thread(
+            load_flow_from_entrypoint, deployment.entrypoint
+        )
+        return flow
+
     if not ignore_storage and not deployment.pull_steps:
         sys.path.insert(0, ".")
         if deployment.storage_document_id:
@@ -259,8 +279,6 @@ async def load_flow_from_flow_run(
             os.chdir(output["directory"])
 
     import_path = relative_path_to_current_platform(deployment.entrypoint)
-    run_logger.debug(f"Importing flow code from '{import_path}'")
-
     # for backwards compat
     if deployment.manifest_path:
         with open(deployment.manifest_path, "r") as f:
@@ -268,7 +286,10 @@ async def load_flow_from_flow_run(
             import_path = (
                 Path(deployment.manifest_path).parent / import_path
             ).absolute()
+    run_logger.debug(f"Importing flow code from '{import_path}'")
+
     flow = await run_sync_in_worker_thread(load_flow_from_entrypoint, str(import_path))
+
     return flow
 
 
@@ -632,11 +653,17 @@ class Deployment(BaseModel):
         cls._validate_schedule(value)
         return value
 
-    @validator("schedules")
-    def validate_schedules(cls, value):
-        for schedule in value:
-            cls._validate_schedule(schedule.schedule)
-        return value
+    @root_validator(pre=True)
+    def validate_schedules(cls, values):
+        if "schedules" in values:
+            values["schedules"] = normalize_to_minimal_deployment_schedules(
+                values["schedules"]
+            )
+
+            for schedule in values["schedules"]:
+                cls._validate_schedule(schedule.schedule)
+
+        return values
 
     @classmethod
     @sync_compatible
@@ -919,6 +946,7 @@ class Deployment(BaseModel):
         ignore_file: str = ".prefectignore",
         apply: bool = False,
         load_existing: bool = True,
+        schedules: Optional[FlexibleScheduleList] = None,
         **kwargs,
     ) -> "Deployment":
         """
@@ -938,6 +966,14 @@ class Deployment(BaseModel):
             load_existing: if True, load any settings that may already be configured for
                 the named deployment server-side (e.g., schedules, default parameter
                 values, etc.)
+            schedules: An optional list of schedules. Each item in the list can be:
+                - An instance of `MinimalDeploymentSchedule`.
+                - A dictionary with a `schedule` key, and optionally, an
+                  `active` key. The `schedule` key should correspond to a
+                  schedule type, and `active` is a boolean indicating whether
+                  the schedule is active or not.
+                - An instance of one of the predefined schedule types:
+                  `IntervalSchedule`, `CronSchedule`, or `RRuleSchedule`.
             **kwargs: other keyword arguments to pass to the constructor for the
                 `Deployment` class
         """
@@ -946,7 +982,11 @@ class Deployment(BaseModel):
 
         # note that `deployment.load` only updates settings that were *not*
         # provided at initialization
-        deployment = cls(name=name, **kwargs)
+        deployment = cls(
+            name=name,
+            schedules=schedules,
+            **kwargs,
+        )
         deployment.flow_name = flow.name
         if not deployment.entrypoint:
             ## first see if an entrypoint can be determined
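To illustrate the new argument, here is a hedged sketch of passing `schedules` to `Deployment.build_from_flow` using the forms listed in the docstring above; the flow, deployment name, and cron strings are placeholders:

```python
from prefect import flow
from prefect.client.schemas.schedules import CronSchedule
from prefect.deployments import Deployment


@flow
def my_flow():
    ...


# Each entry may be a schedule object, a dict with `schedule`/`active` keys,
# or a MinimalDeploymentSchedule; the root validator normalizes them.
deployment = Deployment.build_from_flow(
    flow=my_flow,
    name="example",
    schedules=[
        CronSchedule(cron="0 9 * * *"),
        {"schedule": CronSchedule(cron="0 18 * * *"), "active": False},
    ],
)
```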
@@ -29,11 +29,12 @@ Example:
 
 """
 
+import enum
 import importlib
 import tempfile
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union, get_args
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
 from uuid import UUID
 
 import pendulum
@@ -62,6 +63,11 @@ from prefect.client.schemas.schedules import (
     SCHEDULE_TYPES,
     construct_schedule,
 )
+from prefect.deployments.schedules import (
+    FlexibleScheduleList,
+    create_minimal_deployment_schedule,
+    normalize_to_minimal_deployment_schedules,
+)
 from prefect.events.schemas import DeploymentTrigger
 from prefect.exceptions import (
     ObjectNotFound,
@@ -82,24 +88,27 @@ from prefect.utilities.slugify import slugify
 if TYPE_CHECKING:
     from prefect.flows import Flow
 
-FlexibleScheduleList = Union[MinimalDeploymentSchedule, dict, SCHEDULE_TYPES]
-
 __all__ = ["RunnerDeployment"]
 
 
-def _to_deployment_schedule(
-    schedule: Optional[SCHEDULE_TYPES] = None,
-    active: Optional[bool] = True,
-) -> MinimalDeploymentSchedule:
-    return MinimalDeploymentSchedule(schedule=schedule, active=active)
-
-
 class DeploymentApplyError(RuntimeError):
     """
     Raised when an error occurs while applying a deployment.
     """
 
 
+class EntrypointType(enum.Enum):
+    """
+    Enum representing an entrypoint type.
+
+    File path entrypoints are in the format: `path/to/file.py:function_name`.
+    Module path entrypoints are in the format: `path.to.module.function_name`.
+    """
+
+    FILE_PATH = "file_path"
+    MODULE_PATH = "module_path"
+
+
 class RunnerDeployment(BaseModel):
     """
     A Prefect RunnerDeployment definition, used for specifying and building deployments.
@@ -205,6 +214,9 @@ class RunnerDeployment(BaseModel):
             " a built runner."
         ),
     )
+    _entrypoint_type: EntrypointType = PrivateAttr(
+        default=EntrypointType.FILE_PATH,
+    )
     _path: Optional[str] = PrivateAttr(
         default=None,
     )
@@ -212,6 +224,10 @@ class RunnerDeployment(BaseModel):
         default_factory=ParameterSchema,
     )
 
+    @property
+    def entrypoint_type(self) -> EntrypointType:
+        return self._entrypoint_type
+
     @validator("triggers", allow_reuse=True)
     def validate_automation_names(cls, field_value, values, field, config):
         """Ensure that each trigger has a name for its automation if none is provided."""
@@ -244,23 +260,9 @@ class RunnerDeployment(BaseModel):
         schedules = values.get("schedules")
 
         if schedules is None and schedule is not None:
-            values["schedules"] = [_to_deployment_schedule(schedule)]
+            values["schedules"] = [create_minimal_deployment_schedule(schedule)]
         elif schedules is not None and len(schedules) > 0:
-            reconciled = []
-            for obj in schedules:
-                if isinstance(obj, get_args(SCHEDULE_TYPES)):
-                    reconciled.append(_to_deployment_schedule(obj))
-                elif isinstance(obj, dict):
-                    reconciled.append(_to_deployment_schedule(**obj))
-                elif isinstance(obj, MinimalDeploymentSchedule):
-                    reconciled.append(obj)
-                else:
-                    raise ValueError(
-                        "Invalid schedule provided. Must be a schedule object, a dict,"
-                        " or a MinimalDeploymentSchedule."
-                    )
-
-            values["schedules"] = reconciled
+            values["schedules"] = normalize_to_minimal_deployment_schedules(schedules)
 
         return values
 
@@ -369,8 +371,8 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         timezone: Optional[str] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
-        schedules: Optional[List[FlexibleScheduleList]] = None,
-    ) -> Union[List[MinimalDeploymentSchedule], List[FlexibleScheduleList]]:
+        schedules: Optional[FlexibleScheduleList] = None,
+    ) -> Union[List[MinimalDeploymentSchedule], FlexibleScheduleList]:
         """
         Construct a schedule or schedules from the provided arguments.
 
@@ -428,7 +430,7 @@ class RunnerDeployment(BaseModel):
                 value = [value]
 
             return [
-                _to_deployment_schedule(
+                create_minimal_deployment_schedule(
                     construct_schedule(
                         **{
                             schedule_type: v,
@@ -440,7 +442,7 @@ class RunnerDeployment(BaseModel):
                 for v in value
             ]
         else:
-            return [_to_deployment_schedule(schedule)]
+            return [create_minimal_deployment_schedule(schedule)]
 
     def _set_defaults_from_flow(self, flow: "Flow"):
         self._parameter_openapi_schema = parameter_schema(flow)
@@ -461,7 +463,7 @@ class RunnerDeployment(BaseModel):
         cron: Optional[Union[Iterable[str], str]] = None,
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
-        schedules: Optional[List[FlexibleScheduleList]] = None,
+        schedules: Optional[FlexibleScheduleList] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
         is_schedule_active: Optional[bool] = None,
         parameters: Optional[dict] = None,
@@ -473,6 +475,7 @@ class RunnerDeployment(BaseModel):
         work_pool_name: Optional[str] = None,
         work_queue_name: Optional[str] = None,
         job_variables: Optional[Dict[str, Any]] = None,
+        entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
     ) -> "RunnerDeployment":
         """
         Configure a deployment for a given flow.
@@ -545,30 +548,42 @@ class RunnerDeployment(BaseModel):
             ## first see if an entrypoint can be determined
             flow_file = getattr(flow, "__globals__", {}).get("__file__")
             mod_name = getattr(flow, "__module__", None)
-            if not flow_file:
-                if not mod_name:
-                    raise ValueError(no_file_location_error)
-                try:
-                    module = importlib.import_module(mod_name)
-                    flow_file = getattr(module, "__file__", None)
-                except ModuleNotFoundError as exc:
-                    if "__prefect_loader__" in str(exc):
-                        raise ValueError(
-                            "Cannot create a RunnerDeployment from a flow that has been"
-                            " loaded from an entrypoint. To deploy a flow via"
-                            " entrypoint, use RunnerDeployment.from_entrypoint instead."
-                        )
-                    raise ValueError(no_file_location_error)
+            if entrypoint_type == EntrypointType.MODULE_PATH:
+                if mod_name:
+                    deployment.entrypoint = f"{mod_name}.{flow.__name__}"
+                else:
+                    raise ValueError(
+                        "Unable to determine module path for provided flow."
+                    )
+            else:
                 if not flow_file:
-                    raise ValueError(no_file_location_error)
-
-            # set entrypoint
-            entry_path = Path(flow_file).absolute().relative_to(Path.cwd().absolute())
-            deployment.entrypoint = f"{entry_path}:{flow.fn.__name__}"
+                    if not mod_name:
+                        raise ValueError(no_file_location_error)
+                    try:
+                        module = importlib.import_module(mod_name)
+                        flow_file = getattr(module, "__file__", None)
+                    except ModuleNotFoundError as exc:
+                        if "__prefect_loader__" in str(exc):
+                            raise ValueError(
+                                "Cannot create a RunnerDeployment from a flow that has been"
+                                " loaded from an entrypoint. To deploy a flow via"
+                                " entrypoint, use RunnerDeployment.from_entrypoint instead."
+                            )
+                        raise ValueError(no_file_location_error)
+                    if not flow_file:
+                        raise ValueError(no_file_location_error)
+
+                # set entrypoint
+                entry_path = (
+                    Path(flow_file).absolute().relative_to(Path.cwd().absolute())
+                )
+                deployment.entrypoint = f"{entry_path}:{flow.fn.__name__}"
 
-        if not deployment._path:
+        if entrypoint_type == EntrypointType.FILE_PATH and not deployment._path:
            deployment._path = "."
 
+        deployment._entrypoint_type = entrypoint_type
+
        cls._set_defaults_from_flow(deployment, flow)
 
        return deployment
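As a rough illustration of the new option (assuming the flow is importable as a module attribute, e.g. the hypothetical `my_package.flows.my_flow`), a module-path entrypoint could be requested like this; per the `deploy()` hunk further below, such deployments do not require an image or remote storage:

```python
from prefect.deployments.runner import EntrypointType, RunnerDeployment

# Hypothetical flow imported from an installed package; its module path
# (rather than a file path) becomes the deployment entrypoint.
from my_package.flows import my_flow

deployment = RunnerDeployment.from_flow(
    flow=my_flow,
    name="module-path-example",
    entrypoint_type=EntrypointType.MODULE_PATH,
)
```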
@@ -584,7 +599,7 @@ class RunnerDeployment(BaseModel):
         cron: Optional[Union[Iterable[str], str]] = None,
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
-        schedules: Optional[List[FlexibleScheduleList]] = None,
+        schedules: Optional[FlexibleScheduleList] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
         is_schedule_active: Optional[bool] = None,
         parameters: Optional[dict] = None,
@@ -682,7 +697,7 @@ class RunnerDeployment(BaseModel):
         cron: Optional[Union[Iterable[str], str]] = None,
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
-        schedules: Optional[List[FlexibleScheduleList]] = None,
+        schedules: Optional[FlexibleScheduleList] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
         is_schedule_active: Optional[bool] = None,
         parameters: Optional[dict] = None,
@@ -905,7 +920,10 @@ async def deploy(
     """
     work_pool_name = work_pool_name or PREFECT_DEFAULT_WORK_POOL_NAME.value()
 
-    if not image and not all(d.storage for d in deployments):
+    if not image and not all(
+        d.storage or d.entrypoint_type == EntrypointType.MODULE_PATH
+        for d in deployments
+    ):
         raise ValueError(
             "Either an image or remote storage location must be provided when deploying"
             " a deployment."
@@ -0,0 +1,37 @@
+from typing import List, Optional, Sequence, Union, get_args
+
+from prefect.client.schemas.objects import MinimalDeploymentSchedule
+from prefect.client.schemas.schedules import SCHEDULE_TYPES
+
+FlexibleScheduleList = Sequence[Union[MinimalDeploymentSchedule, dict, SCHEDULE_TYPES]]
+
+
+def create_minimal_deployment_schedule(
+    schedule: SCHEDULE_TYPES,
+    active: Optional[bool] = True,
+) -> MinimalDeploymentSchedule:
+    return MinimalDeploymentSchedule(
+        schedule=schedule,
+        active=active if active is not None else True,
+    )
+
+
+def normalize_to_minimal_deployment_schedules(
+    schedules: Optional[FlexibleScheduleList],
+) -> List[MinimalDeploymentSchedule]:
+    normalized = []
+    if schedules is not None:
+        for obj in schedules:
+            if isinstance(obj, get_args(SCHEDULE_TYPES)):
+                normalized.append(create_minimal_deployment_schedule(obj))
+            elif isinstance(obj, dict):
+                normalized.append(create_minimal_deployment_schedule(**obj))
+            elif isinstance(obj, MinimalDeploymentSchedule):
+                normalized.append(obj)
+            else:
+                raise ValueError(
+                    "Invalid schedule provided. Must be a schedule object, a dict,"
+                    " or a MinimalDeploymentSchedule."
+                )
+
+    return normalized
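To make the new module's behavior concrete, a small usage sketch of the normalization helper added above (schedule values are arbitrary examples):

```python
from datetime import timedelta

from prefect.client.schemas.schedules import CronSchedule, IntervalSchedule
from prefect.deployments.schedules import (
    create_minimal_deployment_schedule,
    normalize_to_minimal_deployment_schedules,
)

# Mixed input forms are all coerced to MinimalDeploymentSchedule objects.
schedules = normalize_to_minimal_deployment_schedules(
    [
        CronSchedule(cron="0 * * * *"),
        {"schedule": IntervalSchedule(interval=timedelta(hours=1)), "active": False},
        create_minimal_deployment_schedule(CronSchedule(cron="30 8 * * 1")),
    ]
)
assert schedules[1].active is False
```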
prefect/engine.py CHANGED
@@ -80,6 +80,7 @@ Client-side execution and orchestration of flows and tasks.
 _Ideally, for local and sequential task runners we would send the task run to the
 user thread as we do for flows. See [#9855](https://github.com/PrefectHQ/prefect/pull/9855).
 """
+
 import asyncio
 import contextlib
 import logging
@@ -169,6 +170,7 @@ from prefect.logging.loggers import (
 from prefect.results import BaseResult, ResultFactory, UnknownResult
 from prefect.settings import (
     PREFECT_DEBUG_MODE,
+    PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING,
     PREFECT_LOGGING_LOG_PRINTS,
     PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD,
     PREFECT_TASKS_REFRESH_CACHE,
@@ -799,6 +801,8 @@ async def orchestrate_flow_run(
     # flag to ensure we only update the flow run name once
     run_name_set = False
 
+    await _run_flow_hooks(flow=flow, flow_run=flow_run, state=state)
+
     while state.is_running():
         waited_for_task_runs = False
 
@@ -954,7 +958,6 @@ async def orchestrate_flow_run(
                 f"Received non-final state {state.name!r} when proposing final"
                 f" state {terminal_state.name!r} and will attempt to run again..."
             ),
-            extra={"send_to_api": False},
         )
         # Attempt to enter a running state again
         state = await propose_state(client, Running(), flow_run_id=flow_run.id)
@@ -1368,10 +1371,28 @@ def enter_task_run_engine(
     flow_run_context = FlowRunContext.get()
 
     if not flow_run_context:
-        raise RuntimeError(
-            "Tasks cannot be run outside of a flow"
-            " - if you meant to submit an autonomous task, you need to set"
-            " `prefect config set PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING=true`"
+        if (
+            not PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING.value()
+            or return_type == "future"
+            or mapped
+        ):
+            raise RuntimeError(
+                "Tasks cannot be run outside of a flow by default."
+                " If you meant to submit an autonomous task, you need to set"
+                " `prefect config set PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING=true`"
+                " and use `your_task.submit()` instead of `your_task()`."
+                " Mapping autonomous tasks is not yet supported."
+            )
+        from prefect.task_engine import submit_autonomous_task_run_to_engine
+
+        return submit_autonomous_task_run_to_engine(
+            task=task,
+            task_run=None,
+            parameters=parameters,
+            task_runner=task_runner,
+            wait_for=wait_for,
+            return_type=return_type,
+            client=get_client(),
         )
 
     if TaskRunContext.get():
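Putting the new branch in context: with the experimental setting enabled, a task invoked outside of a flow is routed to the task engine instead of raising. A minimal sketch of the pattern the new error message describes (it assumes a task server or worker is available to execute the run):

```python
from prefect import task

# Assumes the experimental setting has been enabled first, e.g.:
#   prefect config set PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING=true


@task
def add(x: int, y: int) -> int:
    return x + y


if __name__ == "__main__":
    # Outside of a flow, submit the task rather than calling it directly,
    # as the new RuntimeError message above suggests.
    add.submit(1, 2)
```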
@@ -1402,12 +1423,13 @@ def enter_task_run_engine(
 
 async def begin_task_map(
     task: Task,
-    flow_run_context: FlowRunContext,
+    flow_run_context: Optional[FlowRunContext],
     parameters: Dict[str, Any],
     wait_for: Optional[Iterable[PrefectFuture]],
     return_type: EngineReturnType,
     task_runner: Optional[BaseTaskRunner],
-) -> List[Union[PrefectFuture, Awaitable[PrefectFuture]]]:
+    autonomous: bool = False,
+) -> List[Union[PrefectFuture, Awaitable[PrefectFuture], TaskRun]]:
     """Async entrypoint for task mapping"""
     # We need to resolve some futures to map over their data, collect the upstream
     # links beforehand to retain relationship tracking.
@@ -1474,18 +1496,29 @@ async def begin_task_map(
         # Collapse any previously exploded kwargs
         call_parameters = collapse_variadic_parameters(task.fn, call_parameters)
 
-        task_runs.append(
-            partial(
-                get_task_call_return_value,
-                task=task,
-                flow_run_context=flow_run_context,
-                parameters=call_parameters,
-                wait_for=wait_for,
-                return_type=return_type,
-                task_runner=task_runner,
-                extra_task_inputs=task_inputs,
+        if autonomous:
+            task_runs.append(
+                await create_autonomous_task_run(
+                    task=task,
+                    parameters=call_parameters,
+                )
+            )
+        else:
+            task_runs.append(
+                partial(
+                    get_task_call_return_value,
+                    task=task,
+                    flow_run_context=flow_run_context,
+                    parameters=call_parameters,
+                    wait_for=wait_for,
+                    return_type=return_type,
+                    task_runner=task_runner,
+                    extra_task_inputs=task_inputs,
+                )
             )
-        )
+
+    if autonomous:
+        return task_runs
 
     # Maintain the order of the task runs when using the sequential task runner
     runner = task_runner if task_runner else flow_run_context.task_runner
@@ -1697,7 +1730,10 @@ async def create_task_run(
         task_inputs=task_inputs,
     )
 
-    logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
+    if flow_run_context.flow_run:
+        logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
+    else:
+        engine_logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
 
     return task_run
 
@@ -1715,7 +1751,7 @@ async def submit_task_run(
 
     if (
         task_runner.concurrency_type == TaskConcurrencyType.SEQUENTIAL
-        and not flow_run_context.autonomous_task_run
+        and flow_run_context.flow_run
     ):
         logger.info(f"Executing {task_run.name!r} immediately...")
 
@@ -2181,7 +2217,6 @@ async def orchestrate_task_run(
                 f" state {terminal_state.name!r} and will attempt to run"
                 " again..."
             ),
-            extra={"send_to_api": False},
         )
         # Attempt to enter a running state again
         state = await propose_state(client, Running(), task_run_id=task_run.id)
@@ -2801,7 +2836,10 @@ async def _run_flow_hooks(flow: Flow, flow_run: FlowRun, state: State) -> None:
         os.environ.get("PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS", "true").lower()
         == "true"
     )
-    if state.is_failed() and flow.on_failure:
+
+    if state.is_running() and flow.on_running:
+        hooks = flow.on_running
+    elif state.is_failed() and flow.on_failure:
         hooks = flow.on_failure
     elif state.is_completed() and flow.on_completion:
         hooks = flow.on_completion
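For reference, the new branch lets flows fire hooks when they enter a Running state, alongside the existing completion and failure hooks. A hedged sketch, assuming `on_running` is accepted by the `@flow` decorator in the same way as the existing hook keywords:

```python
from prefect import flow


def notify_started(flow, flow_run, state):
    # Same (flow, flow_run, state) signature as the other flow state hooks
    print(f"{flow_run.name} entered state {state.name}")


# Assumption: `on_running` is configured like on_completion / on_failure.
@flow(on_running=[notify_started])
def my_flow():
    return 42
```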
@@ -2924,7 +2962,8 @@ async def create_autonomous_task_run(task: Task, parameters: Dict[str, Any]) ->
 
             # TODO: Improve use of result storage for parameter storage / reference
             task.persist_result = True
-            factory = await ResultFactory.from_task(task, client=client)
+
+            factory = await ResultFactory.from_autonomous_task(task, client=client)
             await factory.store_parameters(parameters_id, parameters)
 
         task_run = await client.create_task_run(