prefect-client 2.20.2__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in their respective public registries.
Files changed (288)
  1. prefect/__init__.py +74 -110
  2. prefect/_internal/compatibility/deprecated.py +6 -115
  3. prefect/_internal/compatibility/experimental.py +4 -79
  4. prefect/_internal/compatibility/migration.py +166 -0
  5. prefect/_internal/concurrency/__init__.py +2 -2
  6. prefect/_internal/concurrency/api.py +1 -35
  7. prefect/_internal/concurrency/calls.py +0 -6
  8. prefect/_internal/concurrency/cancellation.py +0 -3
  9. prefect/_internal/concurrency/event_loop.py +0 -20
  10. prefect/_internal/concurrency/inspection.py +3 -3
  11. prefect/_internal/concurrency/primitives.py +1 -0
  12. prefect/_internal/concurrency/services.py +23 -0
  13. prefect/_internal/concurrency/threads.py +35 -0
  14. prefect/_internal/concurrency/waiters.py +0 -28
  15. prefect/_internal/integrations.py +7 -0
  16. prefect/_internal/pydantic/__init__.py +0 -45
  17. prefect/_internal/pydantic/annotations/pendulum.py +2 -2
  18. prefect/_internal/pydantic/v1_schema.py +21 -22
  19. prefect/_internal/pydantic/v2_schema.py +0 -2
  20. prefect/_internal/pydantic/v2_validated_func.py +18 -23
  21. prefect/_internal/pytz.py +1 -1
  22. prefect/_internal/retries.py +61 -0
  23. prefect/_internal/schemas/bases.py +45 -177
  24. prefect/_internal/schemas/fields.py +1 -43
  25. prefect/_internal/schemas/validators.py +47 -233
  26. prefect/agent.py +3 -695
  27. prefect/artifacts.py +173 -14
  28. prefect/automations.py +39 -4
  29. prefect/blocks/abstract.py +1 -1
  30. prefect/blocks/core.py +423 -164
  31. prefect/blocks/fields.py +2 -57
  32. prefect/blocks/notifications.py +43 -28
  33. prefect/blocks/redis.py +168 -0
  34. prefect/blocks/system.py +67 -20
  35. prefect/blocks/webhook.py +2 -9
  36. prefect/cache_policies.py +239 -0
  37. prefect/client/__init__.py +4 -0
  38. prefect/client/base.py +33 -27
  39. prefect/client/cloud.py +65 -20
  40. prefect/client/collections.py +1 -1
  41. prefect/client/orchestration.py +667 -440
  42. prefect/client/schemas/actions.py +115 -100
  43. prefect/client/schemas/filters.py +46 -52
  44. prefect/client/schemas/objects.py +228 -178
  45. prefect/client/schemas/responses.py +18 -36
  46. prefect/client/schemas/schedules.py +55 -36
  47. prefect/client/schemas/sorting.py +2 -0
  48. prefect/client/subscriptions.py +8 -7
  49. prefect/client/types/flexible_schedule_list.py +11 -0
  50. prefect/client/utilities.py +9 -6
  51. prefect/concurrency/asyncio.py +60 -11
  52. prefect/concurrency/context.py +24 -0
  53. prefect/concurrency/events.py +2 -2
  54. prefect/concurrency/services.py +46 -16
  55. prefect/concurrency/sync.py +51 -7
  56. prefect/concurrency/v1/asyncio.py +143 -0
  57. prefect/concurrency/v1/context.py +27 -0
  58. prefect/concurrency/v1/events.py +61 -0
  59. prefect/concurrency/v1/services.py +116 -0
  60. prefect/concurrency/v1/sync.py +92 -0
  61. prefect/context.py +246 -149
  62. prefect/deployments/__init__.py +33 -18
  63. prefect/deployments/base.py +10 -15
  64. prefect/deployments/deployments.py +2 -1048
  65. prefect/deployments/flow_runs.py +178 -0
  66. prefect/deployments/runner.py +72 -173
  67. prefect/deployments/schedules.py +31 -25
  68. prefect/deployments/steps/__init__.py +0 -1
  69. prefect/deployments/steps/core.py +7 -0
  70. prefect/deployments/steps/pull.py +15 -21
  71. prefect/deployments/steps/utility.py +2 -1
  72. prefect/docker/__init__.py +20 -0
  73. prefect/docker/docker_image.py +82 -0
  74. prefect/engine.py +15 -2466
  75. prefect/events/actions.py +17 -23
  76. prefect/events/cli/automations.py +20 -7
  77. prefect/events/clients.py +142 -80
  78. prefect/events/filters.py +14 -18
  79. prefect/events/related.py +74 -75
  80. prefect/events/schemas/__init__.py +0 -5
  81. prefect/events/schemas/automations.py +55 -46
  82. prefect/events/schemas/deployment_triggers.py +7 -197
  83. prefect/events/schemas/events.py +46 -65
  84. prefect/events/schemas/labelling.py +10 -14
  85. prefect/events/utilities.py +4 -5
  86. prefect/events/worker.py +23 -8
  87. prefect/exceptions.py +15 -0
  88. prefect/filesystems.py +30 -529
  89. prefect/flow_engine.py +827 -0
  90. prefect/flow_runs.py +379 -7
  91. prefect/flows.py +470 -360
  92. prefect/futures.py +382 -331
  93. prefect/infrastructure/__init__.py +5 -26
  94. prefect/infrastructure/base.py +3 -320
  95. prefect/infrastructure/provisioners/__init__.py +5 -3
  96. prefect/infrastructure/provisioners/cloud_run.py +13 -8
  97. prefect/infrastructure/provisioners/container_instance.py +14 -9
  98. prefect/infrastructure/provisioners/ecs.py +10 -8
  99. prefect/infrastructure/provisioners/modal.py +8 -5
  100. prefect/input/__init__.py +4 -0
  101. prefect/input/actions.py +2 -4
  102. prefect/input/run_input.py +9 -9
  103. prefect/logging/formatters.py +2 -4
  104. prefect/logging/handlers.py +9 -14
  105. prefect/logging/loggers.py +5 -5
  106. prefect/main.py +72 -0
  107. prefect/plugins.py +2 -64
  108. prefect/profiles.toml +16 -2
  109. prefect/records/__init__.py +1 -0
  110. prefect/records/base.py +223 -0
  111. prefect/records/filesystem.py +207 -0
  112. prefect/records/memory.py +178 -0
  113. prefect/records/result_store.py +64 -0
  114. prefect/results.py +577 -504
  115. prefect/runner/runner.py +124 -51
  116. prefect/runner/server.py +32 -34
  117. prefect/runner/storage.py +3 -12
  118. prefect/runner/submit.py +2 -10
  119. prefect/runner/utils.py +2 -2
  120. prefect/runtime/__init__.py +1 -0
  121. prefect/runtime/deployment.py +1 -0
  122. prefect/runtime/flow_run.py +40 -5
  123. prefect/runtime/task_run.py +1 -0
  124. prefect/serializers.py +28 -39
  125. prefect/server/api/collections_data/views/aggregate-worker-metadata.json +5 -14
  126. prefect/settings.py +209 -332
  127. prefect/states.py +160 -63
  128. prefect/task_engine.py +1478 -57
  129. prefect/task_runners.py +383 -287
  130. prefect/task_runs.py +240 -0
  131. prefect/task_worker.py +463 -0
  132. prefect/tasks.py +684 -374
  133. prefect/transactions.py +410 -0
  134. prefect/types/__init__.py +72 -86
  135. prefect/types/entrypoint.py +13 -0
  136. prefect/utilities/annotations.py +4 -3
  137. prefect/utilities/asyncutils.py +227 -148
  138. prefect/utilities/callables.py +138 -48
  139. prefect/utilities/collections.py +134 -86
  140. prefect/utilities/dispatch.py +27 -14
  141. prefect/utilities/dockerutils.py +11 -4
  142. prefect/utilities/engine.py +186 -32
  143. prefect/utilities/filesystem.py +4 -5
  144. prefect/utilities/importtools.py +26 -27
  145. prefect/utilities/pydantic.py +128 -38
  146. prefect/utilities/schema_tools/hydration.py +18 -1
  147. prefect/utilities/schema_tools/validation.py +30 -0
  148. prefect/utilities/services.py +35 -9
  149. prefect/utilities/templating.py +12 -2
  150. prefect/utilities/timeout.py +20 -5
  151. prefect/utilities/urls.py +195 -0
  152. prefect/utilities/visualization.py +1 -0
  153. prefect/variables.py +78 -59
  154. prefect/workers/__init__.py +0 -1
  155. prefect/workers/base.py +237 -244
  156. prefect/workers/block.py +5 -226
  157. prefect/workers/cloud.py +6 -0
  158. prefect/workers/process.py +265 -12
  159. prefect/workers/server.py +29 -11
  160. {prefect_client-2.20.2.dist-info → prefect_client-3.0.0.dist-info}/METADATA +30 -26
  161. prefect_client-3.0.0.dist-info/RECORD +201 -0
  162. {prefect_client-2.20.2.dist-info → prefect_client-3.0.0.dist-info}/WHEEL +1 -1
  163. prefect/_internal/pydantic/_base_model.py +0 -51
  164. prefect/_internal/pydantic/_compat.py +0 -82
  165. prefect/_internal/pydantic/_flags.py +0 -20
  166. prefect/_internal/pydantic/_types.py +0 -8
  167. prefect/_internal/pydantic/utilities/config_dict.py +0 -72
  168. prefect/_internal/pydantic/utilities/field_validator.py +0 -150
  169. prefect/_internal/pydantic/utilities/model_construct.py +0 -56
  170. prefect/_internal/pydantic/utilities/model_copy.py +0 -55
  171. prefect/_internal/pydantic/utilities/model_dump.py +0 -136
  172. prefect/_internal/pydantic/utilities/model_dump_json.py +0 -112
  173. prefect/_internal/pydantic/utilities/model_fields.py +0 -50
  174. prefect/_internal/pydantic/utilities/model_fields_set.py +0 -29
  175. prefect/_internal/pydantic/utilities/model_json_schema.py +0 -82
  176. prefect/_internal/pydantic/utilities/model_rebuild.py +0 -80
  177. prefect/_internal/pydantic/utilities/model_validate.py +0 -75
  178. prefect/_internal/pydantic/utilities/model_validate_json.py +0 -68
  179. prefect/_internal/pydantic/utilities/model_validator.py +0 -87
  180. prefect/_internal/pydantic/utilities/type_adapter.py +0 -71
  181. prefect/_vendor/fastapi/__init__.py +0 -25
  182. prefect/_vendor/fastapi/applications.py +0 -946
  183. prefect/_vendor/fastapi/background.py +0 -3
  184. prefect/_vendor/fastapi/concurrency.py +0 -44
  185. prefect/_vendor/fastapi/datastructures.py +0 -58
  186. prefect/_vendor/fastapi/dependencies/__init__.py +0 -0
  187. prefect/_vendor/fastapi/dependencies/models.py +0 -64
  188. prefect/_vendor/fastapi/dependencies/utils.py +0 -877
  189. prefect/_vendor/fastapi/encoders.py +0 -177
  190. prefect/_vendor/fastapi/exception_handlers.py +0 -40
  191. prefect/_vendor/fastapi/exceptions.py +0 -46
  192. prefect/_vendor/fastapi/logger.py +0 -3
  193. prefect/_vendor/fastapi/middleware/__init__.py +0 -1
  194. prefect/_vendor/fastapi/middleware/asyncexitstack.py +0 -25
  195. prefect/_vendor/fastapi/middleware/cors.py +0 -3
  196. prefect/_vendor/fastapi/middleware/gzip.py +0 -3
  197. prefect/_vendor/fastapi/middleware/httpsredirect.py +0 -3
  198. prefect/_vendor/fastapi/middleware/trustedhost.py +0 -3
  199. prefect/_vendor/fastapi/middleware/wsgi.py +0 -3
  200. prefect/_vendor/fastapi/openapi/__init__.py +0 -0
  201. prefect/_vendor/fastapi/openapi/constants.py +0 -2
  202. prefect/_vendor/fastapi/openapi/docs.py +0 -203
  203. prefect/_vendor/fastapi/openapi/models.py +0 -480
  204. prefect/_vendor/fastapi/openapi/utils.py +0 -485
  205. prefect/_vendor/fastapi/param_functions.py +0 -340
  206. prefect/_vendor/fastapi/params.py +0 -453
  207. prefect/_vendor/fastapi/py.typed +0 -0
  208. prefect/_vendor/fastapi/requests.py +0 -4
  209. prefect/_vendor/fastapi/responses.py +0 -40
  210. prefect/_vendor/fastapi/routing.py +0 -1331
  211. prefect/_vendor/fastapi/security/__init__.py +0 -15
  212. prefect/_vendor/fastapi/security/api_key.py +0 -98
  213. prefect/_vendor/fastapi/security/base.py +0 -6
  214. prefect/_vendor/fastapi/security/http.py +0 -172
  215. prefect/_vendor/fastapi/security/oauth2.py +0 -227
  216. prefect/_vendor/fastapi/security/open_id_connect_url.py +0 -34
  217. prefect/_vendor/fastapi/security/utils.py +0 -10
  218. prefect/_vendor/fastapi/staticfiles.py +0 -1
  219. prefect/_vendor/fastapi/templating.py +0 -3
  220. prefect/_vendor/fastapi/testclient.py +0 -1
  221. prefect/_vendor/fastapi/types.py +0 -3
  222. prefect/_vendor/fastapi/utils.py +0 -235
  223. prefect/_vendor/fastapi/websockets.py +0 -7
  224. prefect/_vendor/starlette/__init__.py +0 -1
  225. prefect/_vendor/starlette/_compat.py +0 -28
  226. prefect/_vendor/starlette/_exception_handler.py +0 -80
  227. prefect/_vendor/starlette/_utils.py +0 -88
  228. prefect/_vendor/starlette/applications.py +0 -261
  229. prefect/_vendor/starlette/authentication.py +0 -159
  230. prefect/_vendor/starlette/background.py +0 -43
  231. prefect/_vendor/starlette/concurrency.py +0 -59
  232. prefect/_vendor/starlette/config.py +0 -151
  233. prefect/_vendor/starlette/convertors.py +0 -87
  234. prefect/_vendor/starlette/datastructures.py +0 -707
  235. prefect/_vendor/starlette/endpoints.py +0 -130
  236. prefect/_vendor/starlette/exceptions.py +0 -60
  237. prefect/_vendor/starlette/formparsers.py +0 -276
  238. prefect/_vendor/starlette/middleware/__init__.py +0 -17
  239. prefect/_vendor/starlette/middleware/authentication.py +0 -52
  240. prefect/_vendor/starlette/middleware/base.py +0 -220
  241. prefect/_vendor/starlette/middleware/cors.py +0 -176
  242. prefect/_vendor/starlette/middleware/errors.py +0 -265
  243. prefect/_vendor/starlette/middleware/exceptions.py +0 -74
  244. prefect/_vendor/starlette/middleware/gzip.py +0 -113
  245. prefect/_vendor/starlette/middleware/httpsredirect.py +0 -19
  246. prefect/_vendor/starlette/middleware/sessions.py +0 -82
  247. prefect/_vendor/starlette/middleware/trustedhost.py +0 -64
  248. prefect/_vendor/starlette/middleware/wsgi.py +0 -147
  249. prefect/_vendor/starlette/py.typed +0 -0
  250. prefect/_vendor/starlette/requests.py +0 -328
  251. prefect/_vendor/starlette/responses.py +0 -347
  252. prefect/_vendor/starlette/routing.py +0 -933
  253. prefect/_vendor/starlette/schemas.py +0 -154
  254. prefect/_vendor/starlette/staticfiles.py +0 -248
  255. prefect/_vendor/starlette/status.py +0 -199
  256. prefect/_vendor/starlette/templating.py +0 -231
  257. prefect/_vendor/starlette/testclient.py +0 -804
  258. prefect/_vendor/starlette/types.py +0 -30
  259. prefect/_vendor/starlette/websockets.py +0 -193
  260. prefect/blocks/kubernetes.py +0 -119
  261. prefect/deprecated/__init__.py +0 -0
  262. prefect/deprecated/data_documents.py +0 -350
  263. prefect/deprecated/packaging/__init__.py +0 -12
  264. prefect/deprecated/packaging/base.py +0 -96
  265. prefect/deprecated/packaging/docker.py +0 -146
  266. prefect/deprecated/packaging/file.py +0 -92
  267. prefect/deprecated/packaging/orion.py +0 -80
  268. prefect/deprecated/packaging/serializers.py +0 -171
  269. prefect/events/instrument.py +0 -135
  270. prefect/infrastructure/container.py +0 -824
  271. prefect/infrastructure/kubernetes.py +0 -920
  272. prefect/infrastructure/process.py +0 -289
  273. prefect/manifests.py +0 -20
  274. prefect/new_flow_engine.py +0 -449
  275. prefect/new_task_engine.py +0 -423
  276. prefect/pydantic/__init__.py +0 -76
  277. prefect/pydantic/main.py +0 -39
  278. prefect/software/__init__.py +0 -2
  279. prefect/software/base.py +0 -50
  280. prefect/software/conda.py +0 -199
  281. prefect/software/pip.py +0 -122
  282. prefect/software/python.py +0 -52
  283. prefect/task_server.py +0 -322
  284. prefect_client-2.20.2.dist-info/RECORD +0 -294
  285. /prefect/{_internal/pydantic/utilities → client/types}/__init__.py +0 -0
  286. /prefect/{_vendor → concurrency/v1}/__init__.py +0 -0
  287. {prefect_client-2.20.2.dist-info → prefect_client-3.0.0.dist-info}/LICENSE +0 -0
  288. {prefect_client-2.20.2.dist-info → prefect_client-3.0.0.dist-info}/top_level.txt +0 -0
prefect/engine.py CHANGED
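Prefect 3.0 removes the 2.x client-side engine wholesale: orchestration moves to the new `prefect/flow_engine.py` (+827) and `prefect/task_engine.py` (+1478) modules listed above, while `prefect/engine.py` loses 2,466 lines and keeps little more than its module header, visible as the context and `+` lines in the truncated hunk below. A plausible sketch of the surviving header, assuming the newly imported `getattr_migration` helper is installed as a PEP 562 module-level `__getattr__` (that assignment is not visible in the truncated hunk):

```python
# Sketch of the surviving prefect/engine.py header in 3.0.0, assembled
# from the context and added lines in the hunk below.
import os  # still imported in 3.0 (context line below)
import sys
from uuid import UUID

from prefect._internal.compatibility.migration import getattr_migration
from prefect.exceptions import (
    Abort,
    Pause,
)
from prefect.logging.loggers import (
    get_logger,
)
from prefect.utilities.asyncutils import (
    run_coro_as_sync,
)

engine_logger = get_logger("engine")

# Assumption, not shown in the truncated hunk: a PEP 562 module
# __getattr__ that redirects names that moved in 3.0 (or raises with a
# migration message for removed ones) when code does, for example,
# `from prefect.engine import pause_flow_run`.
__getattr__ = getattr_migration(__name__)
```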
@@ -1,2472 +1,22 @@
1
- """
2
- Client-side execution and orchestration of flows and tasks.
3
-
4
- ## Engine process overview
5
-
6
- ### Flows
7
-
8
- - **The flow is called by the user or an existing flow run is executed in a new process.**
9
-
10
- See `Flow.__call__` and `prefect.engine.__main__` (`python -m prefect.engine`)
11
-
12
- - **A synchronous function acts as an entrypoint to the engine.**
13
- The engine executes on a dedicated "global loop" thread. For asynchronous flow calls,
14
- we return a coroutine from the entrypoint so the user can enter the engine without
15
- blocking their event loop.
16
-
17
- See `enter_flow_run_engine_from_flow_call`, `enter_flow_run_engine_from_subprocess`
18
-
19
- - **The thread that calls the entrypoint waits until orchestration of the flow run completes.**
20
- This thread is referred to as the "user" thread and is usually the "main" thread.
21
- The thread is not blocked while waiting — it allows the engine to send work back to it.
22
- This allows us to send calls back to the user thread from the global loop thread.
23
-
24
- See `wait_for_call_in_loop_thread` and `call_soon_in_waiting_thread`
25
-
26
- - **The asynchronous engine branches depending on if the flow run exists already and if
27
- there is a parent flow run in the current context.**
28
-
29
- See `create_then_begin_flow_run`, `create_and_begin_subflow_run`, and `retrieve_flow_then_begin_flow_run`
30
-
31
- - **The asynchronous engine prepares for execution of the flow run.**
32
- This includes starting the task runner, preparing context, etc.
33
-
34
- See `begin_flow_run`
35
-
36
- - **The flow run is orchestrated through states, calling the user's function as necessary.**
37
- Generally the user's function is sent for execution on the user thread.
38
- If the flow function cannot be safely executed on the user thread, e.g. it is
39
- a synchronous child in an asynchronous parent it will be scheduled on a worker
40
- thread instead.
41
-
42
- See `orchestrate_flow_run`, `call_soon_in_waiting_thread`, `call_soon_in_new_thread`
43
-
44
- ### Tasks
45
-
46
- - **The task is called or submitted by the user.**
47
- We require that this is always within a flow.
48
-
49
- See `Task.__call__` and `Task.submit`
50
-
51
- - **A synchronous function acts as an entrypoint to the engine.**
52
- Unlike flow calls, this _will not_ block until completion if `submit` was used.
53
-
54
- See `enter_task_run_engine`
55
-
56
- - **A future is created for the task call.**
57
- Creation of the task run and submission to the task runner is scheduled as a
58
- background task so submission of many tasks can occur concurrently.
59
-
60
- See `create_task_run_future` and `create_task_run_then_submit`
61
-
62
- - **The engine branches depending on if a future, state, or result is requested.**
63
- If a future is requested, it is returned immediately to the user thread.
64
- Otherwise, the engine will wait for the task run to complete and return the final
65
- state or result.
66
-
67
- See `get_task_call_return_value`
68
-
69
- - **An engine function is submitted to the task runner.**
70
- The task runner will schedule this function for execution on a worker.
71
- When executed, it will prepare for orchestration and wait for completion of the run.
72
-
73
- See `create_task_run_then_submit` and `begin_task_run`
74
-
75
- - **The task run is orchestrated through states, calling the user's function as necessary.**
76
- The user's function is always executed in a worker thread for isolation.
77
-
78
- See `orchestrate_task_run`, `call_soon_in_new_thread`
79
-
80
- _Ideally, for local and sequential task runners we would send the task run to the
81
- user thread as we do for flows. See [#9855](https://github.com/PrefectHQ/prefect/pull/9855).
82
- """
83
-
84
- import asyncio
85
- import logging
86
1
  import os
87
- import random
88
2
  import sys
89
- import threading
90
- import time
91
- from contextlib import AsyncExitStack, asynccontextmanager
92
- from functools import partial
93
- from typing import (
94
- Any,
95
- Awaitable,
96
- Dict,
97
- Iterable,
98
- List,
99
- Optional,
100
- Set,
101
- Type,
102
- TypeVar,
103
- Union,
104
- overload,
105
- )
106
- from uuid import UUID, uuid4
3
+ from uuid import UUID
107
4
 
108
- import anyio
109
- import pendulum
110
- from anyio.from_thread import start_blocking_portal
111
- from typing_extensions import Literal
112
-
113
- import prefect
114
- import prefect.context
115
- import prefect.plugins
116
- from prefect._internal.compatibility.deprecated import (
117
- deprecated_callable,
118
- deprecated_parameter,
119
- )
120
- from prefect._internal.compatibility.experimental import experimental_parameter
121
- from prefect._internal.concurrency.api import create_call, from_async, from_sync
122
- from prefect._internal.concurrency.calls import get_current_call
123
- from prefect._internal.concurrency.cancellation import CancelledError
124
- from prefect._internal.concurrency.threads import wait_for_global_loop_exit
125
- from prefect.client.orchestration import PrefectClient, get_client
126
- from prefect.client.schemas import FlowRun, TaskRun
127
- from prefect.client.schemas.filters import FlowRunFilter
128
- from prefect.client.schemas.objects import (
129
- StateDetails,
130
- StateType,
131
- TaskRunInput,
132
- )
133
- from prefect.client.schemas.responses import SetStateStatus
134
- from prefect.client.schemas.sorting import FlowRunSort
135
- from prefect.client.utilities import inject_client
136
- from prefect.context import (
137
- FlowRunContext,
138
- PrefectObjectRegistry,
139
- TagsContext,
140
- TaskRunContext,
141
- )
142
- from prefect.deployments import load_flow_from_flow_run
5
+ from prefect._internal.compatibility.migration import getattr_migration
143
6
  from prefect.exceptions import (
144
7
  Abort,
145
- FlowPauseTimeout,
146
- MappingLengthMismatch,
147
- MappingMissingIterable,
148
- NotPausedError,
149
8
  Pause,
150
- PausedRun,
151
- UpstreamTaskError,
152
9
  )
153
- from prefect.flows import Flow, load_flow_from_entrypoint
154
- from prefect.futures import PrefectFuture, call_repr, resolve_futures_to_states
155
- from prefect.input import keyset_from_paused_state
156
- from prefect.input.run_input import run_input_subclass_from_type
157
- from prefect.logging.configuration import setup_logging
158
- from prefect.logging.handlers import APILogHandler
159
10
  from prefect.logging.loggers import (
160
- flow_run_logger,
161
11
  get_logger,
162
- get_run_logger,
163
- patch_print,
164
- task_run_logger,
165
- )
166
- from prefect.results import ResultFactory, UnknownResult
167
- from prefect.settings import (
168
- PREFECT_DEBUG_MODE,
169
- PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE,
170
- PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD,
171
- PREFECT_TASKS_REFRESH_CACHE,
172
- PREFECT_UI_URL,
173
12
  )
174
- from prefect.states import (
175
- Completed,
176
- Paused,
177
- Pending,
178
- Running,
179
- Scheduled,
180
- State,
181
- Suspended,
182
- exception_to_crashed_state,
183
- exception_to_failed_state,
184
- return_value_to_state,
185
- )
186
- from prefect.task_runners import (
187
- CONCURRENCY_MESSAGES,
188
- BaseTaskRunner,
189
- TaskConcurrencyType,
190
- )
191
- from prefect.tasks import Task
192
- from prefect.utilities.annotations import allow_failure, quote, unmapped
193
13
  from prefect.utilities.asyncutils import (
194
- gather,
195
- is_async_fn,
196
- run_sync,
197
- sync_compatible,
198
- )
199
- from prefect.utilities.callables import (
200
- collapse_variadic_parameters,
201
- explode_variadic_parameter,
202
- get_parameter_defaults,
203
- parameters_to_args_kwargs,
204
- )
205
- from prefect.utilities.collections import isiterable
206
- from prefect.utilities.engine import (
207
- _dynamic_key_for_task_run,
208
- _get_hook_name,
209
- _observed_flow_pauses,
210
- _resolve_custom_flow_run_name,
211
- _resolve_custom_task_run_name,
212
- capture_sigterm,
213
- check_api_reachable,
214
- collapse_excgroups,
215
- collect_task_run_inputs,
216
- emit_task_run_state_change_event,
217
- propose_state,
218
- resolve_inputs,
219
- should_log_prints,
220
- wait_for_task_runs_and_report_crashes,
14
+ run_coro_as_sync,
221
15
  )
222
16
 
223
- R = TypeVar("R")
224
- T = TypeVar("T")
225
- EngineReturnType = Literal["future", "state", "result"]
226
-
227
- NUM_CHARS_DYNAMIC_KEY = 8
228
-
229
17
  engine_logger = get_logger("engine")
230
18
 
231
19
 
232
- def enter_flow_run_engine_from_flow_call(
233
- flow: Flow,
234
- parameters: Dict[str, Any],
235
- wait_for: Optional[Iterable[PrefectFuture]],
236
- return_type: EngineReturnType,
237
- ) -> Union[State, Awaitable[State]]:
238
- """
239
- Sync entrypoint for flow calls.
240
-
241
- This function does the heavy lifting of ensuring we can get into an async context
242
- for flow run execution with minimal overhead.
243
- """
244
- setup_logging()
245
-
246
- registry = PrefectObjectRegistry.get()
247
- if registry and registry.block_code_execution:
248
- engine_logger.warning(
249
- f"Script loading is in progress, flow {flow.name!r} will not be executed."
250
- " Consider updating the script to only call the flow if executed"
251
- f' directly:\n\n\tif __name__ == "__main__":\n\t\t{flow.fn.__name__}()'
252
- )
253
- return None
254
-
255
- parent_flow_run_context = FlowRunContext.get()
256
- is_subflow_run = parent_flow_run_context is not None
257
-
258
- if wait_for is not None and not is_subflow_run:
259
- raise ValueError("Only flows run as subflows can wait for dependencies.")
260
-
261
- begin_run = create_call(
262
- create_and_begin_subflow_run if is_subflow_run else create_then_begin_flow_run,
263
- flow=flow,
264
- parameters=parameters,
265
- wait_for=wait_for,
266
- return_type=return_type,
267
- client=parent_flow_run_context.client if is_subflow_run else None,
268
- user_thread=threading.current_thread(),
269
- )
270
-
271
- # On completion of root flows, wait for the global thread to ensure that
272
- # any work there is complete
273
- done_callbacks = (
274
- [create_call(wait_for_global_loop_exit)] if not is_subflow_run else None
275
- )
276
-
277
- # WARNING: You must define any context managers here to pass to our concurrency
278
- # api instead of entering them in here in the engine entrypoint. Otherwise, async
279
- # flows will not use the context as this function _exits_ to return an awaitable to
280
- # the user. Generally, you should enter contexts _within_ the async `begin_run`
281
- # instead but if you need to enter a context from the main thread you'll need to do
282
- # it here.
283
- contexts = [capture_sigterm(), collapse_excgroups()]
284
-
285
- if flow.isasync and (
286
- not is_subflow_run or (is_subflow_run and parent_flow_run_context.flow.isasync)
287
- ):
288
- # return a coro for the user to await if the flow is async
289
- # unless it is an async subflow called in a sync flow
290
- retval = from_async.wait_for_call_in_loop_thread(
291
- begin_run,
292
- done_callbacks=done_callbacks,
293
- contexts=contexts,
294
- )
295
-
296
- else:
297
- retval = from_sync.wait_for_call_in_loop_thread(
298
- begin_run,
299
- done_callbacks=done_callbacks,
300
- contexts=contexts,
301
- )
302
-
303
- return retval
304
-
305
-
306
- def enter_flow_run_engine_from_subprocess(flow_run_id: UUID) -> State:
307
- """
308
- Sync entrypoint for flow runs that have been submitted for execution by an agent
309
-
310
- Differs from `enter_flow_run_engine_from_flow_call` in that we have a flow run id
311
- but not a flow object. The flow must be retrieved before execution can begin.
312
- Additionally, this assumes that the caller is always in a context without an event
313
- loop as this should be called from a fresh process.
314
- """
315
-
316
- # Ensure collections are imported and have the opportunity to register types before
317
- # loading the user code from the deployment
318
- prefect.plugins.load_prefect_collections()
319
-
320
- setup_logging()
321
-
322
- state = from_sync.wait_for_call_in_loop_thread(
323
- create_call(
324
- retrieve_flow_then_begin_flow_run,
325
- flow_run_id,
326
- user_thread=threading.current_thread(),
327
- ),
328
- contexts=[capture_sigterm(), collapse_excgroups()],
329
- )
330
-
331
- APILogHandler.flush()
332
- return state
333
-
334
-
335
- @inject_client
336
- async def create_then_begin_flow_run(
337
- flow: Flow,
338
- parameters: Dict[str, Any],
339
- wait_for: Optional[Iterable[PrefectFuture]],
340
- return_type: EngineReturnType,
341
- client: PrefectClient,
342
- user_thread: threading.Thread,
343
- ) -> Any:
344
- """
345
- Async entrypoint for flow calls
346
-
347
- Creates the flow run in the backend, then enters the main flow run engine.
348
- """
349
- # TODO: Returns a `State` depending on `return_type` and we can add an overload to
350
- # the function signature to clarify this eventually.
351
-
352
- await check_api_reachable(client, "Cannot create flow run")
353
-
354
- state = Pending()
355
- if flow.should_validate_parameters:
356
- try:
357
- parameters = flow.validate_parameters(parameters)
358
- except Exception:
359
- state = await exception_to_failed_state(
360
- message="Validation of flow parameters failed with error:"
361
- )
362
-
363
- flow_run = await client.create_flow_run(
364
- flow,
365
- # Send serialized parameters to the backend
366
- parameters=flow.serialize_parameters(parameters),
367
- state=state,
368
- tags=TagsContext.get().current_tags,
369
- )
370
-
371
- engine_logger.info(f"Created flow run {flow_run.name!r} for flow {flow.name!r}")
372
-
373
- logger = flow_run_logger(flow_run, flow)
374
-
375
- ui_url = PREFECT_UI_URL.value()
376
- if ui_url:
377
- logger.info(
378
- f"View at {ui_url}/flow-runs/flow-run/{flow_run.id}",
379
- extra={"send_to_api": False},
380
- )
381
-
382
- if state.is_failed():
383
- logger.error(state.message)
384
- engine_logger.info(
385
- f"Flow run {flow_run.name!r} received invalid parameters and is marked as"
386
- " failed."
387
- )
388
- else:
389
- state = await begin_flow_run(
390
- flow=flow,
391
- flow_run=flow_run,
392
- parameters=parameters,
393
- client=client,
394
- user_thread=user_thread,
395
- )
396
-
397
- if return_type == "state":
398
- return state
399
- elif return_type == "result":
400
- return await state.result(fetch=True)
401
- else:
402
- raise ValueError(f"Invalid return type for flow engine {return_type!r}.")
403
-
404
-
405
- @inject_client
406
- async def retrieve_flow_then_begin_flow_run(
407
- flow_run_id: UUID,
408
- client: PrefectClient,
409
- user_thread: threading.Thread,
410
- ) -> State:
411
- """
412
- Async entrypoint for flow runs that have been submitted for execution by an agent
413
-
414
- - Retrieves the deployment information
415
- - Loads the flow object using deployment information
416
- - Updates the flow run version
417
- """
418
- flow_run = await client.read_flow_run(flow_run_id)
419
-
420
- entrypoint = os.environ.get("PREFECT__FLOW_ENTRYPOINT")
421
-
422
- try:
423
- flow = (
424
- # We do not want to use a placeholder flow at runtime
425
- load_flow_from_entrypoint(entrypoint, use_placeholder_flow=False)
426
- if entrypoint
427
- else await load_flow_from_flow_run(
428
- flow_run, client=client, use_placeholder_flow=False
429
- )
430
- )
431
- except Exception:
432
- message = (
433
- "Flow could not be retrieved from"
434
- f" {'entrypoint' if entrypoint else 'deployment'}."
435
- )
436
- flow_run_logger(flow_run).exception(message)
437
- state = await exception_to_failed_state(message=message)
438
- await client.set_flow_run_state(
439
- state=state, flow_run_id=flow_run_id, force=True
440
- )
441
- return state
442
-
443
- # Update the flow run policy defaults to match settings on the flow
444
- # Note: Mutating the flow run object prevents us from performing another read
445
- # operation if these properties are used by the client downstream
446
- if flow_run.empirical_policy.retry_delay is None:
447
- flow_run.empirical_policy.retry_delay = flow.retry_delay_seconds
448
-
449
- if flow_run.empirical_policy.retries is None:
450
- flow_run.empirical_policy.retries = flow.retries
451
-
452
- await client.update_flow_run(
453
- flow_run_id=flow_run_id,
454
- flow_version=flow.version,
455
- empirical_policy=flow_run.empirical_policy,
456
- )
457
-
458
- if flow.should_validate_parameters:
459
- failed_state = None
460
- try:
461
- parameters = flow.validate_parameters(flow_run.parameters)
462
- except Exception:
463
- message = "Validation of flow parameters failed with error: "
464
- flow_run_logger(flow_run).exception(message)
465
- failed_state = await exception_to_failed_state(message=message)
466
-
467
- if failed_state is not None:
468
- await propose_state(
469
- client,
470
- state=failed_state,
471
- flow_run_id=flow_run_id,
472
- )
473
- return failed_state
474
- else:
475
- parameters = flow_run.parameters
476
-
477
- # Ensure default values are populated
478
- parameters = {**get_parameter_defaults(flow.fn), **parameters}
479
-
480
- return await begin_flow_run(
481
- flow=flow,
482
- flow_run=flow_run,
483
- parameters=parameters,
484
- client=client,
485
- user_thread=user_thread,
486
- )
487
-
488
-
489
- async def begin_flow_run(
490
- flow: Flow,
491
- flow_run: FlowRun,
492
- parameters: Dict[str, Any],
493
- client: PrefectClient,
494
- user_thread: threading.Thread,
495
- ) -> State:
496
- """
497
- Begins execution of a flow run; blocks until completion of the flow run
498
-
499
- - Starts a task runner
500
- - Determines the result storage block to use
501
- - Orchestrates the flow run (runs the user-function and generates tasks)
502
- - Waits for tasks to complete / shutsdown the task runner
503
- - Sets a terminal state for the flow run
504
-
505
- Note that the `flow_run` contains a `parameters` attribute which is the serialized
506
- parameters sent to the backend while the `parameters` argument here should be the
507
- deserialized and validated dictionary of python objects.
508
-
509
- Returns:
510
- The final state of the run
511
- """
512
- logger = flow_run_logger(flow_run, flow)
513
-
514
- log_prints = should_log_prints(flow)
515
- flow_run_context = FlowRunContext.construct(log_prints=log_prints)
516
-
517
- async with AsyncExitStack() as stack:
518
- await stack.enter_async_context(
519
- report_flow_run_crashes(flow_run=flow_run, client=client, flow=flow)
520
- )
521
-
522
- # Create a task group for background tasks
523
- flow_run_context.background_tasks = await stack.enter_async_context(
524
- anyio.create_task_group()
525
- )
526
-
527
- # If the flow is async, we need to provide a portal so sync tasks can run
528
- flow_run_context.sync_portal = (
529
- stack.enter_context(start_blocking_portal()) if flow.isasync else None
530
- )
531
-
532
- task_runner = flow.task_runner.duplicate()
533
- if task_runner is NotImplemented:
534
- # Backwards compatibility; will not support concurrent flow runs
535
- task_runner = flow.task_runner
536
- logger.warning(
537
- f"Task runner {type(task_runner).__name__!r} does not implement the"
538
- " `duplicate` method and will fail if used for concurrent execution of"
539
- " the same flow."
540
- )
541
-
542
- logger.debug(
543
- f"Starting {type(flow.task_runner).__name__!r}; submitted tasks "
544
- f"will be run {CONCURRENCY_MESSAGES[flow.task_runner.concurrency_type]}..."
545
- )
546
-
547
- flow_run_context.task_runner = await stack.enter_async_context(
548
- task_runner.start()
549
- )
550
-
551
- flow_run_context.result_factory = await ResultFactory.from_flow(
552
- flow, client=client
553
- )
554
-
555
- if log_prints:
556
- stack.enter_context(patch_print())
557
-
558
- terminal_or_paused_state = await orchestrate_flow_run(
559
- flow,
560
- flow_run=flow_run,
561
- parameters=parameters,
562
- wait_for=None,
563
- client=client,
564
- partial_flow_run_context=flow_run_context,
565
- # Orchestration needs to be interruptible if it has a timeout
566
- interruptible=flow.timeout_seconds is not None,
567
- user_thread=user_thread,
568
- )
569
-
570
- if terminal_or_paused_state.is_paused():
571
- timeout = terminal_or_paused_state.state_details.pause_timeout
572
- msg = "Currently paused and suspending execution."
573
- if timeout:
574
- msg += f" Resume before {timeout.to_rfc3339_string()} to finish execution."
575
- logger.log(level=logging.INFO, msg=msg)
576
- await APILogHandler.aflush()
577
-
578
- return terminal_or_paused_state
579
- else:
580
- terminal_state = terminal_or_paused_state
581
-
582
- # If debugging, use the more complete `repr` than the usual `str` description
583
- display_state = repr(terminal_state) if PREFECT_DEBUG_MODE else str(terminal_state)
584
-
585
- logger.log(
586
- level=logging.INFO if terminal_state.is_completed() else logging.ERROR,
587
- msg=f"Finished in state {display_state}",
588
- )
589
-
590
- # When a "root" flow run finishes, flush logs so we do not have to rely on handling
591
- # during interpreter shutdown
592
- await APILogHandler.aflush()
593
-
594
- return terminal_state
595
-
596
-
597
- @inject_client
598
- async def create_and_begin_subflow_run(
599
- flow: Flow,
600
- parameters: Dict[str, Any],
601
- wait_for: Optional[Iterable[PrefectFuture]],
602
- return_type: EngineReturnType,
603
- client: PrefectClient,
604
- user_thread: threading.Thread,
605
- ) -> Any:
606
- """
607
- Async entrypoint for flows calls within a flow run
608
-
609
- Subflows differ from parent flows in that they
610
- - Resolve futures in passed parameters into values
611
- - Create a dummy task for representation in the parent flow
612
- - Retrieve default result storage from the parent flow rather than the server
613
-
614
- Returns:
615
- The final state of the run
616
- """
617
- parent_flow_run_context = FlowRunContext.get()
618
- parent_logger = get_run_logger(parent_flow_run_context)
619
- log_prints = should_log_prints(flow)
620
- terminal_state = None
621
-
622
- parent_logger.debug(f"Resolving inputs to {flow.name!r}")
623
- task_inputs = {k: await collect_task_run_inputs(v) for k, v in parameters.items()}
624
-
625
- if wait_for:
626
- task_inputs["wait_for"] = await collect_task_run_inputs(wait_for)
627
-
628
- rerunning = (
629
- parent_flow_run_context.flow_run.run_count > 1
630
- if getattr(parent_flow_run_context, "flow_run", None)
631
- else False
632
- )
633
-
634
- # Generate a task in the parent flow run to represent the result of the subflow run
635
- dummy_task = Task(name=flow.name, fn=flow.fn, version=flow.version)
636
- parent_task_run = await client.create_task_run(
637
- task=dummy_task,
638
- flow_run_id=(
639
- parent_flow_run_context.flow_run.id
640
- if getattr(parent_flow_run_context, "flow_run", None)
641
- else None
642
- ),
643
- dynamic_key=_dynamic_key_for_task_run(parent_flow_run_context, dummy_task),
644
- task_inputs=task_inputs,
645
- state=Pending(),
646
- )
647
-
648
- # Resolve any task futures in the input
649
- parameters = await resolve_inputs(parameters)
650
-
651
- if parent_task_run.state.is_final() and not (
652
- rerunning and not parent_task_run.state.is_completed()
653
- ):
654
- # Retrieve the most recent flow run from the database
655
- flow_runs = await client.read_flow_runs(
656
- flow_run_filter=FlowRunFilter(
657
- parent_task_run_id={"any_": [parent_task_run.id]}
658
- ),
659
- sort=FlowRunSort.EXPECTED_START_TIME_ASC,
660
- )
661
- flow_run = flow_runs[-1]
662
-
663
- # Set up variables required downstream
664
- terminal_state = flow_run.state
665
- logger = flow_run_logger(flow_run, flow)
666
-
667
- else:
668
- flow_run = await client.create_flow_run(
669
- flow,
670
- parameters=flow.serialize_parameters(parameters),
671
- parent_task_run_id=parent_task_run.id,
672
- state=parent_task_run.state if not rerunning else Pending(),
673
- tags=TagsContext.get().current_tags,
674
- )
675
-
676
- parent_logger.info(
677
- f"Created subflow run {flow_run.name!r} for flow {flow.name!r}"
678
- )
679
-
680
- logger = flow_run_logger(flow_run, flow)
681
- ui_url = PREFECT_UI_URL.value()
682
- if ui_url:
683
- logger.info(
684
- f"View at {ui_url}/flow-runs/flow-run/{flow_run.id}",
685
- extra={"send_to_api": False},
686
- )
687
-
688
- result_factory = await ResultFactory.from_flow(
689
- flow, client=parent_flow_run_context.client
690
- )
691
-
692
- if flow.should_validate_parameters:
693
- try:
694
- parameters = flow.validate_parameters(parameters)
695
- except Exception:
696
- message = "Validation of flow parameters failed with error:"
697
- logger.exception(message)
698
- terminal_state = await propose_state(
699
- client,
700
- state=await exception_to_failed_state(
701
- message=message, result_factory=result_factory
702
- ),
703
- flow_run_id=flow_run.id,
704
- )
705
-
706
- if terminal_state is None or not terminal_state.is_final():
707
- async with AsyncExitStack() as stack:
708
- await stack.enter_async_context(
709
- report_flow_run_crashes(flow_run=flow_run, client=client, flow=flow)
710
- )
711
-
712
- task_runner = flow.task_runner.duplicate()
713
- if task_runner is NotImplemented:
714
- # Backwards compatibility; will not support concurrent flow runs
715
- task_runner = flow.task_runner
716
- logger.warning(
717
- f"Task runner {type(task_runner).__name__!r} does not implement"
718
- " the `duplicate` method and will fail if used for concurrent"
719
- " execution of the same flow."
720
- )
721
-
722
- await stack.enter_async_context(task_runner.start())
723
-
724
- if log_prints:
725
- stack.enter_context(patch_print())
726
-
727
- terminal_state = await orchestrate_flow_run(
728
- flow,
729
- flow_run=flow_run,
730
- parameters=parameters,
731
- wait_for=wait_for,
732
- # If the parent flow run has a timeout, then this one needs to be
733
- # interruptible as well
734
- interruptible=parent_flow_run_context.timeout_scope is not None,
735
- client=client,
736
- partial_flow_run_context=FlowRunContext.construct(
737
- sync_portal=parent_flow_run_context.sync_portal,
738
- task_runner=task_runner,
739
- background_tasks=parent_flow_run_context.background_tasks,
740
- result_factory=result_factory,
741
- log_prints=log_prints,
742
- ),
743
- user_thread=user_thread,
744
- )
745
-
746
- # Display the full state (including the result) if debugging
747
- display_state = repr(terminal_state) if PREFECT_DEBUG_MODE else str(terminal_state)
748
- logger.log(
749
- level=logging.INFO if terminal_state.is_completed() else logging.ERROR,
750
- msg=f"Finished in state {display_state}",
751
- )
752
-
753
- # Track the subflow state so the parent flow can use it to determine its final state
754
- parent_flow_run_context.flow_run_states.append(terminal_state)
755
-
756
- if return_type == "state":
757
- return terminal_state
758
- elif return_type == "result":
759
- return await terminal_state.result(fetch=True)
760
- else:
761
- raise ValueError(f"Invalid return type for flow engine {return_type!r}.")
762
-
763
-
764
- async def orchestrate_flow_run(
765
- flow: Flow,
766
- flow_run: FlowRun,
767
- parameters: Dict[str, Any],
768
- wait_for: Optional[Iterable[PrefectFuture]],
769
- interruptible: bool,
770
- client: PrefectClient,
771
- partial_flow_run_context: FlowRunContext,
772
- user_thread: threading.Thread,
773
- ) -> State:
774
- """
775
- Executes a flow run.
776
-
777
- Note on flow timeouts:
778
- Since async flows are run directly in the main event loop, timeout behavior will
779
- match that described by anyio. If the flow is awaiting something, it will
780
- immediately return; otherwise, the next time it awaits it will exit. Sync flows
781
- are being task runner in a worker thread, which cannot be interrupted. The worker
782
- thread will exit at the next task call. The worker thread also has access to the
783
- status of the cancellation scope at `FlowRunContext.timeout_scope.cancel_called`
784
- which allows it to raise a `TimeoutError` to respect the timeout.
785
-
786
- Returns:
787
- The final state of the run
788
- """
789
-
790
- logger = flow_run_logger(flow_run, flow)
791
-
792
- flow_run_context = None
793
- parent_flow_run_context = FlowRunContext.get()
794
-
795
- try:
796
- # Resolve futures in any non-data dependencies to ensure they are ready
797
- if wait_for is not None:
798
- await resolve_inputs({"wait_for": wait_for}, return_data=False)
799
- except UpstreamTaskError as upstream_exc:
800
- return await propose_state(
801
- client,
802
- Pending(name="NotReady", message=str(upstream_exc)),
803
- flow_run_id=flow_run.id,
804
- # if orchestrating a run already in a pending state, force orchestration to
805
- # update the state name
806
- force=flow_run.state.is_pending(),
807
- )
808
-
809
- state = await propose_state(client, Running(), flow_run_id=flow_run.id)
810
-
811
- # flag to ensure we only update the flow run name once
812
- run_name_set = False
813
-
814
- await _run_flow_hooks(flow=flow, flow_run=flow_run, state=state)
815
-
816
- while state.is_running():
817
- waited_for_task_runs = False
818
-
819
- # Update the flow run to the latest data
820
- flow_run = await client.read_flow_run(flow_run.id)
821
- try:
822
- with FlowRunContext(
823
- **{
824
- **partial_flow_run_context.dict(),
825
- **{
826
- "flow_run": flow_run,
827
- "flow": flow,
828
- "client": client,
829
- "parameters": parameters,
830
- },
831
- }
832
- ) as flow_run_context:
833
- # update flow run name
834
- if not run_name_set and flow.flow_run_name:
835
- flow_run_name = _resolve_custom_flow_run_name(
836
- flow=flow, parameters=parameters
837
- )
838
-
839
- await client.update_flow_run(
840
- flow_run_id=flow_run.id, name=flow_run_name
841
- )
842
- logger.extra["flow_run_name"] = flow_run_name
843
- logger.debug(
844
- f"Renamed flow run {flow_run.name!r} to {flow_run_name!r}"
845
- )
846
- flow_run.name = flow_run_name
847
- run_name_set = True
848
-
849
- args, kwargs = parameters_to_args_kwargs(flow.fn, parameters)
850
- logger.debug(
851
- f"Executing flow {flow.name!r} for flow run {flow_run.name!r}..."
852
- )
853
-
854
- if PREFECT_DEBUG_MODE:
855
- logger.debug(f"Executing {call_repr(flow.fn, *args, **kwargs)}")
856
- else:
857
- logger.debug(
858
- "Beginning execution...", extra={"state_message": True}
859
- )
860
-
861
- flow_call = create_call(flow.fn, *args, **kwargs)
862
-
863
- # This check for a parent call is needed for cases where the engine
864
- # was entered directly during testing
865
- parent_call = get_current_call()
866
-
867
- if parent_call and (
868
- not parent_flow_run_context
869
- or (
870
- getattr(parent_flow_run_context, "flow", None)
871
- and parent_flow_run_context.flow.isasync == flow.isasync
872
- )
873
- ):
874
- from_async.call_soon_in_waiting_thread(
875
- flow_call,
876
- thread=user_thread,
877
- timeout=flow.timeout_seconds,
878
- )
879
- else:
880
- from_async.call_soon_in_new_thread(
881
- flow_call, timeout=flow.timeout_seconds
882
- )
883
-
884
- result = await flow_call.aresult()
885
-
886
- waited_for_task_runs = await wait_for_task_runs_and_report_crashes(
887
- flow_run_context.task_run_futures, client=client
888
- )
889
- except PausedRun as exc:
890
- # could get raised either via utility or by returning Paused from a task run
891
- # if a task run pauses, we set its state as the flow's state
892
- # to preserve reschedule and timeout behavior
893
- paused_flow_run = await client.read_flow_run(flow_run.id)
894
- if paused_flow_run.state.is_running():
895
- state = await propose_state(
896
- client,
897
- state=exc.state,
898
- flow_run_id=flow_run.id,
899
- )
900
-
901
- return state
902
- paused_flow_run_state = paused_flow_run.state
903
- return paused_flow_run_state
904
- except CancelledError as exc:
905
- if not flow_call.timedout():
906
- # If the flow call was not cancelled by us; this is a crash
907
- raise
908
- # Construct a new exception as `TimeoutError`
909
- original = exc
910
- exc = TimeoutError()
911
- exc.__cause__ = original
912
- logger.exception("Encountered exception during execution:")
913
- terminal_state = await exception_to_failed_state(
914
- exc,
915
- message=f"Flow run exceeded timeout of {flow.timeout_seconds} seconds",
916
- result_factory=flow_run_context.result_factory,
917
- name="TimedOut",
918
- )
919
- except Exception:
920
- # Generic exception in user code
921
- logger.exception("Encountered exception during execution:")
922
- terminal_state = await exception_to_failed_state(
923
- message="Flow run encountered an exception.",
924
- result_factory=flow_run_context.result_factory,
925
- )
926
- else:
927
- if result is None:
928
- # All tasks and subflows are reference tasks if there is no return value
929
- # If there are no tasks, use `None` instead of an empty iterable
930
- result = (
931
- flow_run_context.task_run_futures
932
- + flow_run_context.task_run_states
933
- + flow_run_context.flow_run_states
934
- ) or None
935
-
936
- terminal_state = await return_value_to_state(
937
- await resolve_futures_to_states(result),
938
- result_factory=flow_run_context.result_factory,
939
- )
940
-
941
- if not waited_for_task_runs:
942
- # An exception occurred that prevented us from waiting for task runs to
943
- # complete. Ensure that we wait for them before proposing a final state
944
- # for the flow run.
945
- await wait_for_task_runs_and_report_crashes(
946
- flow_run_context.task_run_futures, client=client
947
- )
948
-
949
- # Before setting the flow run state, store state.data using
950
- # block storage and send the resulting data document to the Prefect API instead.
951
- # This prevents the pickled return value of flow runs
952
- # from being sent to the Prefect API and stored in the Prefect database.
953
- # state.data is left as is, otherwise we would have to load
954
- # the data from block storage again after storing.
955
- state = await propose_state(
956
- client,
957
- state=terminal_state,
958
- flow_run_id=flow_run.id,
959
- )
960
-
961
- await _run_flow_hooks(flow=flow, flow_run=flow_run, state=state)
962
-
963
- if state.type != terminal_state.type and PREFECT_DEBUG_MODE:
964
- logger.debug(
965
- (
966
- f"Received new state {state} when proposing final state"
967
- f" {terminal_state}"
968
- ),
969
- extra={"send_to_api": False},
970
- )
971
-
972
- if not state.is_final() and not state.is_paused():
973
- logger.info(
974
- (
975
- f"Received non-final state {state.name!r} when proposing final"
976
- f" state {terminal_state.name!r} and will attempt to run again..."
977
- ),
978
- )
979
- # Attempt to enter a running state again
980
- state = await propose_state(client, Running(), flow_run_id=flow_run.id)
981
-
982
- return state
983
-
984
-
985
- @deprecated_callable(
986
- start_date="Jun 2024",
987
- help="Will be moved in Prefect 3 to prefect.flow_runs:pause_flow_run",
988
- )
989
- @overload
990
- async def pause_flow_run(
991
- wait_for_input: None = None,
992
- flow_run_id: UUID = None,
993
- timeout: int = 3600,
994
- poll_interval: int = 10,
995
- reschedule: bool = False,
996
- key: str = None,
997
- ) -> None:
998
- ...
999
-
1000
-
1001
- @deprecated_callable(
1002
- start_date="Jun 2024",
1003
- help="Will be moved in Prefect 3 to prefect.flow_runs:pause_flow_run",
1004
- )
1005
- @overload
1006
- async def pause_flow_run(
1007
- wait_for_input: Type[T],
1008
- flow_run_id: UUID = None,
1009
- timeout: int = 3600,
1010
- poll_interval: int = 10,
1011
- reschedule: bool = False,
1012
- key: str = None,
1013
- ) -> T:
1014
- ...
1015
-
1016
-
1017
- @sync_compatible
1018
- @deprecated_parameter(
1019
- "flow_run_id", start_date="Dec 2023", help="Use `suspend_flow_run` instead."
1020
- )
1021
- @deprecated_parameter(
1022
- "reschedule",
1023
- start_date="Dec 2023",
1024
- when=lambda p: p is True,
1025
- help="Use `suspend_flow_run` instead.",
1026
- )
1027
- @experimental_parameter(
1028
- "wait_for_input", group="flow_run_input", when=lambda y: y is not None
1029
- )
1030
- async def pause_flow_run(
1031
- wait_for_input: Optional[Type[T]] = None,
1032
- flow_run_id: UUID = None,
1033
- timeout: int = 3600,
1034
- poll_interval: int = 10,
1035
- reschedule: bool = False,
1036
- key: str = None,
1037
- ) -> Optional[T]:
1038
- """
1039
- Pauses the current flow run by blocking execution until resumed.
1040
-
1041
- When called within a flow run, execution will block and no downstream tasks will
1042
- run until the flow is resumed. Task runs that have already started will continue
1043
- running. A timeout parameter can be passed that will fail the flow run if it has not
1044
- been resumed within the specified time.
1045
-
1046
- Args:
1047
- flow_run_id: a flow run id. If supplied, this function will attempt to pause
1048
- the specified flow run outside of the flow run process. When paused, the
1049
- flow run will continue execution until the NEXT task is orchestrated, at
1050
- which point the flow will exit. Any tasks that have already started will
1051
- run until completion. When resumed, the flow run will be rescheduled to
1052
- finish execution. In order pause a flow run in this way, the flow needs to
1053
- have an associated deployment and results need to be configured with the
1054
- `persist_results` option.
1055
- timeout: the number of seconds to wait for the flow to be resumed before
1056
- failing. Defaults to 1 hour (3600 seconds). If the pause timeout exceeds
1057
- any configured flow-level timeout, the flow might fail even after resuming.
1058
- poll_interval: The number of seconds between checking whether the flow has been
1059
- resumed. Defaults to 10 seconds.
1060
- reschedule: Flag that will reschedule the flow run if resumed. Instead of
1061
- blocking execution, the flow will gracefully exit (with no result returned)
1062
- instead. To use this flag, a flow needs to have an associated deployment and
1063
- results need to be configured with the `persist_results` option.
1064
- key: An optional key to prevent calling pauses more than once. This defaults to
1065
- the number of pauses observed by the flow so far, and prevents pauses that
1066
- use the "reschedule" option from running the same pause twice. A custom key
1067
- can be supplied for custom pausing behavior.
1068
- wait_for_input: a subclass of `RunInput` or any type supported by
1069
- Pydantic. If provided when the flow pauses, the flow will wait for the
1070
- input to be provided before resuming. If the flow is resumed without
1071
- providing the input, the flow will fail. If the flow is resumed with the
1072
- input, the flow will resume and the input will be loaded and returned
1073
- from this function.
1074
-
1075
- Example:
1076
- ```python
1077
- @task
1078
- def task_one():
1079
- for i in range(3):
1080
- sleep(1)
1081
-
1082
- @flow
1083
- def my_flow():
1084
- terminal_state = task_one.submit(return_state=True)
1085
- if terminal_state.type == StateType.COMPLETED:
1086
- print("Task one succeeded! Pausing flow run..")
1087
- pause_flow_run(timeout=2)
1088
- else:
1089
- print("Task one failed. Skipping pause flow run..")
1090
- ```
1091
-
1092
- """
1093
- if flow_run_id:
1094
- if wait_for_input is not None:
1095
- raise RuntimeError("Cannot wait for input when pausing out of process.")
1096
-
1097
- return await _out_of_process_pause(
1098
- flow_run_id=flow_run_id,
1099
- timeout=timeout,
1100
- reschedule=reschedule,
1101
- key=key,
1102
- )
1103
- else:
1104
- return await _in_process_pause(
1105
- timeout=timeout,
1106
- poll_interval=poll_interval,
1107
- reschedule=reschedule,
1108
- key=key,
1109
- wait_for_input=wait_for_input,
1110
- )
1111
-
1112
-
1113
- @deprecated_callable(
-     start_date="Jun 2024",
-     help="Will be moved in Prefect 3 to prefect.flow_runs:_in_process_pause",
- )
- @inject_client
- async def _in_process_pause(
-     timeout: int = 3600,
-     poll_interval: int = 10,
-     reschedule=False,
-     key: str = None,
-     client=None,
-     wait_for_input: Optional[T] = None,
- ) -> Optional[T]:
-     if TaskRunContext.get():
-         raise RuntimeError("Cannot pause task runs.")
-
-     context = FlowRunContext.get()
-     if not context:
-         raise RuntimeError("Flow runs can only be paused from within a flow run.")
-
-     logger = get_run_logger(context=context)
-
-     pause_counter = _observed_flow_pauses(context)
-     pause_key = key or str(pause_counter)
-
-     logger.info("Pausing flow, execution will continue when this flow run is resumed.")
-
-     proposed_state = Paused(
-         timeout_seconds=timeout, reschedule=reschedule, pause_key=pause_key
-     )
-
-     if wait_for_input:
-         wait_for_input = run_input_subclass_from_type(wait_for_input)
-         run_input_keyset = keyset_from_paused_state(proposed_state)
-         proposed_state.state_details.run_input_keyset = run_input_keyset
-
-     try:
-         state = await propose_state(
-             client=client,
-             state=proposed_state,
-             flow_run_id=context.flow_run.id,
-         )
-     except Abort as exc:
-         # Aborted pause requests mean the pause is not allowed
-         raise RuntimeError(f"Flow run cannot be paused: {exc}")
-
-     if state.is_running():
-         # The orchestrator rejected the paused state which means that this
-         # pause has happened before (via reschedule) and the flow run has
-         # been resumed.
-         if wait_for_input:
-             # The flow run wanted input, so we need to load it and return it
-             # to the user.
-             return await wait_for_input.load(run_input_keyset)
-
-         return
-
-     if not state.is_paused():
-         # If we receive anything but a PAUSED state, we are unable to continue
-         raise RuntimeError(
-             f"Flow run cannot be paused. Received non-paused state from API: {state}"
-         )
-
-     if wait_for_input:
-         # We're now in a paused state and the flow run is waiting for input.
-         # Save the schema of the user's `RunInput` subclass, stored in
-         # `wait_for_input`, so the UI can display the form and we can validate
-         # the input when the flow is resumed.
-         await wait_for_input.save(run_input_keyset)
-
-     if reschedule:
-         # If a rescheduled pause, exit this process so the run can be resubmitted later
-         raise Pause(state=state)
-
-     # Otherwise, block and check for completion on an interval
-     with anyio.move_on_after(timeout):
-         # attempt to check if a flow has resumed at least once
-         initial_sleep = min(timeout / 2, poll_interval)
-         await anyio.sleep(initial_sleep)
-         while True:
-             flow_run = await client.read_flow_run(context.flow_run.id)
-             if flow_run.state.is_running():
-                 logger.info("Resuming flow run execution!")
-                 if wait_for_input:
-                     return await wait_for_input.load(run_input_keyset)
-                 return
-             await anyio.sleep(poll_interval)
-
-     # check one last time before failing the flow
-     flow_run = await client.read_flow_run(context.flow_run.id)
-     if flow_run.state.is_running():
-         logger.info("Resuming flow run execution!")
-         if wait_for_input:
-             return await wait_for_input.load(run_input_keyset)
-         return
-
-     raise FlowPauseTimeout("Flow run was paused and never resumed.")
-
-
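The blocking branch above is a timeout-bounded polling loop. A standalone sketch of the same anyio pattern, with a generic async condition in place of the flow-run state check:

```python
import anyio


async def wait_until(condition, timeout: float = 3600, poll_interval: float = 10) -> bool:
    """Poll `condition` until it returns True or `timeout` seconds elapse."""
    with anyio.move_on_after(timeout):
        # Sleep before the first check so a quick resume is still observed
        await anyio.sleep(min(timeout / 2, poll_interval))
        while True:
            if await condition():
                return True
            await anyio.sleep(poll_interval)
    # The timeout window closed; check one last time before giving up
    return bool(await condition())
```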
- @deprecated_callable(
-     start_date="Jun 2024",
-     help="Will be moved in Prefect 3 to prefect.flow_runs.pause_flow_run.",
- )
- @inject_client
- async def _out_of_process_pause(
-     flow_run_id: UUID,
-     timeout: int = 3600,
-     reschedule: bool = True,
-     key: str = None,
-     client=None,
- ):
-     if not reschedule:
-         raise RuntimeError(
-             "Pausing a flow run out of process requires the `reschedule` option set to"
-             " True."
-         )
-
-     response = await client.set_flow_run_state(
-         flow_run_id,
-         Paused(timeout_seconds=timeout, reschedule=True, pause_key=key),
-     )
-     if response.status != SetStateStatus.ACCEPT:
-         raise RuntimeError(response.details.reason)
-
-
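Usage-wise, the out-of-process path is reached by passing a `flow_run_id` to `pause_flow_run`; because the paused run exits its infrastructure and is resubmitted on resume, the target run needs an associated deployment. A hedged sketch (the UUID is a placeholder):

```python
from uuid import UUID

from prefect import pause_flow_run

# Pause a *different* run by id. This always reschedules: the run's
# process exits and is resubmitted when resumed, which is also why
# `wait_for_input` is rejected on this path.
pause_flow_run(
    flow_run_id=UUID("00000000-0000-0000-0000-000000000000"),
    timeout=600,
)
```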
- @deprecated_callable(
-     start_date="Jun 2024",
-     help="Will be moved in Prefect 3 to prefect.flow_runs:suspend_flow_run",
- )
- @overload
- async def suspend_flow_run(
-     wait_for_input: None = None,
-     flow_run_id: Optional[UUID] = None,
-     timeout: Optional[int] = 3600,
-     key: Optional[str] = None,
-     client: PrefectClient = None,
- ) -> None:
-     ...
-
-
- @overload
- async def suspend_flow_run(
-     wait_for_input: Type[T],
-     flow_run_id: Optional[UUID] = None,
-     timeout: Optional[int] = 3600,
-     key: Optional[str] = None,
-     client: PrefectClient = None,
- ) -> T:
-     ...
-
-
- @sync_compatible
- @inject_client
- @experimental_parameter(
-     "wait_for_input", group="flow_run_input", when=lambda y: y is not None
- )
- async def suspend_flow_run(
-     wait_for_input: Optional[Type[T]] = None,
-     flow_run_id: Optional[UUID] = None,
-     timeout: Optional[int] = 3600,
-     key: Optional[str] = None,
-     client: PrefectClient = None,
- ) -> Optional[T]:
-     """
-     Suspends a flow run by stopping code execution until resumed.
-
-     When suspended, the flow run will continue execution until the NEXT task is
-     orchestrated, at which point the flow will exit. Any tasks that have
-     already started will run until completion. When resumed, the flow run will
-     be rescheduled to finish execution. In order to suspend a flow run in this
-     way, the flow needs to have an associated deployment and results need to be
-     configured with the `persist_results` option.
-
-     Args:
-         flow_run_id: a flow run id. If supplied, this function will attempt to
-             suspend the specified flow run. If not supplied, will attempt to
-             suspend the current flow run.
-         timeout: the number of seconds to wait for the flow to be resumed before
-             failing. Defaults to 1 hour (3600 seconds). If the pause timeout
-             exceeds any configured flow-level timeout, the flow might fail even
-             after resuming.
-         key: An optional key to prevent calling suspend more than once. This
-             defaults to a random string and prevents the same suspend from
-             running twice. A custom key can be supplied for custom
-             suspending behavior.
-         wait_for_input: a subclass of `RunInput` or any type supported by
-             Pydantic. If provided when the flow suspends, the flow will remain
-             suspended until receiving the input before resuming. If the flow is
-             resumed without providing the input, the flow will fail. If the flow is
-             resumed with the input, the flow will resume and the input will be
-             loaded and returned from this function.
-     """
-     context = FlowRunContext.get()
-
-     if flow_run_id is None:
-         if TaskRunContext.get():
-             raise RuntimeError("Cannot suspend task runs.")
-
-         if context is None or context.flow_run is None:
-             raise RuntimeError(
-                 "Flow runs can only be suspended from within a flow run."
-             )
-
-         logger = get_run_logger(context=context)
-         logger.info(
-             "Suspending flow run, execution will be rescheduled when this flow run is"
-             " resumed."
-         )
-         flow_run_id = context.flow_run.id
-         suspending_current_flow_run = True
-         pause_counter = _observed_flow_pauses(context)
-         pause_key = key or str(pause_counter)
-     else:
-         # Since we're suspending another flow run we need to generate a pause
-         # key that won't conflict with whatever suspends/pauses that flow may
-         # have. Since this method won't be called during that flow run it's
-         # okay that this is non-deterministic.
-         suspending_current_flow_run = False
-         pause_key = key or str(uuid4())
-
-     proposed_state = Suspended(timeout_seconds=timeout, pause_key=pause_key)
-
-     if wait_for_input:
-         wait_for_input = run_input_subclass_from_type(wait_for_input)
-         run_input_keyset = keyset_from_paused_state(proposed_state)
-         proposed_state.state_details.run_input_keyset = run_input_keyset
-
-     try:
-         state = await propose_state(
-             client=client,
-             state=proposed_state,
-             flow_run_id=flow_run_id,
-         )
-     except Abort as exc:
-         # Aborted requests mean the suspension is not allowed
-         raise RuntimeError(f"Flow run cannot be suspended: {exc}")
-
-     if state.is_running():
-         # The orchestrator rejected the suspended state which means that this
-         # suspend has happened before and the flow run has been resumed.
-         if wait_for_input:
-             # The flow run wanted input, so we need to load it and return it
-             # to the user.
-             return await wait_for_input.load(run_input_keyset)
-         return
-
-     if not state.is_paused():
-         # If we receive anything but a PAUSED state, we are unable to continue
-         raise RuntimeError(
-             f"Flow run cannot be suspended. Received unexpected state from API: {state}"
-         )
-
-     if wait_for_input:
-         await wait_for_input.save(run_input_keyset)
-
-     if suspending_current_flow_run:
-         # Exit this process so the run can be resubmitted later
-         raise Pause()
-
-
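A minimal sketch of suspending the current run, under the requirements stated in the docstring (a deployed flow with persisted results); the print calls are illustrative:

```python
from prefect import flow, suspend_flow_run


@flow(persist_result=True)  # results must be persisted to suspend
def long_running_flow():
    print("phase one complete")
    # The process exits here and infrastructure is torn down; on resume
    # the run is rescheduled and execution continues past this call.
    suspend_flow_run(timeout=3600)
    print("phase two, after resume")
```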
- @deprecated_callable(
-     start_date="Jun 2024",
-     help="Will be moved in Prefect 3 to prefect.flow_runs:resume_flow_run",
- )
- @sync_compatible
- async def resume_flow_run(flow_run_id, run_input: Optional[Dict] = None):
-     """
-     Resumes a paused flow.
-
-     Args:
-         flow_run_id: the flow_run_id to resume
-         run_input: a dictionary of inputs to provide to the flow run.
-     """
-     client = get_client()
-     async with client:
-         flow_run = await client.read_flow_run(flow_run_id)
-
-         if not flow_run.state.is_paused():
-             raise NotPausedError("Cannot resume a run that isn't paused!")
-
-         response = await client.resume_flow_run(flow_run_id, run_input=run_input)
-
-         if response.status == SetStateStatus.REJECT:
-             if response.state.type == StateType.FAILED:
-                 raise FlowPauseTimeout("Flow run can no longer be resumed.")
-             else:
-                 raise RuntimeError(f"Cannot resume this run: {response.details.reason}")
-
-
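Resuming pairs with the pause sketch above: the `run_input` keys must match the fields of the `RunInput` schema the run paused with. The id and values here are placeholders:

```python
from prefect import resume_flow_run

resume_flow_run(
    "00000000-0000-0000-0000-000000000000",  # placeholder flow run id
    run_input={"approved": True, "note": "looks good"},
)
```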
- def enter_task_run_engine(
-     task: Task,
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     return_type: EngineReturnType,
-     task_runner: Optional[BaseTaskRunner],
-     mapped: bool,
-     entering_from_task_run: Optional[bool] = False,
- ) -> Union[PrefectFuture, Awaitable[PrefectFuture], TaskRun]:
-     """Sync entrypoint for task calls"""
-
-     flow_run_context = FlowRunContext.get()
-
-     if not flow_run_context:
-         if return_type == "future" or mapped:
-             raise RuntimeError(
-                 "If you meant to submit a background task, you need to set"
-                 " `prefect config set PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING=true`"
-                 " and use `your_task.submit()` instead of `your_task()`."
-             )
-         from prefect.task_engine import submit_autonomous_task_run_to_engine
-
-         return submit_autonomous_task_run_to_engine(
-             task=task,
-             task_run=None,
-             parameters=parameters,
-             task_runner=task_runner,
-             wait_for=wait_for,
-             return_type=return_type,
-             client=get_client(),
-         )
-
-     if flow_run_context.timeout_scope and flow_run_context.timeout_scope.cancel_called:
-         raise TimeoutError("Flow run timed out")
-
-     call_arguments = {
-         "task": task,
-         "flow_run_context": flow_run_context,
-         "parameters": parameters,
-         "wait_for": wait_for,
-         "return_type": return_type,
-         "task_runner": task_runner,
-     }
-
-     if not mapped:
-         call_arguments["entering_from_task_run"] = entering_from_task_run
-
-     begin_run = create_call(
-         begin_task_map if mapped else get_task_call_return_value, **call_arguments
-     )
-
-     if task.isasync and (
-         flow_run_context.flow is None or flow_run_context.flow.isasync
-     ):
-         # return a coro for the user to await if an async task in an async flow
-         return from_async.wait_for_call_in_loop_thread(begin_run)
-     else:
-         return from_sync.wait_for_call_in_loop_thread(begin_run)
-
-
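Per the branch above, a plain task call with no flow run context is routed to `submit_autonomous_task_run_to_engine`, while `.submit()`-style calls (return type `future`) raise with instructions to enable the experimental task scheduling setting. A hedged sketch of the call-site difference:

```python
from prefect import flow, task


@task
def add(x: int, y: int) -> int:
    return x + y


@flow
def my_flow() -> int:
    # Inside a flow there is a FlowRunContext, so `.submit()` returns a
    # PrefectFuture tracked by the flow's task runner.
    return add.submit(1, 2).result()


# Outside any flow, the engine branch above submits an autonomous task run
# instead, subject to the experimental setting named in its error message:
# add(1, 2)
```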
- async def begin_task_map(
-     task: Task,
-     flow_run_context: Optional[FlowRunContext],
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     return_type: EngineReturnType,
-     task_runner: Optional[BaseTaskRunner],
-     autonomous: bool = False,
- ) -> List[Union[PrefectFuture, Awaitable[PrefectFuture], TaskRun]]:
-     """Async entrypoint for task mapping"""
-     # We need to resolve some futures to map over their data, collect the upstream
-     # links beforehand to retain relationship tracking.
-     task_inputs = {
-         k: await collect_task_run_inputs(v, max_depth=0) for k, v in parameters.items()
-     }
-
-     # Resolve the top-level parameters in order to get mappable data of a known length.
-     # Nested parameters will be resolved in each mapped child where their relationships
-     # will also be tracked.
-     parameters = await resolve_inputs(parameters, max_depth=1)
-
-     # Ensure that any parameters in kwargs are expanded before this check
-     parameters = explode_variadic_parameter(task.fn, parameters)
-
-     iterable_parameters = {}
-     static_parameters = {}
-     annotated_parameters = {}
-     for key, val in parameters.items():
-         if isinstance(val, (allow_failure, quote)):
-             # Unwrap annotated parameters to determine if they are iterable
-             annotated_parameters[key] = val
-             val = val.unwrap()
-
-         if isinstance(val, unmapped):
-             static_parameters[key] = val.value
-         elif isiterable(val):
-             iterable_parameters[key] = list(val)
-         else:
-             static_parameters[key] = val
-
-     if not len(iterable_parameters):
-         raise MappingMissingIterable(
-             "No iterable parameters were received. Parameters for map must "
-             f"include at least one iterable. Parameters: {parameters}"
-         )
-
-     iterable_parameter_lengths = {
-         key: len(val) for key, val in iterable_parameters.items()
-     }
-     lengths = set(iterable_parameter_lengths.values())
-     if len(lengths) > 1:
-         raise MappingLengthMismatch(
-             "Received iterable parameters with different lengths. Parameters for map"
-             f" must all be the same length. Got lengths: {iterable_parameter_lengths}"
-         )
-
-     map_length = list(lengths)[0]
-
-     task_runs = []
-     for i in range(map_length):
-         call_parameters = {key: value[i] for key, value in iterable_parameters.items()}
-         call_parameters.update({key: value for key, value in static_parameters.items()})
-
-         # Add default values for parameters; these are skipped earlier since they should
-         # not be mapped over
-         for key, value in get_parameter_defaults(task.fn).items():
-             call_parameters.setdefault(key, value)
-
-         # Re-apply annotations to each key again
-         for key, annotation in annotated_parameters.items():
-             call_parameters[key] = annotation.rewrap(call_parameters[key])
-
-         # Collapse any previously exploded kwargs
-         call_parameters = collapse_variadic_parameters(task.fn, call_parameters)
-
-         if autonomous:
-             task_runs.append(
-                 await create_autonomous_task_run(
-                     task=task,
-                     parameters=call_parameters,
-                 )
-             )
-         else:
-             task_runs.append(
-                 partial(
-                     get_task_call_return_value,
-                     task=task,
-                     flow_run_context=flow_run_context,
-                     parameters=call_parameters,
-                     wait_for=wait_for,
-                     return_type=return_type,
-                     task_runner=task_runner,
-                     extra_task_inputs=task_inputs,
-                 )
-             )
-
-     if autonomous:
-         return task_runs
-
-     # Maintain the order of the task runs when using the sequential task runner
-     runner = task_runner if task_runner else flow_run_context.task_runner
-     if runner.concurrency_type == TaskConcurrencyType.SEQUENTIAL:
-         return [await task_run() for task_run in task_runs]
-
-     return await gather(*task_runs)
-
-
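The iterable/static split above is what `Task.map` relies on: plain iterables are mapped over, while `unmapped` values are broadcast to every child run. A short sketch using the public 2.x API:

```python
from prefect import flow, task, unmapped


@task
def scale(x: int, factor: int) -> int:
    return x * factor


@flow
def scaling_flow() -> list[int]:
    # `[1, 2, 3]` is iterable, so three child runs are created;
    # `unmapped(10)` is treated as a static parameter for each of them.
    futures = scale.map([1, 2, 3], factor=unmapped(10))
    return [future.result() for future in futures]
```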
- async def get_task_call_return_value(
-     task: Task,
-     flow_run_context: FlowRunContext,
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     return_type: EngineReturnType,
-     task_runner: Optional[BaseTaskRunner],
-     extra_task_inputs: Optional[Dict[str, Set[TaskRunInput]]] = None,
-     entering_from_task_run: Optional[bool] = False,
- ):
-     extra_task_inputs = extra_task_inputs or {}
-
-     future = await create_task_run_future(
-         task=task,
-         flow_run_context=flow_run_context,
-         parameters=parameters,
-         wait_for=wait_for,
-         task_runner=task_runner,
-         extra_task_inputs=extra_task_inputs,
-         entering_from_task_run=entering_from_task_run,
-     )
-     if return_type == "future":
-         return future
-     elif return_type == "state":
-         return await future._wait()
-     elif return_type == "result":
-         return await future._result()
-     else:
-         raise ValueError(f"Invalid return type for task engine {return_type!r}.")
-
-
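The three `return_type` branches correspond to how a task is invoked. A sketch of the mapping in user code (Prefect 2.x, inside a flow):

```python
from prefect import flow, task


@task
def double(x: int) -> int:
    return 2 * x


@flow
def demo():
    value = double(2)                     # return_type="result" -> 4
    future = double.submit(3)             # return_type="future" -> PrefectFuture
    state = double(4, return_state=True)  # return_type="state"  -> Completed state
    return value, future.result(), state.result()
```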
- async def create_task_run_future(
-     task: Task,
-     flow_run_context: FlowRunContext,
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     task_runner: Optional[BaseTaskRunner],
-     extra_task_inputs: Dict[str, Set[TaskRunInput]],
-     entering_from_task_run: Optional[bool] = False,
- ) -> PrefectFuture:
-     # Default to the flow run's task runner
-     task_runner = task_runner or flow_run_context.task_runner
-
-     # Generate a name for the future
-     dynamic_key = _dynamic_key_for_task_run(flow_run_context, task)
-
-     task_run_name = (
-         f"{task.name}-{dynamic_key}"
-         if flow_run_context and flow_run_context.flow_run
-         else f"{task.name}-{dynamic_key[:NUM_CHARS_DYNAMIC_KEY]}"  # autonomous task run
-     )
-
-     # Generate a future
-     future = PrefectFuture(
-         name=task_run_name,
-         key=uuid4(),
-         task_runner=task_runner,
-         asynchronous=(
-             task.isasync and flow_run_context.flow.isasync
-             if flow_run_context and flow_run_context.flow
-             else task.isasync
-         ),
-     )
-
-     # Create and submit the task run in the background
-     flow_run_context.background_tasks.start_soon(
-         partial(
-             create_task_run_then_submit,
-             task=task,
-             task_run_name=task_run_name,
-             task_run_dynamic_key=dynamic_key,
-             future=future,
-             flow_run_context=flow_run_context,
-             parameters=parameters,
-             wait_for=wait_for,
-             task_runner=task_runner,
-             extra_task_inputs=extra_task_inputs,
-         )
-     )
-
-     if not entering_from_task_run:
-         # Track the task run future in the flow run context
-         flow_run_context.task_run_futures.append(future)
-
-     if task_runner.concurrency_type == TaskConcurrencyType.SEQUENTIAL:
-         await future._wait()
-
-     # Return the future without waiting for task run creation or submission
-     return future
-
-
- async def create_task_run_then_submit(
-     task: Task,
-     task_run_name: str,
-     task_run_dynamic_key: str,
-     future: PrefectFuture,
-     flow_run_context: FlowRunContext,
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     task_runner: BaseTaskRunner,
-     extra_task_inputs: Dict[str, Set[TaskRunInput]],
- ) -> None:
-     task_run = (
-         await create_task_run(
-             task=task,
-             name=task_run_name,
-             flow_run_context=flow_run_context,
-             parameters=parameters,
-             dynamic_key=task_run_dynamic_key,
-             wait_for=wait_for,
-             extra_task_inputs=extra_task_inputs,
-         )
-         if not flow_run_context.autonomous_task_run
-         else flow_run_context.autonomous_task_run
-     )
-
-     # Attach the task run to the future to support `get_state` operations
-     future.task_run = task_run
-
-     await submit_task_run(
-         task=task,
-         future=future,
-         flow_run_context=flow_run_context,
-         parameters=parameters,
-         task_run=task_run,
-         wait_for=wait_for,
-         task_runner=task_runner,
-     )
-
-     future._submitted.set()
-
-
- async def create_task_run(
-     task: Task,
-     name: str,
-     flow_run_context: FlowRunContext,
-     parameters: Dict[str, Any],
-     dynamic_key: str,
-     wait_for: Optional[Iterable[PrefectFuture]],
-     extra_task_inputs: Dict[str, Set[TaskRunInput]],
- ) -> TaskRun:
-     task_inputs = {k: await collect_task_run_inputs(v) for k, v in parameters.items()}
-     if wait_for:
-         task_inputs["wait_for"] = await collect_task_run_inputs(wait_for)
-
-     # Join extra task inputs
-     for k, extras in extra_task_inputs.items():
-         task_inputs[k] = task_inputs[k].union(extras)
-
-     logger = get_run_logger(flow_run_context)
-
-     task_run = await flow_run_context.client.create_task_run(
-         task=task,
-         name=name,
-         flow_run_id=flow_run_context.flow_run.id if flow_run_context.flow_run else None,
-         dynamic_key=dynamic_key,
-         state=Pending(),
-         extra_tags=TagsContext.get().current_tags,
-         task_inputs=task_inputs,
-     )
-
-     if flow_run_context.flow_run:
-         logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
-     else:
-         engine_logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
-
-     return task_run
-
-
- async def submit_task_run(
-     task: Task,
-     future: PrefectFuture,
-     flow_run_context: FlowRunContext,
-     parameters: Dict[str, Any],
-     task_run: TaskRun,
-     wait_for: Optional[Iterable[PrefectFuture]],
-     task_runner: BaseTaskRunner,
- ) -> PrefectFuture:
-     logger = get_run_logger(flow_run_context)
-
-     if (
-         task_runner.concurrency_type == TaskConcurrencyType.SEQUENTIAL
-         and flow_run_context.flow_run
-     ):
-         logger.info(f"Executing {task_run.name!r} immediately...")
-
-     future = await task_runner.submit(
-         key=future.key,
-         call=partial(
-             begin_task_run,
-             task=task,
-             task_run=task_run,
-             parameters=parameters,
-             wait_for=wait_for,
-             result_factory=await ResultFactory.from_task(
-                 task, client=flow_run_context.client
-             ),
-             log_prints=should_log_prints(task),
-             settings=prefect.context.SettingsContext.get().copy(),
-         ),
-     )
-
-     if (
-         task_runner.concurrency_type != TaskConcurrencyType.SEQUENTIAL
-         and not flow_run_context.autonomous_task_run
-     ):
-         logger.info(f"Submitted task run {task_run.name!r} for execution.")
-
-     return future
-
-
- async def begin_task_run(
-     task: Task,
-     task_run: TaskRun,
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     result_factory: ResultFactory,
-     log_prints: bool,
-     settings: prefect.context.SettingsContext,
- ):
-     """
-     Entrypoint for task run execution.
-
-     This function is intended for submission to the task runner.
-
-     This method may be called from a worker so we ensure the settings context has been
-     entered. For example, with a runner that is executing tasks in the same event loop,
-     we will likely not enter the context again because the current context already
-     matches:
-
-     main thread:
-     --> Flow called with settings A
-     --> `begin_task_run` executes same event loop
-     --> Profile A matches and is not entered again
-
-     However, with execution on a remote environment, we are going to need to ensure the
-     settings for the task run are respected by entering the context:
-
-     main thread:
-     --> Flow called with settings A
-     --> `begin_task_run` is scheduled on a remote worker, settings A is serialized
-     remote worker:
-     --> Remote worker imports Prefect (may not occur)
-     --> Global settings is loaded with default settings
-     --> `begin_task_run` executes on a different event loop than the flow
-     --> Current settings is not set or does not match, settings A is entered
-     """
-     maybe_flow_run_context = prefect.context.FlowRunContext.get()
-
-     async with AsyncExitStack() as stack:
-         # The settings context may be null on a remote worker so we use the safe `.get`
-         # method and compare it to the settings required for this task run
-         if prefect.context.SettingsContext.get() != settings:
-             stack.enter_context(settings)
-             setup_logging()
-
-         if maybe_flow_run_context:
-             # Accessible if on a worker that is running in the same thread as the flow
-             client = maybe_flow_run_context.client
-             # Only run the task in an interruptible thread if it is in the same thread
-             # as the flow _and_ the flow run has a timeout attached. If the task is on
-             # a worker, the flow run timeout will not be raised in the worker process.
-             interruptible = maybe_flow_run_context.timeout_scope is not None
-         else:
-             # Otherwise, retrieve a new client
-             client = await stack.enter_async_context(get_client())
-             interruptible = False
-             await stack.enter_async_context(anyio.create_task_group())
-
-         await stack.enter_async_context(report_task_run_crashes(task_run, client))
-
-         # TODO: Use the background tasks group to manage logging for this task
-
-         if log_prints:
-             stack.enter_context(patch_print())
-
-         await check_api_reachable(
-             client, f"Cannot orchestrate task run '{task_run.id}'"
-         )
-         try:
-             state = await orchestrate_task_run(
-                 task=task,
-                 task_run=task_run,
-                 parameters=parameters,
-                 wait_for=wait_for,
-                 result_factory=result_factory,
-                 log_prints=log_prints,
-                 interruptible=interruptible,
-                 client=client,
-             )
-
-             if not maybe_flow_run_context:
-                 # When a task run finishes on a remote worker flush logs to prevent
-                 # loss if the process exits
-                 await APILogHandler.aflush()
-
-         except Abort as abort:
-             # Task run probably already completed, fetch its state
-             task_run = await client.read_task_run(task_run.id)
-
-             if task_run.state.is_final():
-                 task_run_logger(task_run).info(
-                     f"Task run '{task_run.id}' already finished."
-                 )
-             else:
-                 # TODO: This is a concerning case; we should determine when this occurs
-                 # 1. This can occur when the flow run is not in a running state
-                 task_run_logger(task_run).warning(
-                     f"Task run '{task_run.id}' received abort during orchestration: "
-                     f"{abort} Task run is in {task_run.state.type.value} state."
-                 )
-             state = task_run.state
-
-         except Pause:
-             # A pause signal here should mean the flow run suspended, so we
-             # should do the same. We'll look up the flow run's pause state to
-             # try and reuse it, so we capture any data like timeouts.
-             flow_run = await client.read_flow_run(task_run.flow_run_id)
-             if flow_run.state and flow_run.state.is_paused():
-                 state = flow_run.state
-             else:
-                 state = Suspended()
-
-             task_run_logger(task_run).info(
-                 "Task run encountered a pause signal during orchestration."
-             )
-
-         return state
-
-
- async def orchestrate_task_run(
-     task: Task,
-     task_run: TaskRun,
-     parameters: Dict[str, Any],
-     wait_for: Optional[Iterable[PrefectFuture]],
-     result_factory: ResultFactory,
-     log_prints: bool,
-     interruptible: bool,
-     client: PrefectClient,
- ) -> State:
-     """
-     Execute a task run
-
-     This function should be submitted to a task runner. We must construct the context
-     here instead of receiving it already populated since we may be in a new environment.
-
-     Proposes a RUNNING state, then
-     - if accepted, the task user function will be run
-     - if rejected, the received state will be returned
-
-     When the user function is run, the result will be used to determine a final state
-     - if an exception is encountered, it is trapped and stored in a FAILED state
-     - otherwise, `return_value_to_state` is used to determine the state
-
-     If the final state is COMPLETED, we generate a cache key as specified by the task
-
-     The final state is then proposed
-     - if accepted, this is the final state and will be returned
-     - if rejected and a new final state is provided, it will be returned
-     - if rejected and a non-final state is provided, we will attempt to enter a RUNNING
-       state again
-
-     Returns:
-         The final state of the run
-     """
-     flow_run_context = prefect.context.FlowRunContext.get()
-     if flow_run_context:
-         flow_run = flow_run_context.flow_run
-     else:
-         flow_run = await client.read_flow_run(task_run.flow_run_id)
-     logger = task_run_logger(task_run, task=task, flow_run=flow_run)
-
-     partial_task_run_context = TaskRunContext.construct(
-         task_run=task_run,
-         task=task,
-         client=client,
-         result_factory=result_factory,
-         log_prints=log_prints,
-     )
-     task_introspection_start_time = time.perf_counter()
-     try:
-         # Resolve futures in parameters into data
-         resolved_parameters = await resolve_inputs(parameters)
-         # Resolve futures in any non-data dependencies to ensure they are ready
-         await resolve_inputs({"wait_for": wait_for}, return_data=False)
-     except UpstreamTaskError as upstream_exc:
-         return await propose_state(
-             client,
-             Pending(name="NotReady", message=str(upstream_exc)),
-             task_run_id=task_run.id,
-             # if orchestrating a run already in a pending state, force orchestration to
-             # update the state name
-             force=task_run.state.is_pending(),
-         )
-     task_introspection_end_time = time.perf_counter()
-
-     introspection_time = round(
-         task_introspection_end_time - task_introspection_start_time, 3
-     )
-     threshold = PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD.value()
-     if threshold and introspection_time > threshold:
-         logger.warning(
-             f"Task parameter introspection took {introspection_time} seconds, "
-             f"exceeding `PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD` of {threshold}. "
-             "Try wrapping large task parameters with "
-             "`prefect.utilities.annotations.quote` for increased performance, "
-             "e.g. `my_task(quote(param))`. To disable this message set "
-             "`PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD=0`."
-         )
-
-     # Generate the cache key to attach to proposed states
-     # The cache key uses a TaskRunContext that does not include a `timeout_context`
-
-     task_run_context = TaskRunContext(
-         **partial_task_run_context.dict(), parameters=resolved_parameters
-     )
-
-     cache_key = (
-         task.cache_key_fn(
-             task_run_context,
-             resolved_parameters,
-         )
-         if task.cache_key_fn
-         else None
-     )
-
-     # Ignore the cached results for a cache key, default = false
-     # Setting on task level overrules the Prefect setting (env var)
-     refresh_cache = (
-         task.refresh_cache
-         if task.refresh_cache is not None
-         else PREFECT_TASKS_REFRESH_CACHE.value()
-     )
-
-     # Emit an event to capture that the task run was in the `PENDING` state.
-     last_event = emit_task_run_state_change_event(
-         task_run=task_run, initial_state=None, validated_state=task_run.state
-     )
-     last_state = (
-         Pending()
-         if flow_run_context and flow_run_context.autonomous_task_run
-         else task_run.state
-     )
-
-     # Completed states with persisted results should have result data. If it's missing,
-     # this could be a manual state transition, so we should use the Unknown result type
-     # to represent that we know we don't know the result.
-     if (
-         last_state
-         and last_state.is_completed()
-         and result_factory.persist_result
-         and not last_state.data
-     ):
-         state = await propose_state(
-             client,
-             state=Completed(data=await UnknownResult.create()),
-             task_run_id=task_run.id,
-             force=True,
-         )
-
-     # Transition from `PENDING` -> `RUNNING`
-     try:
-         state = await propose_state(
-             client,
-             Running(
-                 state_details=StateDetails(
-                     cache_key=cache_key, refresh_cache=refresh_cache
-                 )
-             ),
-             task_run_id=task_run.id,
-         )
-     except Pause as exc:
-         # We shouldn't get a pause signal without a state, but if this happens,
-         # just use a Paused state to assume an in-process pause.
-         state = exc.state if exc.state else Paused()
-
-         # If a flow submits tasks and then pauses, we may reach this point due
-         # to concurrency timing because the tasks will try to transition after
-         # the flow run has paused. Orchestration will send back a Paused state
-         # for the task runs.
-         if state.state_details.pause_reschedule:
-             # If we're being asked to pause and reschedule, we should exit the
-             # task and expect to be resumed later.
-             raise
-
-     if state.is_paused():
-         BACKOFF_MAX = 10  # Seconds
-         backoff_count = 0
-
-         async def tick():
-             nonlocal backoff_count
-             if backoff_count < BACKOFF_MAX:
-                 backoff_count += 1
-             interval = 1 + backoff_count + random.random() * backoff_count
-             await anyio.sleep(interval)
-
-         # Enter a loop to wait for the task run to be resumed, i.e.
-         # become Pending, and then propose a Running state again.
-         while True:
-             await tick()
-
-             # Propose a Running state again. We do this instead of reading the
-             # task run because if the flow run times out, this lets
-             # orchestration fail the task run.
-             try:
-                 state = await propose_state(
-                     client,
-                     Running(
-                         state_details=StateDetails(
-                             cache_key=cache_key, refresh_cache=refresh_cache
-                         )
-                     ),
-                     task_run_id=task_run.id,
-                 )
-             except Pause as exc:
-                 if not exc.state:
-                     continue
-
-                 if exc.state.state_details.pause_reschedule:
-                     # If the pause state includes pause_reschedule, we should exit the
-                     # task and expect to be resumed later. We've already checked for this
-                     # above, but we check again here in case the state changed; e.g. the
-                     # flow run suspended.
-                     raise
-                 else:
-                     # Propose a Running state again.
-                     continue
-             else:
-                 break
-
-     # Emit an event to capture the result of proposing a `RUNNING` state.
-     last_event = emit_task_run_state_change_event(
-         task_run=task_run,
-         initial_state=last_state,
-         validated_state=state,
-         follows=last_event,
-     )
-     last_state = state
-
-     # flag to ensure we only update the task run name once
-     run_name_set = False
-
-     # Only run the task if we enter a `RUNNING` state
-     while state.is_running():
-         # Retrieve the latest metadata for the task run context
-         task_run = await client.read_task_run(task_run.id)
-
-         with task_run_context.copy(
-             update={"task_run": task_run, "start_time": pendulum.now("UTC")}
-         ):
-             try:
-                 args, kwargs = parameters_to_args_kwargs(task.fn, resolved_parameters)
-                 # update task run name
-                 if not run_name_set and task.task_run_name:
-                     task_run_name = _resolve_custom_task_run_name(
-                         task=task, parameters=resolved_parameters
-                     )
-                     await client.set_task_run_name(
-                         task_run_id=task_run.id, name=task_run_name
-                     )
-                     logger.extra["task_run_name"] = task_run_name
-                     logger.debug(
-                         f"Renamed task run {task_run.name!r} to {task_run_name!r}"
-                     )
-                     task_run.name = task_run_name
-                     run_name_set = True
-
-                 if PREFECT_DEBUG_MODE.value():
-                     logger.debug(f"Executing {call_repr(task.fn, *args, **kwargs)}")
-                 else:
-                     logger.debug(
-                         "Beginning execution...", extra={"state_message": True}
-                     )
-
-                 call = from_async.call_soon_in_new_thread(
-                     create_call(task.fn, *args, **kwargs), timeout=task.timeout_seconds
-                 )
-                 result = await call.aresult()
-
-             except (CancelledError, asyncio.CancelledError) as exc:
-                 if not call.timedout():
-                     # If the task call was not cancelled by us; this is a crash
-                     raise
-                 # Construct a new exception as `TimeoutError`
-                 original = exc
-                 exc = TimeoutError()
-                 exc.__cause__ = original
-                 logger.exception("Encountered exception during execution:")
-                 terminal_state = await exception_to_failed_state(
-                     exc,
-                     message=(
-                         f"Task run exceeded timeout of {task.timeout_seconds} seconds"
-                     ),
-                     result_factory=task_run_context.result_factory,
-                     name="TimedOut",
-                 )
-             except Exception as exc:
-                 logger.exception("Encountered exception during execution:")
-                 terminal_state = await exception_to_failed_state(
-                     exc,
-                     message="Task run encountered an exception",
-                     result_factory=task_run_context.result_factory,
-                 )
-             else:
-                 terminal_state = await return_value_to_state(
-                     result,
-                     result_factory=task_run_context.result_factory,
-                 )
-
-             # for COMPLETED tasks, add the cache key and expiration
-             if terminal_state.is_completed():
-                 terminal_state.state_details.cache_expiration = (
-                     (pendulum.now("utc") + task.cache_expiration)
-                     if task.cache_expiration
-                     else None
-                 )
-                 terminal_state.state_details.cache_key = cache_key
-
-             if terminal_state.is_failed():
-                 # Defer to user to decide whether failure is retriable
-                 terminal_state.state_details.retriable = (
-                     await _check_task_failure_retriable(task, task_run, terminal_state)
-                 )
-             state = await propose_state(client, terminal_state, task_run_id=task_run.id)
-             last_event = emit_task_run_state_change_event(
-                 task_run=task_run,
-                 initial_state=last_state,
-                 validated_state=state,
-                 follows=last_event,
-             )
-             last_state = state
-
-             await _run_task_hooks(
-                 task=task,
-                 task_run=task_run,
-                 state=state,
-             )
-
-             if state.type != terminal_state.type and PREFECT_DEBUG_MODE:
-                 logger.debug(
-                     (
-                         f"Received new state {state} when proposing final state"
-                         f" {terminal_state}"
-                     ),
-                     extra={"send_to_api": False},
-                 )
-
-             if not state.is_final() and not state.is_paused():
-                 logger.info(
-                     (
-                         f"Received non-final state {state.name!r} when proposing final"
-                         f" state {terminal_state.name!r} and will attempt to run"
-                         " again..."
-                     ),
-                 )
-                 # Attempt to enter a running state again
-                 state = await propose_state(client, Running(), task_run_id=task_run.id)
-                 last_event = emit_task_run_state_change_event(
-                     task_run=task_run,
-                     initial_state=last_state,
-                     validated_state=state,
-                     follows=last_event,
-                 )
-                 last_state = state
-
-     # If debugging, use the more complete `repr` than the usual `str` description
-     display_state = repr(state) if PREFECT_DEBUG_MODE else str(state)
-
-     logger.log(
-         level=logging.INFO if state.is_completed() else logging.ERROR,
-         msg=f"Finished in state {display_state}",
-     )
-     return state
-
-
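The inner `tick` helper above implements a capped, jittered backoff for the pause-polling loop. A standalone sketch of the interval schedule it produces (pure Python, no Prefect imports):

```python
import random

BACKOFF_MAX = 10  # cap on the backoff counter, mirroring the engine code


def backoff_intervals(ticks: int = 12) -> list[float]:
    """Reproduce the sleep intervals: the counter grows by one per tick up
    to BACKOFF_MAX, and each interval is 1 + count plus up to `count`
    seconds of random jitter."""
    count = 0
    intervals = []
    for _ in range(ticks):
        if count < BACKOFF_MAX:
            count += 1
        intervals.append(1 + count + random.random() * count)
    return intervals


# Intervals climb toward an 11-21 second band and then plateau
print(backoff_intervals())
```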
- @asynccontextmanager
- async def report_flow_run_crashes(flow_run: FlowRun, client: PrefectClient, flow: Flow):
-     """
-     Detect flow run crashes during this context and update the run to a proper final
-     state.
-
-     This context _must_ reraise the exception to properly exit the run.
-     """
-     try:
-         with collapse_excgroups():
-             yield
-     except (Abort, Pause):
-         # Do not capture internal signals as crashes
-         raise
-     except BaseException as exc:
-         state = await exception_to_crashed_state(exc)
-         logger = flow_run_logger(flow_run)
-         with anyio.CancelScope(shield=True):
-             logger.error(f"Crash detected! {state.message}")
-             logger.debug("Crash details:", exc_info=exc)
-             flow_run_state = await propose_state(client, state, flow_run_id=flow_run.id)
-             engine_logger.debug(
-                 f"Reported crashed flow run {flow_run.name!r} successfully!"
-             )
-
-             # Only `on_crashed` and `on_cancellation` flow run state change hooks can be called here.
-             # We call the hooks after the state change proposal to `CRASHED` is validated
-             # or rejected (if it is in a `CANCELLING` state).
-             await _run_flow_hooks(
-                 flow=flow,
-                 flow_run=flow_run,
-                 state=flow_run_state,
-             )
-
-         # Reraise the exception
-         raise
-
-
- @asynccontextmanager
- async def report_task_run_crashes(task_run: TaskRun, client: PrefectClient):
-     """
-     Detect task run crashes during this context and update the run to a proper final
-     state.
-
-     This context _must_ reraise the exception to properly exit the run.
-     """
-     try:
-         with collapse_excgroups():
-             yield
-     except (Abort, Pause):
-         # Do not capture internal signals as crashes
-         raise
-     except BaseException as exc:
-         state = await exception_to_crashed_state(exc)
-         logger = task_run_logger(task_run)
-         with anyio.CancelScope(shield=True):
-             logger.error(f"Crash detected! {state.message}")
-             logger.debug("Crash details:", exc_info=exc)
-             await client.set_task_run_state(
-                 state=state,
-                 task_run_id=task_run.id,
-                 force=True,
-             )
-             engine_logger.debug(
-                 f"Reported crashed task run {task_run.name!r} successfully!"
-             )
-
-         # Reraise the exception
-         raise
-
-
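Both crash reporters follow the same shape: pass expected control-flow signals through, report anything else under a shielded cancel scope, and always re-raise. A generic sketch of the pattern, not the Prefect implementation:

```python
from contextlib import asynccontextmanager

import anyio


class ControlFlowSignal(BaseException):
    """Stand-in for signals like Abort/Pause that are not crashes."""


@asynccontextmanager
async def report_crashes(report):
    try:
        yield
    except ControlFlowSignal:
        raise  # expected signals pass through untouched
    except BaseException as exc:
        # Shield the report from cancellation so it is delivered even
        # while the surrounding task group is being torn down.
        with anyio.CancelScope(shield=True):
            await report(exc)
        raise  # the context must re-raise to properly exit the run
```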
- async def _run_task_hooks(task: Task, task_run: TaskRun, state: State) -> None:
-     """Run the on_failure and on_completion hooks for a task, making sure to
-     catch and log any errors that occur.
-     """
-     hooks = None
-     if state.is_failed() and task.on_failure:
-         hooks = task.on_failure
-     elif state.is_completed() and task.on_completion:
-         hooks = task.on_completion
-
-     if hooks:
-         logger = task_run_logger(task_run)
-         for hook in hooks:
-             hook_name = _get_hook_name(hook)
-             try:
-                 logger.info(
-                     f"Running hook {hook_name!r} in response to entering state"
-                     f" {state.name!r}"
-                 )
-                 if is_async_fn(hook):
-                     await hook(task=task, task_run=task_run, state=state)
-                 else:
-                     await from_async.call_in_new_thread(
-                         create_call(hook, task=task, task_run=task_run, state=state)
-                     )
-             except Exception:
-                 logger.error(
-                     f"An error was encountered while running hook {hook_name!r}",
-                     exc_info=True,
-                 )
-             else:
-                 logger.info(f"Hook {hook_name!r} finished running successfully")
-
-
- async def _check_task_failure_retriable(
-     task: Task, task_run: TaskRun, state: State
- ) -> bool:
-     """Run the `retry_condition_fn` callable for a task, making sure to catch and log
-     any errors that occur. If the callable is `None`, returns True; if calling it
-     raises an error, logs the error and returns False.
-     """
-     if task.retry_condition_fn is None:
-         return True
-
-     logger = task_run_logger(task_run)
-
-     try:
-         logger.debug(
-             f"Running `retry_condition_fn` check {task.retry_condition_fn!r} for task"
-             f" {task.name!r}"
-         )
-         if is_async_fn(task.retry_condition_fn):
-             return bool(
-                 await task.retry_condition_fn(task=task, task_run=task_run, state=state)
-             )
-         else:
-             return bool(
-                 await from_async.call_in_new_thread(
-                     create_call(
-                         task.retry_condition_fn,
-                         task=task,
-                         task_run=task_run,
-                         state=state,
-                     )
-                 )
-             )
-     except Exception:
-         logger.error(
-             (
-                 "An error was encountered while running `retry_condition_fn` check"
-                 f" '{task.retry_condition_fn!r}' for task {task.name!r}"
-             ),
-             exc_info=True,
-         )
-         return False
-
-
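A hedged sketch of how a retry condition surfaces in the task API: `retry_condition_fn` receives the task, the task run, and the proposed failed state, and returns whether the failure is retriable (the HTTP predicate here is illustrative):

```python
import httpx
from prefect import task


def retry_on_server_errors(task, task_run, state) -> bool:
    """Retry only when the failure looks transient (an HTTP 5xx)."""
    try:
        state.result()  # re-raises the exception stored in the failed state
    except httpx.HTTPStatusError as exc:
        return exc.response.status_code >= 500
    except Exception:
        return False
    return False


@task(retries=3, retry_condition_fn=retry_on_server_errors)
def fetch(url: str) -> str:
    response = httpx.get(url)
    response.raise_for_status()
    return response.text
```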
- async def _run_flow_hooks(flow: Flow, flow_run: FlowRun, state: State) -> None:
-     """Run the on_running, on_failure, on_completion, on_cancellation, and on_crashed
-     hooks for a flow, making sure to catch and log any errors that occur.
-     """
-     hooks = None
-     enable_cancellation_and_crashed_hooks = (
-         os.environ.get("PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS", "true").lower()
-         == "true"
-     )
-
-     if state.is_running() and flow.on_running:
-         hooks = flow.on_running
-     elif state.is_failed() and flow.on_failure:
-         hooks = flow.on_failure
-     elif state.is_completed() and flow.on_completion:
-         hooks = flow.on_completion
-     elif (
-         enable_cancellation_and_crashed_hooks
-         and state.is_cancelling()
-         and flow.on_cancellation
-     ):
-         hooks = flow.on_cancellation
-     elif (
-         enable_cancellation_and_crashed_hooks and state.is_crashed() and flow.on_crashed
-     ):
-         hooks = flow.on_crashed
-
-     if hooks:
-         logger = flow_run_logger(flow_run)
-         for hook in hooks:
-             hook_name = _get_hook_name(hook)
-             try:
-                 logger.info(
-                     f"Running hook {hook_name!r} in response to entering state"
-                     f" {state.name!r}"
-                 )
-                 if is_async_fn(hook):
-                     await hook(flow=flow, flow_run=flow_run, state=state)
-                 else:
-                     await from_async.call_in_new_thread(
-                         create_call(hook, flow=flow, flow_run=flow_run, state=state)
-                     )
-             except Exception:
-                 logger.error(
-                     f"An error was encountered while running hook {hook_name!r}",
-                     exc_info=True,
-                 )
-             else:
-                 logger.info(f"Hook {hook_name!r} finished running successfully")
-
-
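State hooks are attached at flow definition time; a brief sketch of the corresponding user-facing API (hook names are illustrative):

```python
from prefect import flow


def notify_failure(flow, flow_run, state):
    print(f"{flow_run.name} failed: {state.message}")


def notify_success(flow, flow_run, state):
    print(f"{flow_run.name} completed successfully")


@flow(on_failure=[notify_failure], on_completion=[notify_success])
def monitored_flow() -> str:
    return "ok"
```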
- async def create_autonomous_task_run(task: Task, parameters: Dict[str, Any]) -> TaskRun:
-     """Create a task run in the API for an autonomous task submission and store
-     the provided parameters using the existing result storage mechanism.
-     """
-     async with get_client() as client:
-         state = Scheduled()
-         if parameters:
-             parameters_id = uuid4()
-             state.state_details.task_parameters_id = parameters_id
-
-             # TODO: Improve use of result storage for parameter storage / reference
-             task.persist_result = True
-
-             factory = await ResultFactory.from_autonomous_task(task, client=client)
-             await factory.store_parameters(parameters_id, parameters)
-
-         task_run = await client.create_task_run(
-             task=task,
-             flow_run_id=None,
-             dynamic_key=f"{task.task_key}-{str(uuid4())[:NUM_CHARS_DYNAMIC_KEY]}",
-             state=state,
-         )
-
-         engine_logger.debug(f"Submitted run of task {task.name!r} for execution")
-
-     return task_run
-
-
  if __name__ == "__main__":
      try:
          flow_run_id = UUID(
@@ -2479,21 +29,18 @@ if __name__ == "__main__":
          exit(1)

      try:
-         if PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE.value():
-             from prefect.new_flow_engine import (
-                 load_flow_and_flow_run,
-                 run_flow_async,
-                 run_flow_sync,
-             )
+         from prefect.flow_engine import (
+             load_flow_and_flow_run,
+             run_flow,
+         )

-             flow_run, flow = run_sync(load_flow_and_flow_run)
-             # run the flow
-             if flow.isasync:
-                 run_sync(run_flow_async(flow, flow_run=flow_run))
-             else:
-                 run_flow_sync(flow, flow_run=flow_run)
+         flow_run, flow = load_flow_and_flow_run(flow_run_id=flow_run_id)
+         # run the flow
+         if flow.isasync:
+             run_coro_as_sync(run_flow(flow, flow_run=flow_run))
          else:
-             enter_flow_run_engine_from_subprocess(flow_run_id)
+             run_flow(flow, flow_run=flow_run)
+
      except Abort as exc:
          engine_logger.info(
              f"Engine execution of flow run '{flow_run_id}' aborted by orchestrator:"
@@ -2524,3 +71,5 @@ if __name__ == "__main__":
          )
          # Let the exit code be determined by the base exception type
          raise
+
+ __getattr__ = getattr_migration(__name__)
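The new module-level `__getattr__` hook (PEP 562) is how 3.0 surfaces guidance when code imports names that moved or were removed; `getattr_migration` comes from the new `prefect/_internal/compatibility/migration.py`. A generic sketch of the mechanism, not Prefect's implementation (the mapping and message are illustrative):

```python
# A module can intercept lookups of attributes it no longer defines.
_MOVED = {
    "pause_flow_run": "prefect.flow_runs",  # illustrative mapping
}


def __getattr__(name: str):
    if name in _MOVED:
        raise ImportError(
            f"`{name}` has moved to `{_MOVED[name]}`; update your import."
        )
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```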