prefect-client 2.20.4__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (288)
  1. prefect/__init__.py +74 -110
  2. prefect/_internal/compatibility/deprecated.py +6 -115
  3. prefect/_internal/compatibility/experimental.py +4 -79
  4. prefect/_internal/compatibility/migration.py +166 -0
  5. prefect/_internal/concurrency/__init__.py +2 -2
  6. prefect/_internal/concurrency/api.py +1 -35
  7. prefect/_internal/concurrency/calls.py +0 -6
  8. prefect/_internal/concurrency/cancellation.py +0 -3
  9. prefect/_internal/concurrency/event_loop.py +0 -20
  10. prefect/_internal/concurrency/inspection.py +3 -3
  11. prefect/_internal/concurrency/primitives.py +1 -0
  12. prefect/_internal/concurrency/services.py +23 -0
  13. prefect/_internal/concurrency/threads.py +35 -0
  14. prefect/_internal/concurrency/waiters.py +0 -28
  15. prefect/_internal/integrations.py +7 -0
  16. prefect/_internal/pydantic/__init__.py +0 -45
  17. prefect/_internal/pydantic/annotations/pendulum.py +2 -2
  18. prefect/_internal/pydantic/v1_schema.py +21 -22
  19. prefect/_internal/pydantic/v2_schema.py +0 -2
  20. prefect/_internal/pydantic/v2_validated_func.py +18 -23
  21. prefect/_internal/pytz.py +1 -1
  22. prefect/_internal/retries.py +61 -0
  23. prefect/_internal/schemas/bases.py +45 -177
  24. prefect/_internal/schemas/fields.py +1 -43
  25. prefect/_internal/schemas/validators.py +47 -233
  26. prefect/agent.py +3 -695
  27. prefect/artifacts.py +173 -14
  28. prefect/automations.py +39 -4
  29. prefect/blocks/abstract.py +1 -1
  30. prefect/blocks/core.py +405 -153
  31. prefect/blocks/fields.py +2 -57
  32. prefect/blocks/notifications.py +43 -28
  33. prefect/blocks/redis.py +168 -0
  34. prefect/blocks/system.py +67 -20
  35. prefect/blocks/webhook.py +2 -9
  36. prefect/cache_policies.py +239 -0
  37. prefect/client/__init__.py +4 -0
  38. prefect/client/base.py +33 -27
  39. prefect/client/cloud.py +65 -20
  40. prefect/client/collections.py +1 -1
  41. prefect/client/orchestration.py +650 -442
  42. prefect/client/schemas/actions.py +115 -100
  43. prefect/client/schemas/filters.py +46 -52
  44. prefect/client/schemas/objects.py +228 -178
  45. prefect/client/schemas/responses.py +18 -36
  46. prefect/client/schemas/schedules.py +55 -36
  47. prefect/client/schemas/sorting.py +2 -0
  48. prefect/client/subscriptions.py +8 -7
  49. prefect/client/types/flexible_schedule_list.py +11 -0
  50. prefect/client/utilities.py +9 -6
  51. prefect/concurrency/asyncio.py +60 -11
  52. prefect/concurrency/context.py +24 -0
  53. prefect/concurrency/events.py +2 -2
  54. prefect/concurrency/services.py +46 -16
  55. prefect/concurrency/sync.py +51 -7
  56. prefect/concurrency/v1/asyncio.py +143 -0
  57. prefect/concurrency/v1/context.py +27 -0
  58. prefect/concurrency/v1/events.py +61 -0
  59. prefect/concurrency/v1/services.py +116 -0
  60. prefect/concurrency/v1/sync.py +92 -0
  61. prefect/context.py +246 -149
  62. prefect/deployments/__init__.py +33 -18
  63. prefect/deployments/base.py +10 -15
  64. prefect/deployments/deployments.py +2 -1048
  65. prefect/deployments/flow_runs.py +178 -0
  66. prefect/deployments/runner.py +72 -173
  67. prefect/deployments/schedules.py +31 -25
  68. prefect/deployments/steps/__init__.py +0 -1
  69. prefect/deployments/steps/core.py +7 -0
  70. prefect/deployments/steps/pull.py +15 -21
  71. prefect/deployments/steps/utility.py +2 -1
  72. prefect/docker/__init__.py +20 -0
  73. prefect/docker/docker_image.py +82 -0
  74. prefect/engine.py +15 -2475
  75. prefect/events/actions.py +17 -23
  76. prefect/events/cli/automations.py +20 -7
  77. prefect/events/clients.py +142 -80
  78. prefect/events/filters.py +14 -18
  79. prefect/events/related.py +74 -75
  80. prefect/events/schemas/__init__.py +0 -5
  81. prefect/events/schemas/automations.py +55 -46
  82. prefect/events/schemas/deployment_triggers.py +7 -197
  83. prefect/events/schemas/events.py +46 -65
  84. prefect/events/schemas/labelling.py +10 -14
  85. prefect/events/utilities.py +4 -5
  86. prefect/events/worker.py +23 -8
  87. prefect/exceptions.py +15 -0
  88. prefect/filesystems.py +30 -529
  89. prefect/flow_engine.py +827 -0
  90. prefect/flow_runs.py +379 -7
  91. prefect/flows.py +470 -360
  92. prefect/futures.py +382 -331
  93. prefect/infrastructure/__init__.py +5 -26
  94. prefect/infrastructure/base.py +3 -320
  95. prefect/infrastructure/provisioners/__init__.py +5 -3
  96. prefect/infrastructure/provisioners/cloud_run.py +13 -8
  97. prefect/infrastructure/provisioners/container_instance.py +14 -9
  98. prefect/infrastructure/provisioners/ecs.py +10 -8
  99. prefect/infrastructure/provisioners/modal.py +8 -5
  100. prefect/input/__init__.py +4 -0
  101. prefect/input/actions.py +2 -4
  102. prefect/input/run_input.py +9 -9
  103. prefect/logging/formatters.py +2 -4
  104. prefect/logging/handlers.py +9 -14
  105. prefect/logging/loggers.py +5 -5
  106. prefect/main.py +72 -0
  107. prefect/plugins.py +2 -64
  108. prefect/profiles.toml +16 -2
  109. prefect/records/__init__.py +1 -0
  110. prefect/records/base.py +223 -0
  111. prefect/records/filesystem.py +207 -0
  112. prefect/records/memory.py +178 -0
  113. prefect/records/result_store.py +64 -0
  114. prefect/results.py +577 -504
  115. prefect/runner/runner.py +117 -47
  116. prefect/runner/server.py +32 -34
  117. prefect/runner/storage.py +3 -12
  118. prefect/runner/submit.py +2 -10
  119. prefect/runner/utils.py +2 -2
  120. prefect/runtime/__init__.py +1 -0
  121. prefect/runtime/deployment.py +1 -0
  122. prefect/runtime/flow_run.py +40 -5
  123. prefect/runtime/task_run.py +1 -0
  124. prefect/serializers.py +28 -39
  125. prefect/server/api/collections_data/views/aggregate-worker-metadata.json +5 -14
  126. prefect/settings.py +209 -332
  127. prefect/states.py +160 -63
  128. prefect/task_engine.py +1478 -57
  129. prefect/task_runners.py +383 -287
  130. prefect/task_runs.py +240 -0
  131. prefect/task_worker.py +463 -0
  132. prefect/tasks.py +684 -374
  133. prefect/transactions.py +410 -0
  134. prefect/types/__init__.py +72 -86
  135. prefect/types/entrypoint.py +13 -0
  136. prefect/utilities/annotations.py +4 -3
  137. prefect/utilities/asyncutils.py +227 -148
  138. prefect/utilities/callables.py +137 -45
  139. prefect/utilities/collections.py +134 -86
  140. prefect/utilities/dispatch.py +27 -14
  141. prefect/utilities/dockerutils.py +11 -4
  142. prefect/utilities/engine.py +186 -32
  143. prefect/utilities/filesystem.py +4 -5
  144. prefect/utilities/importtools.py +26 -27
  145. prefect/utilities/pydantic.py +128 -38
  146. prefect/utilities/schema_tools/hydration.py +18 -1
  147. prefect/utilities/schema_tools/validation.py +30 -0
  148. prefect/utilities/services.py +35 -9
  149. prefect/utilities/templating.py +12 -2
  150. prefect/utilities/timeout.py +20 -5
  151. prefect/utilities/urls.py +195 -0
  152. prefect/utilities/visualization.py +1 -0
  153. prefect/variables.py +78 -59
  154. prefect/workers/__init__.py +0 -1
  155. prefect/workers/base.py +237 -244
  156. prefect/workers/block.py +5 -226
  157. prefect/workers/cloud.py +6 -0
  158. prefect/workers/process.py +265 -12
  159. prefect/workers/server.py +29 -11
  160. {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/METADATA +28 -24
  161. prefect_client-3.0.0.dist-info/RECORD +201 -0
  162. {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/WHEEL +1 -1
  163. prefect/_internal/pydantic/_base_model.py +0 -51
  164. prefect/_internal/pydantic/_compat.py +0 -82
  165. prefect/_internal/pydantic/_flags.py +0 -20
  166. prefect/_internal/pydantic/_types.py +0 -8
  167. prefect/_internal/pydantic/utilities/config_dict.py +0 -72
  168. prefect/_internal/pydantic/utilities/field_validator.py +0 -150
  169. prefect/_internal/pydantic/utilities/model_construct.py +0 -56
  170. prefect/_internal/pydantic/utilities/model_copy.py +0 -55
  171. prefect/_internal/pydantic/utilities/model_dump.py +0 -136
  172. prefect/_internal/pydantic/utilities/model_dump_json.py +0 -112
  173. prefect/_internal/pydantic/utilities/model_fields.py +0 -50
  174. prefect/_internal/pydantic/utilities/model_fields_set.py +0 -29
  175. prefect/_internal/pydantic/utilities/model_json_schema.py +0 -82
  176. prefect/_internal/pydantic/utilities/model_rebuild.py +0 -80
  177. prefect/_internal/pydantic/utilities/model_validate.py +0 -75
  178. prefect/_internal/pydantic/utilities/model_validate_json.py +0 -68
  179. prefect/_internal/pydantic/utilities/model_validator.py +0 -87
  180. prefect/_internal/pydantic/utilities/type_adapter.py +0 -71
  181. prefect/_vendor/fastapi/__init__.py +0 -25
  182. prefect/_vendor/fastapi/applications.py +0 -946
  183. prefect/_vendor/fastapi/background.py +0 -3
  184. prefect/_vendor/fastapi/concurrency.py +0 -44
  185. prefect/_vendor/fastapi/datastructures.py +0 -58
  186. prefect/_vendor/fastapi/dependencies/__init__.py +0 -0
  187. prefect/_vendor/fastapi/dependencies/models.py +0 -64
  188. prefect/_vendor/fastapi/dependencies/utils.py +0 -877
  189. prefect/_vendor/fastapi/encoders.py +0 -177
  190. prefect/_vendor/fastapi/exception_handlers.py +0 -40
  191. prefect/_vendor/fastapi/exceptions.py +0 -46
  192. prefect/_vendor/fastapi/logger.py +0 -3
  193. prefect/_vendor/fastapi/middleware/__init__.py +0 -1
  194. prefect/_vendor/fastapi/middleware/asyncexitstack.py +0 -25
  195. prefect/_vendor/fastapi/middleware/cors.py +0 -3
  196. prefect/_vendor/fastapi/middleware/gzip.py +0 -3
  197. prefect/_vendor/fastapi/middleware/httpsredirect.py +0 -3
  198. prefect/_vendor/fastapi/middleware/trustedhost.py +0 -3
  199. prefect/_vendor/fastapi/middleware/wsgi.py +0 -3
  200. prefect/_vendor/fastapi/openapi/__init__.py +0 -0
  201. prefect/_vendor/fastapi/openapi/constants.py +0 -2
  202. prefect/_vendor/fastapi/openapi/docs.py +0 -203
  203. prefect/_vendor/fastapi/openapi/models.py +0 -480
  204. prefect/_vendor/fastapi/openapi/utils.py +0 -485
  205. prefect/_vendor/fastapi/param_functions.py +0 -340
  206. prefect/_vendor/fastapi/params.py +0 -453
  207. prefect/_vendor/fastapi/py.typed +0 -0
  208. prefect/_vendor/fastapi/requests.py +0 -4
  209. prefect/_vendor/fastapi/responses.py +0 -40
  210. prefect/_vendor/fastapi/routing.py +0 -1331
  211. prefect/_vendor/fastapi/security/__init__.py +0 -15
  212. prefect/_vendor/fastapi/security/api_key.py +0 -98
  213. prefect/_vendor/fastapi/security/base.py +0 -6
  214. prefect/_vendor/fastapi/security/http.py +0 -172
  215. prefect/_vendor/fastapi/security/oauth2.py +0 -227
  216. prefect/_vendor/fastapi/security/open_id_connect_url.py +0 -34
  217. prefect/_vendor/fastapi/security/utils.py +0 -10
  218. prefect/_vendor/fastapi/staticfiles.py +0 -1
  219. prefect/_vendor/fastapi/templating.py +0 -3
  220. prefect/_vendor/fastapi/testclient.py +0 -1
  221. prefect/_vendor/fastapi/types.py +0 -3
  222. prefect/_vendor/fastapi/utils.py +0 -235
  223. prefect/_vendor/fastapi/websockets.py +0 -7
  224. prefect/_vendor/starlette/__init__.py +0 -1
  225. prefect/_vendor/starlette/_compat.py +0 -28
  226. prefect/_vendor/starlette/_exception_handler.py +0 -80
  227. prefect/_vendor/starlette/_utils.py +0 -88
  228. prefect/_vendor/starlette/applications.py +0 -261
  229. prefect/_vendor/starlette/authentication.py +0 -159
  230. prefect/_vendor/starlette/background.py +0 -43
  231. prefect/_vendor/starlette/concurrency.py +0 -59
  232. prefect/_vendor/starlette/config.py +0 -151
  233. prefect/_vendor/starlette/convertors.py +0 -87
  234. prefect/_vendor/starlette/datastructures.py +0 -707
  235. prefect/_vendor/starlette/endpoints.py +0 -130
  236. prefect/_vendor/starlette/exceptions.py +0 -60
  237. prefect/_vendor/starlette/formparsers.py +0 -276
  238. prefect/_vendor/starlette/middleware/__init__.py +0 -17
  239. prefect/_vendor/starlette/middleware/authentication.py +0 -52
  240. prefect/_vendor/starlette/middleware/base.py +0 -220
  241. prefect/_vendor/starlette/middleware/cors.py +0 -176
  242. prefect/_vendor/starlette/middleware/errors.py +0 -265
  243. prefect/_vendor/starlette/middleware/exceptions.py +0 -74
  244. prefect/_vendor/starlette/middleware/gzip.py +0 -113
  245. prefect/_vendor/starlette/middleware/httpsredirect.py +0 -19
  246. prefect/_vendor/starlette/middleware/sessions.py +0 -82
  247. prefect/_vendor/starlette/middleware/trustedhost.py +0 -64
  248. prefect/_vendor/starlette/middleware/wsgi.py +0 -147
  249. prefect/_vendor/starlette/py.typed +0 -0
  250. prefect/_vendor/starlette/requests.py +0 -328
  251. prefect/_vendor/starlette/responses.py +0 -347
  252. prefect/_vendor/starlette/routing.py +0 -933
  253. prefect/_vendor/starlette/schemas.py +0 -154
  254. prefect/_vendor/starlette/staticfiles.py +0 -248
  255. prefect/_vendor/starlette/status.py +0 -199
  256. prefect/_vendor/starlette/templating.py +0 -231
  257. prefect/_vendor/starlette/testclient.py +0 -804
  258. prefect/_vendor/starlette/types.py +0 -30
  259. prefect/_vendor/starlette/websockets.py +0 -193
  260. prefect/blocks/kubernetes.py +0 -119
  261. prefect/deprecated/__init__.py +0 -0
  262. prefect/deprecated/data_documents.py +0 -350
  263. prefect/deprecated/packaging/__init__.py +0 -12
  264. prefect/deprecated/packaging/base.py +0 -96
  265. prefect/deprecated/packaging/docker.py +0 -146
  266. prefect/deprecated/packaging/file.py +0 -92
  267. prefect/deprecated/packaging/orion.py +0 -80
  268. prefect/deprecated/packaging/serializers.py +0 -171
  269. prefect/events/instrument.py +0 -135
  270. prefect/infrastructure/container.py +0 -824
  271. prefect/infrastructure/kubernetes.py +0 -920
  272. prefect/infrastructure/process.py +0 -289
  273. prefect/manifests.py +0 -20
  274. prefect/new_flow_engine.py +0 -449
  275. prefect/new_task_engine.py +0 -423
  276. prefect/pydantic/__init__.py +0 -76
  277. prefect/pydantic/main.py +0 -39
  278. prefect/software/__init__.py +0 -2
  279. prefect/software/base.py +0 -50
  280. prefect/software/conda.py +0 -199
  281. prefect/software/pip.py +0 -122
  282. prefect/software/python.py +0 -52
  283. prefect/task_server.py +0 -322
  284. prefect_client-2.20.4.dist-info/RECORD +0 -294
  285. /prefect/{_internal/pydantic/utilities → client/types}/__init__.py +0 -0
  286. /prefect/{_vendor → concurrency/v1}/__init__.py +0 -0
  287. {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/LICENSE +0 -0
  288. {prefect_client-2.20.4.dist-info → prefect_client-3.0.0.dist-info}/top_level.txt +0 -0
prefect/agent.py CHANGED
@@ -1,698 +1,6 @@
  """
- DEPRECATION WARNING:
-
- This module is deprecated as of March 2024 and will not be available after September 2024.
- Agents have been replaced by workers, which offer enhanced functionality and better performance.
-
- For upgrade instructions, see https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/.
+ 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
  """
+ from prefect._internal.compatibility.migration import getattr_migration

- import inspect
- from typing import AsyncIterator, List, Optional, Set, Union
- from uuid import UUID
-
- import anyio
- import anyio.abc
- import anyio.to_process
- import pendulum
-
- from prefect._internal.compatibility.deprecated import (
-     deprecated_class,
- )
- from prefect.blocks.core import Block
- from prefect.client.orchestration import PrefectClient, get_client
- from prefect.client.schemas.filters import (
-     FlowRunFilter,
-     FlowRunFilterId,
-     FlowRunFilterState,
-     FlowRunFilterStateName,
-     FlowRunFilterStateType,
-     WorkPoolFilter,
-     WorkPoolFilterName,
-     WorkQueueFilter,
-     WorkQueueFilterName,
- )
- from prefect.client.schemas.objects import (
-     DEFAULT_AGENT_WORK_POOL_NAME,
-     BlockDocument,
-     FlowRun,
-     WorkQueue,
- )
- from prefect.engine import propose_state
- from prefect.exceptions import (
-     Abort,
-     InfrastructureNotAvailable,
-     InfrastructureNotFound,
-     ObjectNotFound,
- )
- from prefect.infrastructure import Infrastructure, InfrastructureResult, Process
- from prefect.logging import get_logger
- from prefect.settings import PREFECT_AGENT_PREFETCH_SECONDS
- from prefect.states import Crashed, Pending, StateType, exception_to_failed_state
-
-
- @deprecated_class(
-     start_date="Mar 2024",
-     help="Use a worker instead. Refer to the upgrade guide for more information: https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/.",
- )
- class PrefectAgent:
-     def __init__(
-         self,
-         work_queues: List[str] = None,
-         work_queue_prefix: Union[str, List[str]] = None,
-         work_pool_name: str = None,
-         prefetch_seconds: int = None,
-         default_infrastructure: Infrastructure = None,
-         default_infrastructure_document_id: UUID = None,
-         limit: Optional[int] = None,
-     ) -> None:
-         if default_infrastructure and default_infrastructure_document_id:
-             raise ValueError(
-                 "Provide only one of 'default_infrastructure' and"
-                 " 'default_infrastructure_document_id'."
-             )
-
-         self.work_queues: Set[str] = set(work_queues) if work_queues else set()
-         self.work_pool_name = work_pool_name
-         self.prefetch_seconds = prefetch_seconds
-         self.submitting_flow_run_ids = set()
-         self.cancelling_flow_run_ids = set()
-         self.scheduled_task_scopes = set()
-         self.started = False
-         self.logger = get_logger("agent")
-         self.task_group: Optional[anyio.abc.TaskGroup] = None
-         self.limit: Optional[int] = limit
-         self.limiter: Optional[anyio.CapacityLimiter] = None
-         self.client: Optional[PrefectClient] = None
-
-         if isinstance(work_queue_prefix, str):
-             work_queue_prefix = [work_queue_prefix]
-         self.work_queue_prefix = work_queue_prefix
-
-         self._work_queue_cache_expiration: pendulum.DateTime = None
-         self._work_queue_cache: List[WorkQueue] = []
-
-         if default_infrastructure:
-             self.default_infrastructure_document_id = (
-                 default_infrastructure._block_document_id
-             )
-             self.default_infrastructure = default_infrastructure
-         elif default_infrastructure_document_id:
-             self.default_infrastructure_document_id = default_infrastructure_document_id
-             self.default_infrastructure = None
-         else:
-             self.default_infrastructure = Process()
-             self.default_infrastructure_document_id = None
-
-     async def update_matched_agent_work_queues(self):
-         if self.work_queue_prefix:
-             if self.work_pool_name:
-                 matched_queues = await self.client.read_work_queues(
-                     work_pool_name=self.work_pool_name,
-                     work_queue_filter=WorkQueueFilter(
-                         name=WorkQueueFilterName(startswith_=self.work_queue_prefix)
-                     ),
-                 )
-             else:
-                 matched_queues = await self.client.match_work_queues(
-                     self.work_queue_prefix, work_pool_name=DEFAULT_AGENT_WORK_POOL_NAME
-                 )
-
-             matched_queues = set(q.name for q in matched_queues)
-             if matched_queues != self.work_queues:
-                 new_queues = matched_queues - self.work_queues
-                 removed_queues = self.work_queues - matched_queues
-                 if new_queues:
-                     self.logger.info(
-                         f"Matched new work queues: {', '.join(new_queues)}"
-                     )
-                 if removed_queues:
-                     self.logger.info(
-                         f"Work queues no longer matched: {', '.join(removed_queues)}"
-                     )
-             self.work_queues = matched_queues
-
-     async def get_work_queues(self) -> AsyncIterator[WorkQueue]:
-         """
-         Loads the work queue objects corresponding to the agent's target work
-         queues. If any of them don't exist, they are created.
-         """
-
-         # if the queue cache has not expired, yield queues from the cache
-         now = pendulum.now("UTC")
-         if (self._work_queue_cache_expiration or now) > now:
-             for queue in self._work_queue_cache:
-                 yield queue
-             return
-
-         # otherwise clear the cache, set the expiration for 30 seconds, and
-         # reload the work queues
-         self._work_queue_cache.clear()
-         self._work_queue_cache_expiration = now.add(seconds=30)
-
-         await self.update_matched_agent_work_queues()
-
-         for name in self.work_queues:
-             try:
-                 work_queue = await self.client.read_work_queue_by_name(
-                     work_pool_name=self.work_pool_name, name=name
-                 )
-             except (ObjectNotFound, Exception):
-                 work_queue = None
-
-             # if the work queue wasn't found and the agent is NOT polling
-             # for queues using a regex, try to create it
-             if work_queue is None and not self.work_queue_prefix:
-                 try:
-                     work_queue = await self.client.create_work_queue(
-                         work_pool_name=self.work_pool_name, name=name
-                     )
-                 except Exception:
-                     # if creating it raises an exception, it was probably just
-                     # created by some other agent; rather than entering a re-read
-                     # loop with new error handling, we log the exception and
-                     # continue.
-                     self.logger.exception(f"Failed to create work queue {name!r}.")
-                     continue
-                 else:
-                     log_str = f"Created work queue {name!r}"
-                     if self.work_pool_name:
-                         log_str = (
-                             f"Created work queue {name!r} in work pool"
-                             f" {self.work_pool_name!r}."
-                         )
-                     else:
-                         log_str = f"Created work queue '{name}'."
-                     self.logger.info(log_str)
-
-             if work_queue is None:
-                 self.logger.error(
-                     f"Work queue '{name!r}' with prefix {self.work_queue_prefix} wasn't"
-                     " found"
-                 )
-             else:
-                 self._work_queue_cache.append(work_queue)
-                 yield work_queue
-
-     async def get_and_submit_flow_runs(self) -> List[FlowRun]:
-         """
-         The principle method on agents. Queries for scheduled flow runs and submits
-         them for execution in parallel.
-         """
-         if not self.started:
-             raise RuntimeError(
-                 "Agent is not started. Use `async with PrefectAgent()...`"
-             )
-
-         self.logger.debug("Checking for scheduled flow runs...")
-
-         before = pendulum.now("utc").add(
-             seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value()
-         )
-
-         submittable_runs: List[FlowRun] = []
-
-         if self.work_pool_name:
-             responses = await self.client.get_scheduled_flow_runs_for_work_pool(
-                 work_pool_name=self.work_pool_name,
-                 work_queue_names=[wq.name async for wq in self.get_work_queues()],
-                 scheduled_before=before,
-             )
-             submittable_runs.extend([response.flow_run for response in responses])
-
-         else:
-             # load runs from each work queue
-             async for work_queue in self.get_work_queues():
-                 # print a nice message if the work queue is paused
-                 if work_queue.is_paused:
-                     self.logger.info(
-                         f"Work queue {work_queue.name!r} ({work_queue.id}) is paused."
-                     )
-
-                 else:
-                     try:
-                         queue_runs = await self.client.get_runs_in_work_queue(
-                             id=work_queue.id, limit=10, scheduled_before=before
-                         )
-                         submittable_runs.extend(queue_runs)
-                     except ObjectNotFound:
-                         self.logger.error(
-                             f"Work queue {work_queue.name!r} ({work_queue.id}) not"
-                             " found."
-                         )
-                     except Exception as exc:
-                         self.logger.exception(exc)
-
-             submittable_runs.sort(key=lambda run: run.next_scheduled_start_time)
-
-         for flow_run in submittable_runs:
-             # don't resubmit a run
-             if flow_run.id in self.submitting_flow_run_ids:
-                 continue
-
-             try:
-                 if self.limiter:
-                     self.limiter.acquire_on_behalf_of_nowait(flow_run.id)
-             except anyio.WouldBlock:
-                 self.logger.info(
-                     f"Flow run limit reached; {self.limiter.borrowed_tokens} flow runs"
-                     " in progress."
-                 )
-                 break
-             else:
-                 self.logger.info(f"Submitting flow run '{flow_run.id}'")
-                 self.submitting_flow_run_ids.add(flow_run.id)
-                 self.task_group.start_soon(
-                     self.submit_run,
-                     flow_run,
-                 )
-
-         return list(
-             filter(lambda run: run.id in self.submitting_flow_run_ids, submittable_runs)
-         )
-
-     async def check_for_cancelled_flow_runs(self):
-         if not self.started:
-             raise RuntimeError(
-                 "Agent is not started. Use `async with PrefectAgent()...`"
-             )
-
-         self.logger.debug("Checking for cancelled flow runs...")
-
-         work_queue_filter = (
-             WorkQueueFilter(name=WorkQueueFilterName(any_=list(self.work_queues)))
-             if self.work_queues
-             else None
-         )
-
-         work_pool_filter = (
-             WorkPoolFilter(name=WorkPoolFilterName(any_=[self.work_pool_name]))
-             if self.work_pool_name
-             else WorkPoolFilter(name=WorkPoolFilterName(any_=["default-agent-pool"]))
-         )
-         named_cancelling_flow_runs = await self.client.read_flow_runs(
-             flow_run_filter=FlowRunFilter(
-                 state=FlowRunFilterState(
-                     type=FlowRunFilterStateType(any_=[StateType.CANCELLED]),
-                     name=FlowRunFilterStateName(any_=["Cancelling"]),
-                 ),
-                 # Avoid duplicate cancellation calls
-                 id=FlowRunFilterId(not_any_=list(self.cancelling_flow_run_ids)),
-             ),
-             work_pool_filter=work_pool_filter,
-             work_queue_filter=work_queue_filter,
-         )
-
-         typed_cancelling_flow_runs = await self.client.read_flow_runs(
-             flow_run_filter=FlowRunFilter(
-                 state=FlowRunFilterState(
-                     type=FlowRunFilterStateType(any_=[StateType.CANCELLING]),
-                 ),
-                 # Avoid duplicate cancellation calls
-                 id=FlowRunFilterId(not_any_=list(self.cancelling_flow_run_ids)),
-             ),
-             work_pool_filter=work_pool_filter,
-             work_queue_filter=work_queue_filter,
-         )
-
-         cancelling_flow_runs = named_cancelling_flow_runs + typed_cancelling_flow_runs
-
-         if cancelling_flow_runs:
-             self.logger.info(
-                 f"Found {len(cancelling_flow_runs)} flow runs awaiting cancellation."
-             )
-
-         for flow_run in cancelling_flow_runs:
-             self.cancelling_flow_run_ids.add(flow_run.id)
-             self.task_group.start_soon(self.cancel_run, flow_run)
-
-         return cancelling_flow_runs
-
-     async def cancel_run(self, flow_run: FlowRun) -> None:
-         """
-         Cancel a flow run by killing its infrastructure
-         """
-         if not flow_run.infrastructure_pid:
-             self.logger.error(
-                 f"Flow run '{flow_run.id}' does not have an infrastructure pid"
-                 " attached. Cancellation cannot be guaranteed."
-             )
-             await self._mark_flow_run_as_cancelled(
-                 flow_run,
-                 state_updates={
-                     "message": (
-                         "This flow run is missing infrastructure tracking information"
-                         " and cancellation cannot be guaranteed."
-                     )
-                 },
-             )
-             return
-
-         try:
-             infrastructure = await self.get_infrastructure(flow_run)
-             if infrastructure.is_using_a_runner:
-                 self.logger.info(
-                     f"Skipping cancellation because flow run {str(flow_run.id)!r} is"
-                     " using enhanced cancellation. A dedicated runner will handle"
-                     " cancellation."
-                 )
-                 return
-         except Exception:
-             self.logger.exception(
-                 f"Failed to get infrastructure for flow run '{flow_run.id}'. "
-                 "Flow run cannot be cancelled."
-             )
-             # Note: We leave this flow run in the cancelling set because it cannot be
-             # cancelled and this will prevent additional attempts.
-             return
-
-         if not hasattr(infrastructure, "kill"):
-             self.logger.error(
-                 f"Flow run '{flow_run.id}' infrastructure {infrastructure.type!r} "
-                 "does not support killing created infrastructure. "
-                 "Cancellation cannot be guaranteed."
-             )
-             return
-
-         self.logger.info(
-             f"Killing {infrastructure.type} {flow_run.infrastructure_pid} for flow run "
-             f"'{flow_run.id}'..."
-         )
-         try:
-             await infrastructure.kill(flow_run.infrastructure_pid)
-         except InfrastructureNotFound as exc:
-             self.logger.warning(f"{exc} Marking flow run as cancelled.")
-             await self._mark_flow_run_as_cancelled(flow_run)
-         except InfrastructureNotAvailable as exc:
-             self.logger.warning(f"{exc} Flow run cannot be cancelled by this agent.")
-         except Exception:
-             self.logger.exception(
-                 "Encountered exception while killing infrastructure for flow run "
-                 f"'{flow_run.id}'. Flow run may not be cancelled."
-             )
-             # We will try again on generic exceptions
-             self.cancelling_flow_run_ids.remove(flow_run.id)
-             return
-         else:
-             await self._mark_flow_run_as_cancelled(flow_run)
-             self.logger.info(f"Cancelled flow run '{flow_run.id}'!")
-
-     async def _mark_flow_run_as_cancelled(
-         self, flow_run: FlowRun, state_updates: Optional[dict] = None
-     ) -> None:
-         state_updates = state_updates or {}
-         state_updates.setdefault("name", "Cancelled")
-         state_updates.setdefault("type", StateType.CANCELLED)
-         state = flow_run.state.copy(update=state_updates)
-
-         await self.client.set_flow_run_state(flow_run.id, state, force=True)
-
-         # Do not remove the flow run from the cancelling set immediately because
-         # the API caches responses for the `read_flow_runs` and we do not want to
-         # duplicate cancellations.
-         await self._schedule_task(
-             60 * 10, self.cancelling_flow_run_ids.remove, flow_run.id
-         )
-
-     async def get_infrastructure(self, flow_run: FlowRun) -> Infrastructure:
-         deployment = await self.client.read_deployment(flow_run.deployment_id)
-
-         flow = await self.client.read_flow(deployment.flow_id)
-
-         # overrides only apply when configuring known infra blocks
-         if not deployment.infrastructure_document_id:
-             if self.default_infrastructure:
-                 infra_block = self.default_infrastructure
-             else:
-                 infra_document = await self.client.read_block_document(
-                     self.default_infrastructure_document_id
-                 )
-                 infra_block = Block._from_block_document(infra_document)
-
-             # Add flow run metadata to the infrastructure
-             prepared_infrastructure = infra_block.prepare_for_flow_run(
-                 flow_run, deployment=deployment, flow=flow
-             )
-             return prepared_infrastructure
-
-         ## get infra
-         infra_document = await self.client.read_block_document(
-             deployment.infrastructure_document_id
-         )
-
-         # this piece of logic applies any overrides that may have been set on the
-         # deployment; overrides are defined as dot.delimited paths on possibly nested
-         # attributes of the infrastructure block
-         doc_dict = infra_document.dict()
-         infra_dict = doc_dict.get("data", {})
-         for override, value in (deployment.job_variables or {}).items():
-             nested_fields = override.split(".")
-             data = infra_dict
-             for field in nested_fields[:-1]:
-                 data = data[field]
-
-             # once we reach the end, set the value
-             data[nested_fields[-1]] = value
-
-         # reconstruct the infra block
-         doc_dict["data"] = infra_dict
-         infra_document = BlockDocument(**doc_dict)
-         infrastructure_block = Block._from_block_document(infra_document)
-
-         # TODO: Here the agent may update the infrastructure with agent-level settings
-
-         # Add flow run metadata to the infrastructure
-         prepared_infrastructure = infrastructure_block.prepare_for_flow_run(
-             flow_run, deployment=deployment, flow=flow
-         )
-
-         return prepared_infrastructure
-
-     async def submit_run(self, flow_run: FlowRun) -> None:
-         """
-         Submit a flow run to the infrastructure
-         """
-         ready_to_submit = await self._propose_pending_state(flow_run)
-
-         if ready_to_submit:
-             try:
-                 infrastructure = await self.get_infrastructure(flow_run)
-             except Exception as exc:
-                 self.logger.exception(
-                     f"Failed to get infrastructure for flow run '{flow_run.id}'."
-                 )
-                 await self._propose_failed_state(flow_run, exc)
-                 if self.limiter:
-                     self.limiter.release_on_behalf_of(flow_run.id)
-             else:
-                 # Wait for submission to be completed. Note that the submission function
-                 # may continue to run in the background after this exits.
-                 readiness_result = await self.task_group.start(
-                     self._submit_run_and_capture_errors, flow_run, infrastructure
-                 )
-
-                 if readiness_result and not isinstance(readiness_result, Exception):
-                     try:
-                         await self.client.update_flow_run(
-                             flow_run_id=flow_run.id,
-                             infrastructure_pid=str(readiness_result),
-                         )
-                     except Exception:
-                         self.logger.exception(
-                             "An error occurred while setting the `infrastructure_pid`"
-                             f" on flow run {flow_run.id!r}. The flow run will not be"
-                             " cancellable."
-                         )
-
-                 self.logger.info(f"Completed submission of flow run '{flow_run.id}'")
-
-         else:
-             # If the run is not ready to submit, release the concurrency slot
-             if self.limiter:
-                 self.limiter.release_on_behalf_of(flow_run.id)
-
-         self.submitting_flow_run_ids.remove(flow_run.id)
-
-     async def _submit_run_and_capture_errors(
-         self,
-         flow_run: FlowRun,
-         infrastructure: Infrastructure,
-         task_status: anyio.abc.TaskStatus = None,
-     ) -> Union[InfrastructureResult, Exception]:
-         # Note: There is not a clear way to determine if task_status.started() has been
-         # called without peeking at the internal `_future`. Ideally we could just
-         # check if the flow run id has been removed from `submitting_flow_run_ids`
-         # but it is not so simple to guarantee that this coroutine yields back
-         # to `submit_run` to execute that line when exceptions are raised during
-         # submission.
-         try:
-             result = await infrastructure.run(task_status=task_status)
-         except Exception as exc:
-             if not task_status._future.done():
-                 # This flow run was being submitted and did not start successfully
-                 self.logger.exception(
-                     f"Failed to submit flow run '{flow_run.id}' to infrastructure."
-                 )
-                 # Mark the task as started to prevent agent crash
-                 task_status.started(exc)
-                 await self._propose_crashed_state(
-                     flow_run, "Flow run could not be submitted to infrastructure"
-                 )
-             else:
-                 self.logger.exception(
-                     f"An error occurred while monitoring flow run '{flow_run.id}'. "
-                     "The flow run will not be marked as failed, but an issue may have "
-                     "occurred."
-                 )
-             return exc
-         finally:
-             if self.limiter:
-                 self.limiter.release_on_behalf_of(flow_run.id)
-
-         if not task_status._future.done():
-             self.logger.error(
-                 f"Infrastructure returned without reporting flow run '{flow_run.id}' "
-                 "as started or raising an error. This behavior is not expected and "
-                 "generally indicates improper implementation of infrastructure. The "
-                 "flow run will not be marked as failed, but an issue may have occurred."
-             )
-             # Mark the task as started to prevent agent crash
-             task_status.started()
-
-         if result.status_code != 0:
-             await self._propose_crashed_state(
-                 flow_run,
-                 (
-                     "Flow run infrastructure exited with non-zero status code"
-                     f" {result.status_code}."
-                 ),
-             )
-
-         return result
-
-     async def _propose_pending_state(self, flow_run: FlowRun) -> bool:
-         state = flow_run.state
-         try:
-             state = await propose_state(self.client, Pending(), flow_run_id=flow_run.id)
-         except Abort as exc:
-             self.logger.info(
-                 (
-                     f"Aborted submission of flow run '{flow_run.id}'. "
-                     f"Server sent an abort signal: {exc}"
-                 ),
-             )
-             return False
-         except Exception:
-             self.logger.error(
-                 f"Failed to update state of flow run '{flow_run.id}'",
-                 exc_info=True,
-             )
-             return False
-
-         if not state.is_pending():
-             self.logger.info(
-                 (
-                     f"Aborted submission of flow run '{flow_run.id}': "
-                     f"Server returned a non-pending state {state.type.value!r}"
-                 ),
-             )
-             return False
-
-         return True
-
-     async def _propose_failed_state(self, flow_run: FlowRun, exc: Exception) -> None:
-         try:
-             await propose_state(
-                 self.client,
-                 await exception_to_failed_state(message="Submission failed.", exc=exc),
-                 flow_run_id=flow_run.id,
-             )
-         except Abort:
-             # We've already failed, no need to note the abort but we don't want it to
-             # raise in the agent process
-             pass
-         except Exception:
-             self.logger.error(
-                 f"Failed to update state of flow run '{flow_run.id}'",
-                 exc_info=True,
-             )
-
-     async def _propose_crashed_state(self, flow_run: FlowRun, message: str) -> None:
-         try:
-             state = await propose_state(
-                 self.client,
-                 Crashed(message=message),
-                 flow_run_id=flow_run.id,
-             )
-         except Abort:
-             # Flow run already marked as failed
-             pass
-         except Exception:
-             self.logger.exception(f"Failed to update state of flow run '{flow_run.id}'")
-         else:
-             if state.is_crashed():
-                 self.logger.info(
-                     f"Reported flow run '{flow_run.id}' as crashed: {message}"
-                 )
-
-     async def _schedule_task(self, __in_seconds: int, fn, *args, **kwargs):
-         """
-         Schedule a background task to start after some time.
-
-         These tasks will be run immediately when the agent exits instead of waiting.
-
-         The function may be async or sync. Async functions will be awaited.
-         """
-
-         async def wrapper(task_status):
-             # If we are shutting down, do not sleep; otherwise sleep until the scheduled
-             # time or shutdown
-             if self.started:
-                 with anyio.CancelScope() as scope:
-                     self.scheduled_task_scopes.add(scope)
-                     task_status.started()
-                     await anyio.sleep(__in_seconds)
-
-                 self.scheduled_task_scopes.remove(scope)
-             else:
-                 task_status.started()
-
-             result = fn(*args, **kwargs)
-             if inspect.iscoroutine(result):
-                 await result
-
-         await self.task_group.start(wrapper)
-
-     # Context management ---------------------------------------------------------------
-
-     async def start(self):
-         self.started = True
-         self.task_group = anyio.create_task_group()
-         self.limiter = (
-             anyio.CapacityLimiter(self.limit) if self.limit is not None else None
-         )
-         self.client = get_client()
-         await self.client.__aenter__()
-         await self.task_group.__aenter__()
-
-     async def shutdown(self, *exc_info):
-         self.started = False
-         # We must cancel scheduled task scopes before closing the task group
-         for scope in self.scheduled_task_scopes:
-             scope.cancel()
-         await self.task_group.__aexit__(*exc_info)
-         await self.client.__aexit__(*exc_info)
-         self.task_group = None
-         self.client = None
-         self.submitting_flow_run_ids.clear()
-         self.cancelling_flow_run_ids.clear()
-         self.scheduled_task_scopes.clear()
-         self._work_queue_cache_expiration = None
-         self._work_queue_cache = []
-
-     async def __aenter__(self):
-         await self.start()
-         return self
-
-     async def __aexit__(self, *exc_info):
-         await self.shutdown(*exc_info)
+ __getattr__ = getattr_migration(__name__)
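
Note: the new prefect/agent.py is a thin compatibility shim. Instead of shipping the old agent implementation, it assigns a module-level __getattr__ (PEP 562) built by getattr_migration, so that attribute access on the old module path fails with a message pointing at where the object moved or why it was removed in Prefect 3.0. A minimal sketch of the pattern follows; the lookup tables and message wording are hypothetical illustrations, not Prefect's actual implementation.

    from typing import Any, Callable

    # Hypothetical mappings -- Prefect's real shim keeps its own tables.
    MOVED_IN_V3 = {"PrefectAgent": "prefect.workers"}
    REMOVED_IN_V3 = {"OrionClient": "use prefect.client.orchestration.PrefectClient instead"}

    def getattr_migration(module_name: str) -> Callable[[str], Any]:
        """Build a PEP 562 module __getattr__ that explains where old names went."""

        def __getattr__(name: str) -> Any:
            if name in MOVED_IN_V3:
                raise ImportError(
                    f"`{module_name}.{name}` has moved to `{MOVED_IN_V3[name]}` in Prefect 3.0."
                )
            if name in REMOVED_IN_V3:
                raise ImportError(
                    f"`{module_name}.{name}` was removed in Prefect 3.0; {REMOVED_IN_V3[name]}."
                )
            raise AttributeError(f"module {module_name!r} has no attribute {name!r}")

        return __getattr__

    # In the shimmed module (e.g. prefect/agent.py):
    # __getattr__ = getattr_migration(__name__)

With a hook like this in place, `from prefect.agent import PrefectAgent` surfaces an actionable error instead of a bare AttributeError, which matches the intent stated in the new module docstring.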