prefect-client 3.0.0rc10__py3-none-any.whl → 3.0.0rc12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. prefect/_internal/concurrency/api.py +1 -1
  2. prefect/_internal/concurrency/services.py +9 -0
  3. prefect/_internal/retries.py +61 -0
  4. prefect/artifacts.py +12 -0
  5. prefect/client/cloud.py +1 -1
  6. prefect/client/schemas/actions.py +4 -0
  7. prefect/client/schemas/objects.py +1 -1
  8. prefect/concurrency/asyncio.py +3 -3
  9. prefect/concurrency/events.py +1 -1
  10. prefect/concurrency/services.py +3 -2
  11. prefect/concurrency/sync.py +19 -5
  12. prefect/context.py +8 -2
  13. prefect/deployments/__init__.py +28 -15
  14. prefect/deployments/steps/pull.py +7 -0
  15. prefect/events/schemas/events.py +10 -0
  16. prefect/flow_engine.py +10 -9
  17. prefect/flows.py +194 -68
  18. prefect/futures.py +53 -7
  19. prefect/logging/loggers.py +1 -1
  20. prefect/results.py +1 -46
  21. prefect/runner/runner.py +96 -23
  22. prefect/runner/server.py +20 -22
  23. prefect/runner/submit.py +0 -8
  24. prefect/runtime/flow_run.py +38 -3
  25. prefect/settings.py +9 -30
  26. prefect/task_engine.py +158 -48
  27. prefect/task_worker.py +1 -1
  28. prefect/tasks.py +164 -17
  29. prefect/transactions.py +2 -15
  30. prefect/utilities/asyncutils.py +13 -9
  31. prefect/utilities/engine.py +34 -1
  32. prefect/workers/base.py +98 -208
  33. prefect/workers/process.py +262 -4
  34. prefect/workers/server.py +27 -9
  35. {prefect_client-3.0.0rc10.dist-info → prefect_client-3.0.0rc12.dist-info}/METADATA +4 -4
  36. {prefect_client-3.0.0rc10.dist-info → prefect_client-3.0.0rc12.dist-info}/RECORD +39 -38
  37. {prefect_client-3.0.0rc10.dist-info → prefect_client-3.0.0rc12.dist-info}/LICENSE +0 -0
  38. {prefect_client-3.0.0rc10.dist-info → prefect_client-3.0.0rc12.dist-info}/WHEEL +0 -0
  39. {prefect_client-3.0.0rc10.dist-info → prefect_client-3.0.0rc12.dist-info}/top_level.txt +0 -0
prefect/runner/runner.py CHANGED
@@ -65,17 +65,13 @@ from prefect.client.schemas.filters import (
     FlowRunFilterStateName,
     FlowRunFilterStateType,
 )
-from prefect.client.schemas.objects import (
-    FlowRun,
-    State,
-    StateType,
-)
+from prefect.client.schemas.objects import Flow as APIFlow
+from prefect.client.schemas.objects import FlowRun, State, StateType
 from prefect.client.schemas.schedules import SCHEDULE_TYPES
-from prefect.deployments.runner import (
-    EntrypointType,
-    RunnerDeployment,
-)
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
+from prefect.events.related import tags_as_related_resources
+from prefect.events.schemas.events import RelatedResource
+from prefect.events.utilities import emit_event
 from prefect.exceptions import Abort, ObjectNotFound
 from prefect.flows import Flow, load_flow_from_flow_run
 from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logger
@@ -88,6 +84,7 @@ from prefect.settings import (
     get_current_settings,
 )
 from prefect.states import Crashed, Pending, exception_to_failed_state
+from prefect.types.entrypoint import EntrypointType
 from prefect.utilities.asyncutils import (
     asyncnullcontext,
     is_async_fn,
@@ -96,9 +93,12 @@ from prefect.utilities.asyncutils import (
 from prefect.utilities.engine import propose_state
 from prefect.utilities.processutils import _register_signal, run_process
 from prefect.utilities.services import critical_service_loop
+from prefect.utilities.slugify import slugify

 if TYPE_CHECKING:
+    from prefect.client.schemas.objects import Deployment
     from prefect.client.types.flexible_schedule_list import FlexibleScheduleList
+    from prefect.deployments.runner import RunnerDeployment

 __all__ = ["Runner"]

@@ -130,6 +130,7 @@ class Runner:
         Examples:
             Set up a Runner to manage the execute of scheduled flow runs for two flows:
                 ```python
+                import asyncio
                 from prefect import flow, Runner

                 @flow
@@ -149,7 +150,7 @@ class Runner:
                 # Run on a cron schedule
                 runner.add_flow(goodbye_flow, schedule={"cron": "0 * * * *"})

-                runner.start()
+                asyncio.run(runner.start())
                 ```
         """
         if name and ("/" in name or "%" in name):
@@ -166,30 +167,26 @@ class Runner:
         self.query_seconds = query_seconds or PREFECT_RUNNER_POLL_FREQUENCY.value()
         self._prefetch_seconds = prefetch_seconds

-        self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-        self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-
-        self._limiter: Optional[anyio.CapacityLimiter] = anyio.CapacityLimiter(
-            self.limit
-        )
+        self._limiter: Optional[anyio.CapacityLimiter] = None
         self._client = get_client()
         self._submitting_flow_run_ids = set()
         self._cancelling_flow_run_ids = set()
         self._scheduled_task_scopes = set()
         self._deployment_ids: Set[UUID] = set()
-        self._flow_run_process_map = dict()
+        self._flow_run_process_map: Dict[UUID, Dict] = dict()

         self._tmp_dir: Path = (
             Path(tempfile.gettempdir()) / "runner_storage" / str(uuid4())
         )
         self._storage_objs: List[RunnerStorage] = []
         self._deployment_storage_map: Dict[UUID, RunnerStorage] = {}
-        self._loop = asyncio.get_event_loop()
+
+        self._loop: Optional[asyncio.AbstractEventLoop] = None

     @sync_compatible
     async def add_deployment(
         self,
-        deployment: RunnerDeployment,
+        deployment: "RunnerDeployment",
     ) -> UUID:
         """
         Registers the deployment with the Prefect API and will monitor for work once
@@ -324,7 +321,6 @@ class Runner:

         sys.exit(0)

-    @sync_compatible
     async def start(
         self, run_once: bool = False, webserver: Optional[bool] = None
     ) -> None:
@@ -342,6 +338,7 @@ class Runner:
            Initialize a Runner, add two flows, and serve them by starting the Runner:

            ```python
+           import asyncio
            from prefect import flow, Runner

            @flow
@@ -361,7 +358,7 @@ class Runner:
            # Run on a cron schedule
            runner.add_flow(goodbye_flow, schedule={"cron": "0 * * * *"})

-           runner.start()
+           asyncio.run(runner.start())
            ```
        """
        from prefect.runner.server import start_webserver
@@ -695,8 +692,9 @@ class Runner:
         """
         self._logger.info("Pausing all deployments...")
         for deployment_id in self._deployment_ids:
-            self._logger.debug(f"Pausing deployment '{deployment_id}'")
             await self._client.set_deployment_paused_state(deployment_id, True)
+            self._logger.debug(f"Paused deployment '{deployment_id}'")
+
         self._logger.info("All deployments have been paused!")

     async def _get_and_submit_flow_runs(self):
@@ -818,8 +816,71 @@ class Runner:
                     "message": state_msg or "Flow run was cancelled successfully."
                 },
             )
+            try:
+                deployment = await self._client.read_deployment(flow_run.deployment_id)
+            except ObjectNotFound:
+                deployment = None
+            try:
+                flow = await self._client.read_flow(flow_run.flow_id)
+            except ObjectNotFound:
+                flow = None
+            self._emit_flow_run_cancelled_event(
+                flow_run=flow_run, flow=flow, deployment=deployment
+            )
             run_logger.info(f"Cancelled flow run '{flow_run.name}'!")

+    def _event_resource(self):
+        from prefect import __version__
+
+        return {
+            "prefect.resource.id": f"prefect.runner.{slugify(self.name)}",
+            "prefect.resource.name": self.name,
+            "prefect.version": __version__,
+        }
+
+    def _emit_flow_run_cancelled_event(
+        self,
+        flow_run: "FlowRun",
+        flow: "Optional[APIFlow]",
+        deployment: "Optional[Deployment]",
+    ):
+        related = []
+        tags = []
+        if deployment:
+            related.append(
+                {
+                    "prefect.resource.id": f"prefect.deployment.{deployment.id}",
+                    "prefect.resource.role": "deployment",
+                    "prefect.resource.name": deployment.name,
+                }
+            )
+            tags.extend(deployment.tags)
+        if flow:
+            related.append(
+                {
+                    "prefect.resource.id": f"prefect.flow.{flow.id}",
+                    "prefect.resource.role": "flow",
+                    "prefect.resource.name": flow.name,
+                }
+            )
+        related.append(
+            {
+                "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
+                "prefect.resource.role": "flow-run",
+                "prefect.resource.name": flow_run.name,
+            }
+        )
+        tags.extend(flow_run.tags)
+
+        related = [RelatedResource.model_validate(r) for r in related]
+        related += tags_as_related_resources(set(tags))
+
+        emit_event(
+            event="prefect.runner.cancelled-flow-run",
+            resource=self._event_resource(),
+            related=related,
+        )
+
     async def _get_scheduled_flow_runs(
         self,
     ) -> List["FlowRun"]:
@@ -956,7 +1017,7 @@ class Runner:
                 # If the run is not ready to submit, release the concurrency slot
                 self._release_limit_slot(flow_run.id)

-        self._submitting_flow_run_ids.remove(flow_run.id)
+        self._submitting_flow_run_ids.discard(flow_run.id)

     async def _submit_run_and_capture_errors(
         self,
@@ -1163,6 +1224,18 @@ class Runner:
         self._logger.debug("Starting runner...")
         self._client = get_client()
         self._tmp_dir.mkdir(parents=True)
+
+        self._limiter = anyio.CapacityLimiter(self.limit)
+
+        if not hasattr(self, "_loop") or not self._loop:
+            self._loop = asyncio.get_event_loop()
+
+        if not hasattr(self, "_runs_task_group") or not self._runs_task_group:
+            self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
+
+        if not hasattr(self, "_loops_task_group") or not self._loops_task_group:
+            self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
+
         await self._client.__aenter__()
         await self._runs_task_group.__aenter__()

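Because `@sync_compatible` was dropped from `Runner.start()`, the method must now be awaited (or driven with `asyncio.run`) rather than called directly from synchronous code, as the updated docstrings show. A minimal usage sketch; the flow, runner name, and schedule are placeholders, and `add_flow` is assumed to remain callable from sync code as in the docstring example:

```python
import asyncio

from prefect import flow, Runner


@flow
def hello_flow(name: str = "world"):
    print(f"Hello, {name}!")


if __name__ == "__main__":
    runner = Runner(name="example-runner")

    # add_flow can still be called from sync code, as in the docstring example
    runner.add_flow(hello_flow, schedule={"cron": "0 * * * *"})

    # start() is now async-only; run_once=True polls once and then exits
    asyncio.run(runner.start(run_once=True))
```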
prefect/runner/server.py CHANGED
@@ -16,7 +16,6 @@ from prefect.runner.utils import (
     inject_schemas_into_openapi,
 )
 from prefect.settings import (
-    PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS,
     PREFECT_RUNNER_POLL_FREQUENCY,
     PREFECT_RUNNER_SERVER_HOST,
     PREFECT_RUNNER_SERVER_LOG_LEVEL,
@@ -202,7 +201,7 @@ def _build_generic_endpoint_for_flows(

        try:
            flow = load_flow_from_entrypoint(body.entrypoint)
-        except (MissingFlowError, ScriptError, ModuleNotFoundError):
+        except (FileNotFoundError, MissingFlowError, ScriptError, ModuleNotFoundError):
            return JSONResponse(
                status_code=status.HTTP_404_NOT_FOUND,
                content={"message": "Flow not found"},
@@ -261,29 +260,28 @@ async def build_server(runner: "Runner") -> FastAPI:
     router.add_api_route("/shutdown", shutdown(runner=runner), methods=["POST"])
     webserver.include_router(router)

-    if PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS.value():
-        deployments_router, deployment_schemas = await get_deployment_router(runner)
-        webserver.include_router(deployments_router)
-
-        subflow_schemas = await get_subflow_schemas(runner)
-        webserver.add_api_route(
-            "/flow/run",
-            _build_generic_endpoint_for_flows(runner=runner, schemas=subflow_schemas),
-            methods=["POST"],
-            name="Run flow in background",
-            description="Trigger any flow run as a background task on the runner.",
-            summary="Run flow",
-        )
-
-        def customize_openapi():
-            if webserver.openapi_schema:
-                return webserver.openapi_schema
+    deployments_router, deployment_schemas = await get_deployment_router(runner)
+    webserver.include_router(deployments_router)
+
+    subflow_schemas = await get_subflow_schemas(runner)
+    webserver.add_api_route(
+        "/flow/run",
+        _build_generic_endpoint_for_flows(runner=runner, schemas=subflow_schemas),
+        methods=["POST"],
+        name="Run flow in background",
+        description="Trigger any flow run as a background task on the runner.",
+        summary="Run flow",
+    )

-            openapi_schema = inject_schemas_into_openapi(webserver, deployment_schemas)
-            webserver.openapi_schema = openapi_schema
+    def customize_openapi():
+        if webserver.openapi_schema:
             return webserver.openapi_schema

-        webserver.openapi = customize_openapi
+        openapi_schema = inject_schemas_into_openapi(webserver, deployment_schemas)
+        webserver.openapi_schema = openapi_schema
+        return webserver.openapi_schema
+
+    webserver.openapi = customize_openapi

     return webserver

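With the experimental gate removed, `build_server` always registers the deployment router and the `/flow/run` route, and a missing entrypoint file now maps to a 404 via the added `FileNotFoundError` handler. A rough sketch of serving the app manually; the uvicorn wiring, host, and port are illustrative and not part of this diff:

```python
import asyncio

import uvicorn

from prefect import flow, Runner
from prefect.runner.server import build_server


@flow
def hello_flow():
    print("hello")


async def main():
    runner = Runner(name="example-runner")
    await runner.add_flow(hello_flow)

    # The deployment routes and "/flow/run" are now registered unconditionally;
    # PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS is no longer consulted.
    app = await build_server(runner)
    config = uvicorn.Config(app, host="127.0.0.1", port=8080)
    await uvicorn.Server(config).serve()


if __name__ == "__main__":
    asyncio.run(main())
```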
prefect/runner/submit.py CHANGED
@@ -14,7 +14,6 @@ from prefect.context import FlowRunContext
 from prefect.flows import Flow
 from prefect.logging import get_logger
 from prefect.settings import (
-    PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS,
     PREFECT_RUNNER_PROCESS_LIMIT,
     PREFECT_RUNNER_SERVER_HOST,
     PREFECT_RUNNER_SERVER_PORT,
@@ -131,13 +130,6 @@ async def submit_to_runner(
             "The `submit_to_runner` utility only supports submitting flows and tasks."
         )

-    if not PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS.value():
-        raise ValueError(
-            "The `submit_to_runner` utility requires the `Runner` webserver to be"
-            " running and built with extra endpoints enabled. To enable this, set the"
-            " `PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS` setting to `True`."
-        )
-
     parameters = parameters or {}
     if isinstance(parameters, List):
         return_single = False
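Deleting the guard means `submit_to_runner` no longer raises when the experimental setting is unset; it only needs a reachable runner webserver. A hedged sketch of submitting several parameter sets (the flows and parameters are placeholders, and a runner serving `process` is assumed to be running separately):

```python
from prefect import flow
from prefect.runner.submit import submit_to_runner


@flow
def process(item: str):
    print(f"processing {item}")


@flow
def orchestrator():
    # Passing a list of parameter dicts submits one run per dict and returns
    # a list of flow runs; no experimental setting is required anymore.
    return submit_to_runner(process, [{"item": "a"}, {"item": "b"}])
```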
prefect/runtime/flow_run.py CHANGED
@@ -38,6 +38,7 @@ __all__ = [
     "parameters",
     "parent_flow_run_id",
     "parent_deployment_id",
+    "root_flow_run_id",
     "run_count",
     "api_url",
     "ui_url",
@@ -237,11 +238,12 @@ def get_parent_flow_run_id() -> Optional[str]:
         parent_task_run = from_sync.call_soon_in_loop_thread(
             create_call(_get_task_run, parent_task_run_id)
         ).result()
-        return parent_task_run.flow_run_id
+        return str(parent_task_run.flow_run_id) if parent_task_run.flow_run_id else None
+
     return None


-def get_parent_deployment_id() -> Dict[str, Any]:
+def get_parent_deployment_id() -> Optional[str]:
     parent_flow_run_id = get_parent_flow_run_id()
     if parent_flow_run_id is None:
         return None
@@ -249,7 +251,39 @@ def get_parent_deployment_id() -> Dict[str, Any]:
     parent_flow_run = from_sync.call_soon_in_loop_thread(
         create_call(_get_flow_run, parent_flow_run_id)
     ).result()
-    return parent_flow_run.deployment_id if parent_flow_run else None
+
+    if parent_flow_run:
+        return (
+            str(parent_flow_run.deployment_id)
+            if parent_flow_run.deployment_id
+            else None
+        )
+
+    return None
+
+
+def get_root_flow_run_id() -> str:
+    run_id = get_id()
+    parent_flow_run_id = get_parent_flow_run_id()
+    if parent_flow_run_id is None:
+        return run_id
+
+    def _get_root_flow_run_id(flow_run_id):
+        flow_run = from_sync.call_soon_in_loop_thread(
+            create_call(_get_flow_run, flow_run_id)
+        ).result()
+
+        if flow_run.parent_task_run_id is None:
+            return str(flow_run_id)
+        else:
+            parent_task_run = from_sync.call_soon_in_loop_thread(
+                create_call(_get_task_run, flow_run.parent_task_run_id)
+            ).result()
+            return _get_root_flow_run_id(parent_task_run.flow_run_id)
+
+    root_flow_run_id = _get_root_flow_run_id(parent_flow_run_id)
+
+    return root_flow_run_id


 def get_flow_run_api_url() -> Optional[str]:
@@ -275,6 +309,7 @@ FIELDS = {
     "parameters": get_parameters,
     "parent_flow_run_id": get_parent_flow_run_id,
     "parent_deployment_id": get_parent_deployment_id,
+    "root_flow_run_id": get_root_flow_run_id,
     "run_count": get_run_count,
     "api_url": get_flow_run_api_url,
     "ui_url": get_flow_run_ui_url,
prefect/settings.py CHANGED
@@ -481,18 +481,6 @@ PREFECT_HOME = Setting(
 directory may be created automatically when required.
 """

-PREFECT_EXTRA_ENTRYPOINTS = Setting(
-    str,
-    default="",
-)
-"""
-Modules for Prefect to import when Prefect is imported.
-
-Values should be separated by commas, e.g. `my_module,my_other_module`.
-Objects within modules may be specified by a ':' partition, e.g. `my_module:my_object`.
-If a callable object is provided, it will be called with no arguments on import.
-"""
-
 PREFECT_DEBUG_MODE = Setting(
     bool,
     default=False,
@@ -1160,6 +1148,11 @@ polled."""
 PREFECT_API_LOG_RETRYABLE_ERRORS = Setting(bool, default=False)
 """If `True`, log retryable errors in the API and it's services."""

+PREFECT_API_SERVICES_TASK_RUN_RECORDER_ENABLED = Setting(bool, default=True)
+"""
+Whether or not to start the task run recorder service in the server application.
+"""
+

 PREFECT_API_DEFAULT_LIMIT = Setting(
     int,
@@ -1309,17 +1302,13 @@ The maximum number of artifacts to show on a flow run graph on the v2 API
 """


-PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_CANCELLATION = Setting(bool, default=True)
-"""
-Whether or not to enable experimental enhanced flow run cancellation.
-"""
-
-PREFECT_EXPERIMENTAL_WARN_ENHANCED_CANCELLATION = Setting(bool, default=False)
+PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION = Setting(
+    bool, default=False
+)
 """
-Whether or not to warn when experimental enhanced flow run cancellation is used.
+Whether or not to enable experimental client side task run orchestration.
 """

-
 # Prefect Events feature flags

 PREFECT_RUNNER_PROCESS_LIMIT = Setting(int, default=5)
@@ -1437,16 +1426,6 @@ a task worker should move a task from PENDING to RUNNING very quickly, so runs s
 PENDING for a while is a sign that the task worker may have crashed.
 """

-PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS = Setting(bool, default=False)
-"""
-Whether or not to enable experimental worker webserver endpoints.
-"""
-
-PREFECT_EXPERIMENTAL_DISABLE_SYNC_COMPAT = Setting(bool, default=False)
-"""
-Whether or not to disable the sync_compatible decorator utility.
-"""
-
 PREFECT_EXPERIMENTAL_ENABLE_SCHEDULE_CONCURRENCY = Setting(bool, default=False)

 # Defaults -----------------------------------------------------------------------------
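The renamed experimental flag and the new task run recorder setting can be toggled like any other Prefect setting. A sketch using `temporary_settings`, assuming it still accepts an `updates` mapping of settings to values:

```python
from prefect.settings import (
    PREFECT_API_SERVICES_TASK_RUN_RECORDER_ENABLED,
    PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION,
    temporary_settings,
)

# Opt into client-side task orchestration and turn off the task run recorder
# service for the duration of this context only.
with temporary_settings(
    updates={
        PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION: True,
        PREFECT_API_SERVICES_TASK_RUN_RECORDER_ENABLED: False,
    }
):
    ...
```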