prefect-client 3.0.0rc9__py3-none-any.whl → 3.0.0rc11__py3-none-any.whl

This diff compares two publicly released versions of the package as published to their registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
Files changed (49)
  1. prefect/_internal/compatibility/migration.py +48 -8
  2. prefect/_internal/concurrency/api.py +1 -1
  3. prefect/_internal/retries.py +61 -0
  4. prefect/agent.py +6 -0
  5. prefect/client/cloud.py +1 -1
  6. prefect/client/schemas/objects.py +3 -4
  7. prefect/concurrency/asyncio.py +3 -3
  8. prefect/concurrency/events.py +1 -1
  9. prefect/concurrency/services.py +3 -2
  10. prefect/concurrency/sync.py +19 -5
  11. prefect/context.py +14 -2
  12. prefect/deployments/__init__.py +28 -15
  13. prefect/deployments/schedules.py +5 -2
  14. prefect/deployments/steps/pull.py +7 -0
  15. prefect/events/schemas/automations.py +3 -3
  16. prefect/exceptions.py +4 -1
  17. prefect/filesystems.py +4 -3
  18. prefect/flow_engine.py +76 -14
  19. prefect/flows.py +222 -64
  20. prefect/futures.py +53 -7
  21. prefect/infrastructure/__init__.py +6 -0
  22. prefect/infrastructure/base.py +6 -0
  23. prefect/logging/loggers.py +1 -1
  24. prefect/results.py +50 -67
  25. prefect/runner/runner.py +93 -20
  26. prefect/runner/server.py +20 -22
  27. prefect/runner/submit.py +0 -8
  28. prefect/runtime/flow_run.py +38 -3
  29. prefect/serializers.py +3 -3
  30. prefect/settings.py +15 -45
  31. prefect/task_engine.py +77 -21
  32. prefect/task_runners.py +28 -16
  33. prefect/task_worker.py +6 -4
  34. prefect/tasks.py +30 -5
  35. prefect/transactions.py +18 -2
  36. prefect/utilities/asyncutils.py +9 -3
  37. prefect/utilities/engine.py +34 -1
  38. prefect/utilities/importtools.py +1 -1
  39. prefect/utilities/timeout.py +20 -5
  40. prefect/workers/base.py +98 -208
  41. prefect/workers/block.py +6 -0
  42. prefect/workers/cloud.py +6 -0
  43. prefect/workers/process.py +262 -4
  44. prefect/workers/server.py +27 -9
  45. {prefect_client-3.0.0rc9.dist-info → prefect_client-3.0.0rc11.dist-info}/METADATA +4 -4
  46. {prefect_client-3.0.0rc9.dist-info → prefect_client-3.0.0rc11.dist-info}/RECORD +49 -44
  47. {prefect_client-3.0.0rc9.dist-info → prefect_client-3.0.0rc11.dist-info}/LICENSE +0 -0
  48. {prefect_client-3.0.0rc9.dist-info → prefect_client-3.0.0rc11.dist-info}/WHEEL +0 -0
  49. {prefect_client-3.0.0rc9.dist-info → prefect_client-3.0.0rc11.dist-info}/top_level.txt +0 -0
prefect/futures.py CHANGED
@@ -1,4 +1,5 @@
  import abc
+ import collections
  import concurrent.futures
  import inspect
  import uuid
@@ -256,13 +257,7 @@ class PrefectFutureList(list, Iterator, Generic[F]):
              timeout: The maximum number of seconds to wait for all futures to
                  complete. This method will not raise if the timeout is reached.
          """
-         try:
-             with timeout_context(timeout):
-                 for future in self:
-                     future.wait()
-         except TimeoutError:
-             logger.debug("Timed out waiting for all futures to complete.")
-             return
+         wait(self, timeout=timeout)

      def result(
          self,
@@ -297,6 +292,57 @@ class PrefectFutureList(list, Iterator, Generic[F]):
          ) from exc


+ DoneAndNotDoneFutures = collections.namedtuple("DoneAndNotDoneFutures", "done not_done")
+
+
+ def wait(futures: List[PrefectFuture], timeout=None) -> DoneAndNotDoneFutures:
+     """
+     Wait for the futures in the given sequence to complete.
+
+     Args:
+         futures: The sequence of Futures to wait upon.
+         timeout: The maximum number of seconds to wait. If None, then there
+             is no limit on the wait time.
+
+     Returns:
+         A named 2-tuple of sets. The first set, named 'done', contains the
+         futures that completed (finished or cancelled) before the wait
+         completed. The second set, named 'not_done', contains uncompleted
+         futures. Duplicate futures given to *futures* are removed and will be
+         returned only once.
+
+     Examples:
+         ```python
+         @task
+         def sleep_task(seconds):
+             sleep(seconds)
+             return 42
+
+         @flow
+         def flow():
+             futures = sleep_task.map(range(10))
+             done, not_done = wait(futures, timeout=5)
+             print(f"Done: {len(done)}")
+             print(f"Not Done: {len(not_done)}")
+         ```
+     """
+     futures = set(futures)
+     done = {f for f in futures if f._final_state}
+     not_done = futures - done
+     if len(done) == len(futures):
+         return DoneAndNotDoneFutures(done, not_done)
+     try:
+         with timeout_context(timeout):
+             for future in not_done.copy():
+                 future.wait()
+                 done.add(future)
+                 not_done.remove(future)
+             return DoneAndNotDoneFutures(done, not_done)
+     except TimeoutError:
+         logger.debug("Timed out waiting for all futures to complete.")
+         return DoneAndNotDoneFutures(done, not_done)
+
+
  def resolve_futures_to_states(
      expr: Union[PrefectFuture, Any],
  ) -> Union[State, Any]:
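The upshot of this hunk: `PrefectFutureList.wait` now delegates to a new module-level `wait` that mirrors `concurrent.futures.wait` and reports which futures finished. A minimal usage sketch against rc11 (the `sleep_task` flow below is illustrative, not part of the package):

```python
import time

from prefect import flow, task
from prefect.futures import wait


@task
def sleep_task(seconds: int) -> int:
    time.sleep(seconds)
    return 42


@flow
def demo():
    futures = sleep_task.map([1, 2, 30])
    # Block for at most 5 seconds; on timeout this logs and returns
    # partial results rather than raising, per the except branch above.
    done, not_done = wait(futures, timeout=5)
    print(f"{len(done)} done, {len(not_done)} not done")
```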
@@ -0,0 +1,6 @@
+ """
+ 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
+ """
+ from prefect._internal.compatibility.migration import getattr_migration
+
+ __getattr__ = getattr_migration(__name__)
@@ -0,0 +1,6 @@
+ """
+ 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
+ """
+ from prefect._internal.compatibility.migration import getattr_migration
+
+ __getattr__ = getattr_migration(__name__)
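These two identical hunks are new six-line stub modules; they correspond to the +6 entries in the file list (prefect/agent.py, prefect/infrastructure/__init__.py, prefect/infrastructure/base.py, prefect/workers/block.py, prefect/workers/cloud.py), though the diff viewer dropped the file headers. `getattr_migration` installs a PEP 562 module-level `__getattr__`, so touching a moved or removed name fails with an upgrade hint instead of a bare `AttributeError`. An illustrative sketch; the exact exception type and message come from `getattr_migration`, and `PrefectAgent` is just an example of a name removed in 3.0:

```python
import prefect.agent  # importing the stub module itself still works

try:
    prefect.agent.PrefectAgent  # removed in Prefect 3.0; workers replace agents
except Exception as exc:
    # The shim raises with guidance on what to use instead.
    print(f"{type(exc).__name__}: {exc}")
```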
prefect/logging/loggers.py CHANGED
@@ -97,7 +97,7 @@ def get_logger(name: Optional[str] = None) -> logging.Logger:


  def get_run_logger(
-     context: "RunContext" = None, **kwargs: str
+     context: Optional["RunContext"] = None, **kwargs: str
  ) -> Union[logging.Logger, logging.LoggerAdapter]:
      """
      Get a Prefect logger for the current task run or flow run.
prefect/results.py CHANGED
@@ -28,7 +28,6 @@ from prefect.client.utilities import inject_client
  from prefect.exceptions import MissingResult, ObjectAlreadyExists
  from prefect.filesystems import (
      LocalFileSystem,
-     ReadableFileSystem,
      WritableFileSystem,
  )
  from prefect.logging import get_logger
@@ -111,22 +110,32 @@ async def _get_or_create_default_storage(block_document_slug: str) -> ResultStor


  @sync_compatible
- async def get_or_create_default_result_storage() -> ResultStorage:
+ async def get_default_result_storage() -> ResultStorage:
      """
      Generate a default file system for result storage.
      """
-     return await _get_or_create_default_storage(
-         PREFECT_DEFAULT_RESULT_STORAGE_BLOCK.value()
-     )
+     default_block = PREFECT_DEFAULT_RESULT_STORAGE_BLOCK.value()
+
+     if default_block is not None:
+         return await Block.load(default_block)
+
+     # otherwise, use the local file system
+     basepath = PREFECT_LOCAL_STORAGE_PATH.value()
+     return LocalFileSystem(basepath=basepath)


  async def get_or_create_default_task_scheduling_storage() -> ResultStorage:
      """
      Generate a default file system for background task parameter/result storage.
      """
-     return await _get_or_create_default_storage(
-         PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK.value()
-     )
+     default_block = PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK.value()
+
+     if default_block is not None:
+         return await Block.load(default_block)
+
+     # otherwise, use the local file system
+     basepath = PREFECT_LOCAL_STORAGE_PATH.value()
+     return LocalFileSystem(basepath=basepath)


  def get_default_result_serializer() -> ResultSerializer:
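Both helpers now resolve storage the same way: load the block named by the setting if one is configured, otherwise fall back to a `LocalFileSystem` rooted at `PREFECT_LOCAL_STORAGE_PATH` (no more auto-saved anonymous block). A hedged sketch of opting into a shared block; `s3-bucket/my-results` is a hypothetical, previously saved block document:

```python
from prefect.settings import PREFECT_DEFAULT_RESULT_STORAGE_BLOCK, temporary_settings

# Point default result storage at a saved block document by its slug.
with temporary_settings({PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: "s3-bucket/my-results"}):
    ...  # flows run here persist results to that block

# With the setting unset, results land under PREFECT_LOCAL_STORAGE_PATH instead.
```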
@@ -177,9 +186,7 @@ class ResultFactory(BaseModel):
                  kwargs.pop(key)

          # Apply defaults
-         kwargs.setdefault(
-             "result_storage", await get_or_create_default_result_storage()
-         )
+         kwargs.setdefault("result_storage", await get_default_result_storage())
          kwargs.setdefault("result_serializer", get_default_result_serializer())
          kwargs.setdefault("persist_result", get_default_persist_setting())
          kwargs.setdefault("cache_result_in_memory", True)
@@ -230,9 +237,7 @@ class ResultFactory(BaseModel):
          """
          Create a new result factory for a task.
          """
-         return await cls._from_task(
-             task, get_or_create_default_result_storage, client=client
-         )
+         return await cls._from_task(task, get_default_result_storage, client=client)

      @classmethod
      @inject_client
@@ -268,7 +273,14 @@ class ResultFactory(BaseModel):
              if ctx and ctx.result_factory
              else get_default_result_serializer()
          )
-         persist_result = task.persist_result
+         if task.persist_result is None:
+             persist_result = (
+                 ctx.result_factory.persist_result
+                 if ctx and ctx.result_factory
+                 else get_default_persist_setting()
+             )
+         else:
+             persist_result = task.persist_result

          cache_result_in_memory = task.cache_result_in_memory
@@ -330,16 +342,7 @@ class ResultFactory(BaseModel):
                  # Avoid saving the block if it already has an identifier assigned
                  storage_block_id = storage_block._block_document_id
              else:
-                 if persist_result:
-                     # TODO: Overwrite is true to avoid issues where the save collides with
-                     # a previously saved document with a matching hash
-                     storage_block_id = await storage_block._save(
-                         is_anonymous=True, overwrite=True, client=client
-                     )
-                 else:
-                     # a None-type UUID on unpersisted storage should not matter
-                     # since the ID is generated on the server
-                     storage_block_id = None
+                 storage_block_id = None
          elif isinstance(result_storage, str):
              storage_block = await Block.load(result_storage, client=client)
              storage_block_id = storage_block._block_document_id
@@ -412,9 +415,6 @@ class ResultFactory(BaseModel):

      @sync_compatible
      async def store_parameters(self, identifier: UUID, parameters: Dict[str, Any]):
-         assert (
-             self.storage_block_id is not None
-         ), "Unexpected storage block ID. Was it persisted?"
          data = self.serializer.dumps(parameters)
          blob = PersistedResultBlob(serializer=self.serializer, data=data)
          await self.storage_block.write_path(
@@ -423,9 +423,6 @@ class ResultFactory(BaseModel):

      @sync_compatible
      async def read_parameters(self, identifier: UUID) -> Dict[str, Any]:
-         assert (
-             self.storage_block_id is not None
-         ), "Unexpected storage block ID. Was it persisted?"
          blob = PersistedResultBlob.model_validate_json(
              await self.storage_block.read_path(f"parameters/{identifier}")
          )
@@ -435,10 +432,7 @@
  @register_base_type
  class BaseResult(BaseModel, abc.ABC, Generic[R]):
      model_config = ConfigDict(extra="forbid")
-
      type: str
-     artifact_type: Optional[str] = None
-     artifact_description: Optional[str] = None

      def __init__(self, **data: Any) -> None:
          type_string = get_dispatch_key(self) if type(self) != BaseResult else "__base__"
@@ -504,11 +498,7 @@ class UnpersistedResult(BaseResult):
          obj: R,
          cache_object: bool = True,
      ) -> "UnpersistedResult[R]":
-         description = f"Unpersisted result of type `{type(obj).__name__}`"
-         result = cls(
-             artifact_type="result",
-             artifact_description=description,
-         )
+         result = cls()
          # Only store the object in local memory, it will not be sent to the API
          if cache_object:
              result._cache_object(obj)
@@ -528,8 +518,8 @@ class PersistedResult(BaseResult):
      type: str = "reference"

      serializer_type: str
-     storage_block_id: uuid.UUID
      storage_key: str
+     storage_block_id: Optional[uuid.UUID] = None
      expiration: Optional[DateTime] = None

      _should_cache_object: bool = PrivateAttr(default=True)
@@ -547,6 +537,17 @@ class PersistedResult(BaseResult):
          self._storage_block = storage_block
          self._serializer = serializer

+     @inject_client
+     async def _get_storage_block(self, client: "PrefectClient") -> WritableFileSystem:
+         if self._storage_block is not None:
+             return self._storage_block
+         elif self.storage_block_id is not None:
+             block_document = await client.read_block_document(self.storage_block_id)
+             self._storage_block = Block._from_block_document(block_document)
+         else:
+             self._storage_block = await get_default_result_storage()
+         return self._storage_block
+
      @sync_compatible
      @inject_client
      async def get(self, client: "PrefectClient") -> R:
@@ -567,12 +568,8 @@

      @inject_client
      async def _read_blob(self, client: "PrefectClient") -> "PersistedResultBlob":
-         assert (
-             self.storage_block_id is not None
-         ), "Unexpected storage block ID. Was it persisted?"
-         block_document = await client.read_block_document(self.storage_block_id)
-         storage_block: ReadableFileSystem = Block._from_block_document(block_document)
-         content = await storage_block.read_path(self.storage_key)
+         block = await self._get_storage_block(client=client)
+         content = await block.read_path(self.storage_key)
          blob = PersistedResultBlob.model_validate_json(content)
          return blob

@@ -607,10 +604,7 @@
          obj = obj if obj is not NotSet else self._cache

          # next, the storage block
-         storage_block = self._storage_block
-         if storage_block is None:
-             block_document = await client.read_block_document(self.storage_block_id)
-             storage_block = Block._from_block_document(block_document)
+         storage_block = await self._get_storage_block(client=client)

          # finally, the serializer
          serializer = self._serializer
@@ -673,9 +667,9 @@
          cls: "Type[PersistedResult]",
          obj: R,
          storage_block: WritableFileSystem,
-         storage_block_id: uuid.UUID,
          storage_key_fn: Callable[[], str],
          serializer: Serializer,
+         storage_block_id: Optional[uuid.UUID] = None,
          cache_object: bool = True,
          expiration: Optional[DateTime] = None,
          defer_persistence: bool = False,
@@ -686,31 +680,21 @@
          The object will be serialized and written to the storage block under a unique
          key. It will then be cached on the returned result.
          """
-         assert (
-             storage_block_id is not None
-         ), "Unexpected storage block ID. Was it saved?"
-
          key = storage_key_fn()
          if not isinstance(key, str):
              raise TypeError(
                  f"Expected type 'str' for result storage key; got value {key!r}"
              )
-         description = f"Result of type `{type(obj).__name__}`"
          uri = cls._infer_path(storage_block, key)
-         if uri:
-             if isinstance(storage_block, LocalFileSystem):
-                 description += f" persisted to: `{uri}`"
-             else:
-                 description += f" persisted to [{uri}]({uri})."
-         else:
-             description += f" persisted with storage block `{storage_block_id}`."
+
+         # in this case we store an absolute path
+         if storage_block_id is None and uri is not None:
+             key = str(uri)

          result = cls(
              serializer_type=serializer.type,
              storage_block_id=storage_block_id,
              storage_key=key,
-             artifact_type="result",
-             artifact_description=description,
              expiration=expiration,
          )

@@ -787,5 +771,4 @@ class UnknownResult(BaseResult):
                  "Only None is supported."
              )

-         description = "Unknown result persisted to Prefect."
-         return cls(value=obj, artifact_type="result", artifact_description=description)
+         return cls(value=obj)
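One behavioral consequence of the `ResultFactory` changes above: a task that leaves `persist_result` unset now inherits the setting from the enclosing flow's result factory rather than always falling back to the global default. A behavior sketch, assuming rc11 semantics:

```python
from prefect import flow, task


@task  # persist_result not set, so it now follows the parent flow
def child() -> int:
    return 1


@flow(persist_result=True)
def parent() -> int:
    # child()'s result factory inherits persist_result=True from this
    # flow's context instead of using the global default.
    return child()
```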
prefect/runner/runner.py CHANGED
@@ -65,17 +65,13 @@ from prefect.client.schemas.filters import (
      FlowRunFilterStateName,
      FlowRunFilterStateType,
  )
- from prefect.client.schemas.objects import (
-     FlowRun,
-     State,
-     StateType,
- )
+ from prefect.client.schemas.objects import Flow as APIFlow
+ from prefect.client.schemas.objects import FlowRun, State, StateType
  from prefect.client.schemas.schedules import SCHEDULE_TYPES
- from prefect.deployments.runner import (
-     EntrypointType,
-     RunnerDeployment,
- )
  from prefect.events import DeploymentTriggerTypes, TriggerTypes
+ from prefect.events.related import tags_as_related_resources
+ from prefect.events.schemas.events import RelatedResource
+ from prefect.events.utilities import emit_event
  from prefect.exceptions import Abort, ObjectNotFound
  from prefect.flows import Flow, load_flow_from_flow_run
  from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logger
@@ -88,6 +84,7 @@ from prefect.settings import (
      get_current_settings,
  )
  from prefect.states import Crashed, Pending, exception_to_failed_state
+ from prefect.types.entrypoint import EntrypointType
  from prefect.utilities.asyncutils import (
      asyncnullcontext,
      is_async_fn,
@@ -96,9 +93,12 @@ from prefect.utilities.asyncutils import (
  from prefect.utilities.engine import propose_state
  from prefect.utilities.processutils import _register_signal, run_process
  from prefect.utilities.services import critical_service_loop
+ from prefect.utilities.slugify import slugify

  if TYPE_CHECKING:
+     from prefect.client.schemas.objects import Deployment
      from prefect.client.types.flexible_schedule_list import FlexibleScheduleList
+     from prefect.deployments.runner import RunnerDeployment

  __all__ = ["Runner"]

@@ -130,6 +130,7 @@ class Runner:
          Examples:
              Set up a Runner to manage the execution of scheduled flow runs for two flows:
              ```python
+             import asyncio
              from prefect import flow, Runner

              @flow
@@ -149,7 +150,7 @@
              # Run on a cron schedule
              runner.add_flow(goodbye_flow, schedule={"cron": "0 * * * *"})

-             runner.start()
+             asyncio.run(runner.start())
              ```
          """
          if name and ("/" in name or "%" in name):
@@ -166,9 +167,6 @@
          self.query_seconds = query_seconds or PREFECT_RUNNER_POLL_FREQUENCY.value()
          self._prefetch_seconds = prefetch_seconds

-         self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-         self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-
          self._limiter: Optional[anyio.CapacityLimiter] = anyio.CapacityLimiter(
              self.limit
          )
@@ -177,19 +175,20 @@
          self._cancelling_flow_run_ids = set()
          self._scheduled_task_scopes = set()
          self._deployment_ids: Set[UUID] = set()
-         self._flow_run_process_map = dict()
+         self._flow_run_process_map: Dict[UUID, Dict] = dict()

          self._tmp_dir: Path = (
              Path(tempfile.gettempdir()) / "runner_storage" / str(uuid4())
          )
          self._storage_objs: List[RunnerStorage] = []
          self._deployment_storage_map: Dict[UUID, RunnerStorage] = {}
-         self._loop = asyncio.get_event_loop()
+
+         self._loop: Optional[asyncio.AbstractEventLoop] = None

      @sync_compatible
      async def add_deployment(
          self,
-         deployment: RunnerDeployment,
+         deployment: "RunnerDeployment",
      ) -> UUID:
          """
          Registers the deployment with the Prefect API and will monitor for work once
@@ -324,7 +323,6 @@

          sys.exit(0)

-     @sync_compatible
      async def start(
          self, run_once: bool = False, webserver: Optional[bool] = None
      ) -> None:
@@ -342,6 +340,7 @@
          Initialize a Runner, add two flows, and serve them by starting the Runner:

              ```python
+             import asyncio
              from prefect import flow, Runner

              @flow
@@ -361,7 +360,7 @@
              # Run on a cron schedule
              runner.add_flow(goodbye_flow, schedule={"cron": "0 * * * *"})

-             runner.start()
+             asyncio.run(runner.start())
              ```
          """
          from prefect.runner.server import start_webserver
@@ -695,8 +694,9 @@
          """
          self._logger.info("Pausing all deployments...")
          for deployment_id in self._deployment_ids:
-             self._logger.debug(f"Pausing deployment '{deployment_id}'")
              await self._client.set_deployment_paused_state(deployment_id, True)
+             self._logger.debug(f"Paused deployment '{deployment_id}'")
+
          self._logger.info("All deployments have been paused!")

      async def _get_and_submit_flow_runs(self):
@@ -818,8 +818,71 @@
                  "message": state_msg or "Flow run was cancelled successfully."
              },
          )
+         try:
+             deployment = await self._client.read_deployment(flow_run.deployment_id)
+         except ObjectNotFound:
+             deployment = None
+         try:
+             flow = await self._client.read_flow(flow_run.flow_id)
+         except ObjectNotFound:
+             flow = None
+         self._emit_flow_run_cancelled_event(
+             flow_run=flow_run, flow=flow, deployment=deployment
+         )
          run_logger.info(f"Cancelled flow run '{flow_run.name}'!")

+     def _event_resource(self):
+         from prefect import __version__
+
+         return {
+             "prefect.resource.id": f"prefect.runner.{slugify(self.name)}",
+             "prefect.resource.name": self.name,
+             "prefect.version": __version__,
+         }
+
+     def _emit_flow_run_cancelled_event(
+         self,
+         flow_run: "FlowRun",
+         flow: "Optional[APIFlow]",
+         deployment: "Optional[Deployment]",
+     ):
+         related = []
+         tags = []
+         if deployment:
+             related.append(
+                 {
+                     "prefect.resource.id": f"prefect.deployment.{deployment.id}",
+                     "prefect.resource.role": "deployment",
+                     "prefect.resource.name": deployment.name,
+                 }
+             )
+             tags.extend(deployment.tags)
+         if flow:
+             related.append(
+                 {
+                     "prefect.resource.id": f"prefect.flow.{flow.id}",
+                     "prefect.resource.role": "flow",
+                     "prefect.resource.name": flow.name,
+                 }
+             )
+         related.append(
+             {
+                 "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
+                 "prefect.resource.role": "flow-run",
+                 "prefect.resource.name": flow_run.name,
+             }
+         )
+         tags.extend(flow_run.tags)
+
+         related = [RelatedResource.model_validate(r) for r in related]
+         related += tags_as_related_resources(set(tags))
+
+         emit_event(
+             event="prefect.runner.cancelled-flow-run",
+             resource=self._event_resource(),
+             related=related,
+         )
+
      async def _get_scheduled_flow_runs(
          self,
      ) -> List["FlowRun"]:
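For consumers of the events API, here is a hedged sketch of the payload emitted after a successful cancellation, reconstructed from `_event_resource` and `_emit_flow_run_cancelled_event` above; all IDs and names are placeholders:

```python
# Placeholder IDs/names; real values come from the runner and the cancelled
# flow run. One extra related resource per tag is appended as well, via
# tags_as_related_resources.
example_event = {
    "event": "prefect.runner.cancelled-flow-run",
    "resource": {
        "prefect.resource.id": "prefect.runner.my-runner",  # slugify(self.name)
        "prefect.resource.name": "my runner",
        "prefect.version": "3.0.0rc11",
    },
    "related": [
        {
            "prefect.resource.id": "prefect.deployment.<deployment-id>",  # if found
            "prefect.resource.role": "deployment",
            "prefect.resource.name": "my-deployment",
        },
        {
            "prefect.resource.id": "prefect.flow.<flow-id>",  # if found
            "prefect.resource.role": "flow",
            "prefect.resource.name": "my-flow",
        },
        {
            "prefect.resource.id": "prefect.flow-run.<flow-run-id>",  # always present
            "prefect.resource.role": "flow-run",
            "prefect.resource.name": "amber-ibis",
        },
    ],
}
```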
@@ -956,7 +1019,7 @@
          # If the run is not ready to submit, release the concurrency slot
          self._release_limit_slot(flow_run.id)

-         self._submitting_flow_run_ids.remove(flow_run.id)
+         self._submitting_flow_run_ids.discard(flow_run.id)

      async def _submit_run_and_capture_errors(
          self,
@@ -1163,6 +1226,16 @@
          self._logger.debug("Starting runner...")
          self._client = get_client()
          self._tmp_dir.mkdir(parents=True)
+
+         if not hasattr(self, "_loop") or not self._loop:
+             self._loop = asyncio.get_event_loop()
+
+         if not hasattr(self, "_runs_task_group") or not self._runs_task_group:
+             self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
+
+         if not hasattr(self, "_loops_task_group") or not self._loops_task_group:
+             self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
+
          await self._client.__aenter__()
          await self._runs_task_group.__aenter__()

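Together with the `__init__` changes above, the event loop and task groups are now created lazily in the startup path rather than at construction time, so a `Runner` can be instantiated where no event loop exists yet. A small sketch of the observable difference (it touches the private `_loop` attribute purely for illustration):

```python
from prefect.runner import Runner

# Constructing a Runner no longer calls asyncio.get_event_loop(), so this is
# safe in a plain synchronous script with no loop running.
runner = Runner(name="lazy-runner")
assert runner._loop is None  # populated later, inside start()
```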
prefect/runner/server.py CHANGED
@@ -16,7 +16,6 @@ from prefect.runner.utils import (
      inject_schemas_into_openapi,
  )
  from prefect.settings import (
-     PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS,
      PREFECT_RUNNER_POLL_FREQUENCY,
      PREFECT_RUNNER_SERVER_HOST,
      PREFECT_RUNNER_SERVER_LOG_LEVEL,
@@ -202,7 +201,7 @@ def _build_generic_endpoint_for_flows(

      try:
          flow = load_flow_from_entrypoint(body.entrypoint)
-     except (MissingFlowError, ScriptError, ModuleNotFoundError):
+     except (FileNotFoundError, MissingFlowError, ScriptError, ModuleNotFoundError):
          return JSONResponse(
              status_code=status.HTTP_404_NOT_FOUND,
              content={"message": "Flow not found"},
@@ -261,29 +260,28 @@ async def build_server(runner: "Runner") -> FastAPI:
      router.add_api_route("/shutdown", shutdown(runner=runner), methods=["POST"])
      webserver.include_router(router)

-     if PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS.value():
-         deployments_router, deployment_schemas = await get_deployment_router(runner)
-         webserver.include_router(deployments_router)
-
-         subflow_schemas = await get_subflow_schemas(runner)
-         webserver.add_api_route(
-             "/flow/run",
-             _build_generic_endpoint_for_flows(runner=runner, schemas=subflow_schemas),
-             methods=["POST"],
-             name="Run flow in background",
-             description="Trigger any flow run as a background task on the runner.",
-             summary="Run flow",
-         )
-
-         def customize_openapi():
-             if webserver.openapi_schema:
-                 return webserver.openapi_schema
+     deployments_router, deployment_schemas = await get_deployment_router(runner)
+     webserver.include_router(deployments_router)
+
+     subflow_schemas = await get_subflow_schemas(runner)
+     webserver.add_api_route(
+         "/flow/run",
+         _build_generic_endpoint_for_flows(runner=runner, schemas=subflow_schemas),
+         methods=["POST"],
+         name="Run flow in background",
+         description="Trigger any flow run as a background task on the runner.",
+         summary="Run flow",
+     )

-             openapi_schema = inject_schemas_into_openapi(webserver, deployment_schemas)
-             webserver.openapi_schema = openapi_schema
+     def customize_openapi():
+         if webserver.openapi_schema:
              return webserver.openapi_schema

-         webserver.openapi = customize_openapi
+         openapi_schema = inject_schemas_into_openapi(webserver, deployment_schemas)
+         webserver.openapi_schema = openapi_schema
+         return webserver.openapi_schema
+
+     webserver.openapi = customize_openapi

      return webserver

prefect/runner/submit.py CHANGED
@@ -14,7 +14,6 @@ from prefect.context import FlowRunContext
  from prefect.flows import Flow
  from prefect.logging import get_logger
  from prefect.settings import (
-     PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS,
      PREFECT_RUNNER_PROCESS_LIMIT,
      PREFECT_RUNNER_SERVER_HOST,
      PREFECT_RUNNER_SERVER_PORT,
@@ -131,13 +130,6 @@ async def submit_to_runner(
              "The `submit_to_runner` utility only supports submitting flows and tasks."
          )

-     if not PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS.value():
-         raise ValueError(
-             "The `submit_to_runner` utility requires the `Runner` webserver to be"
-             " running and built with extra endpoints enabled. To enable this, set the"
-             " `PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS` setting to `True`."
-         )
-
      parameters = parameters or {}
      if isinstance(parameters, List):
          return_single = False
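With this gate deleted (and the matching check removed from server.py above), the deployment and `/flow/run` endpoints are always mounted, so `submit_to_runner` no longer requires `PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS`. A hedged sketch, assuming a runner webserver is listening on the configured host/port and can import `my_subflow`:

```python
from prefect import flow
from prefect.runner.submit import submit_to_runner, wait_for_submitted_runs


@flow
def my_subflow(x: int) -> int:
    return x * 2


@flow
def parent():
    # Fire-and-forget submission to the runner's /flow/run endpoint;
    # no experimental setting is needed on rc11.
    submit_to_runner(my_subflow, parameters={"x": 21})
    wait_for_submitted_runs()
```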