prefect-client 2.14.20__py3-none-any.whl → 2.15.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (108)
  1. prefect/_internal/concurrency/api.py +37 -2
  2. prefect/_internal/concurrency/calls.py +9 -0
  3. prefect/_internal/concurrency/cancellation.py +3 -1
  4. prefect/_internal/concurrency/event_loop.py +2 -2
  5. prefect/_internal/concurrency/threads.py +3 -2
  6. prefect/_internal/pydantic/annotations/pendulum.py +4 -4
  7. prefect/_internal/pydantic/v2_schema.py +2 -2
  8. prefect/_vendor/fastapi/__init__.py +1 -1
  9. prefect/_vendor/fastapi/applications.py +13 -13
  10. prefect/_vendor/fastapi/background.py +3 -1
  11. prefect/_vendor/fastapi/concurrency.py +7 -3
  12. prefect/_vendor/fastapi/datastructures.py +9 -7
  13. prefect/_vendor/fastapi/dependencies/utils.py +12 -7
  14. prefect/_vendor/fastapi/encoders.py +1 -1
  15. prefect/_vendor/fastapi/exception_handlers.py +7 -4
  16. prefect/_vendor/fastapi/exceptions.py +4 -2
  17. prefect/_vendor/fastapi/middleware/__init__.py +1 -1
  18. prefect/_vendor/fastapi/middleware/asyncexitstack.py +1 -1
  19. prefect/_vendor/fastapi/middleware/cors.py +3 -1
  20. prefect/_vendor/fastapi/middleware/gzip.py +3 -1
  21. prefect/_vendor/fastapi/middleware/httpsredirect.py +1 -1
  22. prefect/_vendor/fastapi/middleware/trustedhost.py +1 -1
  23. prefect/_vendor/fastapi/middleware/wsgi.py +3 -1
  24. prefect/_vendor/fastapi/openapi/docs.py +1 -1
  25. prefect/_vendor/fastapi/openapi/utils.py +3 -3
  26. prefect/_vendor/fastapi/requests.py +4 -2
  27. prefect/_vendor/fastapi/responses.py +13 -7
  28. prefect/_vendor/fastapi/routing.py +15 -15
  29. prefect/_vendor/fastapi/security/api_key.py +3 -3
  30. prefect/_vendor/fastapi/security/http.py +2 -2
  31. prefect/_vendor/fastapi/security/oauth2.py +2 -2
  32. prefect/_vendor/fastapi/security/open_id_connect_url.py +3 -3
  33. prefect/_vendor/fastapi/staticfiles.py +1 -1
  34. prefect/_vendor/fastapi/templating.py +3 -1
  35. prefect/_vendor/fastapi/testclient.py +1 -1
  36. prefect/_vendor/fastapi/utils.py +3 -3
  37. prefect/_vendor/fastapi/websockets.py +7 -3
  38. prefect/_vendor/starlette/__init__.py +1 -0
  39. prefect/_vendor/starlette/_compat.py +28 -0
  40. prefect/_vendor/starlette/_exception_handler.py +80 -0
  41. prefect/_vendor/starlette/_utils.py +88 -0
  42. prefect/_vendor/starlette/applications.py +261 -0
  43. prefect/_vendor/starlette/authentication.py +159 -0
  44. prefect/_vendor/starlette/background.py +43 -0
  45. prefect/_vendor/starlette/concurrency.py +59 -0
  46. prefect/_vendor/starlette/config.py +151 -0
  47. prefect/_vendor/starlette/convertors.py +87 -0
  48. prefect/_vendor/starlette/datastructures.py +707 -0
  49. prefect/_vendor/starlette/endpoints.py +130 -0
  50. prefect/_vendor/starlette/exceptions.py +60 -0
  51. prefect/_vendor/starlette/formparsers.py +276 -0
  52. prefect/_vendor/starlette/middleware/__init__.py +17 -0
  53. prefect/_vendor/starlette/middleware/authentication.py +52 -0
  54. prefect/_vendor/starlette/middleware/base.py +220 -0
  55. prefect/_vendor/starlette/middleware/cors.py +176 -0
  56. prefect/_vendor/starlette/middleware/errors.py +265 -0
  57. prefect/_vendor/starlette/middleware/exceptions.py +74 -0
  58. prefect/_vendor/starlette/middleware/gzip.py +113 -0
  59. prefect/_vendor/starlette/middleware/httpsredirect.py +19 -0
  60. prefect/_vendor/starlette/middleware/sessions.py +82 -0
  61. prefect/_vendor/starlette/middleware/trustedhost.py +64 -0
  62. prefect/_vendor/starlette/middleware/wsgi.py +147 -0
  63. prefect/_vendor/starlette/requests.py +328 -0
  64. prefect/_vendor/starlette/responses.py +347 -0
  65. prefect/_vendor/starlette/routing.py +933 -0
  66. prefect/_vendor/starlette/schemas.py +154 -0
  67. prefect/_vendor/starlette/staticfiles.py +248 -0
  68. prefect/_vendor/starlette/status.py +199 -0
  69. prefect/_vendor/starlette/templating.py +231 -0
  70. prefect/_vendor/starlette/testclient.py +805 -0
  71. prefect/_vendor/starlette/types.py +30 -0
  72. prefect/_vendor/starlette/websockets.py +193 -0
  73. prefect/blocks/core.py +3 -3
  74. prefect/blocks/notifications.py +10 -9
  75. prefect/client/base.py +1 -1
  76. prefect/client/cloud.py +1 -1
  77. prefect/client/orchestration.py +1 -1
  78. prefect/client/schemas/objects.py +11 -0
  79. prefect/client/subscriptions.py +19 -12
  80. prefect/concurrency/services.py +1 -1
  81. prefect/context.py +4 -4
  82. prefect/deployments/deployments.py +3 -3
  83. prefect/engine.py +89 -17
  84. prefect/events/clients.py +1 -1
  85. prefect/events/utilities.py +4 -1
  86. prefect/events/worker.py +10 -6
  87. prefect/filesystems.py +9 -9
  88. prefect/flow_runs.py +5 -1
  89. prefect/futures.py +1 -1
  90. prefect/infrastructure/container.py +3 -3
  91. prefect/infrastructure/kubernetes.py +4 -6
  92. prefect/infrastructure/process.py +3 -3
  93. prefect/input/run_input.py +1 -1
  94. prefect/logging/formatters.py +1 -1
  95. prefect/results.py +3 -6
  96. prefect/runner/server.py +4 -4
  97. prefect/settings.py +23 -3
  98. prefect/software/pip.py +1 -1
  99. prefect/task_engine.py +14 -11
  100. prefect/task_server.py +69 -35
  101. prefect/utilities/asyncutils.py +12 -2
  102. prefect/utilities/collections.py +1 -1
  103. prefect/utilities/filesystem.py +10 -5
  104. {prefect_client-2.14.20.dist-info → prefect_client-2.15.0.dist-info}/METADATA +4 -2
  105. {prefect_client-2.14.20.dist-info → prefect_client-2.15.0.dist-info}/RECORD +108 -73
  106. {prefect_client-2.14.20.dist-info → prefect_client-2.15.0.dist-info}/LICENSE +0 -0
  107. {prefect_client-2.14.20.dist-info → prefect_client-2.15.0.dist-info}/WHEEL +0 -0
  108. {prefect_client-2.14.20.dist-info → prefect_client-2.15.0.dist-info}/top_level.txt +0 -0
prefect/engine.py CHANGED
@@ -192,6 +192,8 @@ from prefect.states import (
 from prefect.task_runners import (
     CONCURRENCY_MESSAGES,
     BaseTaskRunner,
+    ConcurrentTaskRunner,
+    SequentialTaskRunner,
     TaskConcurrencyType,
 )
 from prefect.tasks import Task
@@ -850,7 +852,11 @@ async def orchestrate_flow_run(
         not parent_flow_run_context
         or (
             parent_flow_run_context
-            and parent_flow_run_context.flow.isasync == flow.isasync
+            and
+            # Unless the parent is async and the child is sync, run the
+            # child flow in the parent thread; running a sync child in
+            # an async parent could be bad for async performance.
+            not (parent_flow_run_context.flow.isasync and not flow.isasync)
         )
     ):
         from_async.call_soon_in_waiting_thread(
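The new condition is easiest to see as a truth table over the two execution styles. Below is a hypothetical distillation of the rule; the function and parameter names are illustrative, not part of the prefect API (the real check operates on flow objects inside `orchestrate_flow_run`):

```python
def child_runs_in_parent_thread(parent_is_async: bool, child_is_async: bool) -> bool:
    # 2.14.20 required matching styles: parent_is_async == child_is_async.
    # 2.15.0 excludes only the combination that could starve the parent's
    # event loop: a synchronous child inside an asynchronous parent.
    return not (parent_is_async and not child_is_async)

assert child_runs_in_parent_thread(False, False)     # sync child in sync parent
assert child_runs_in_parent_thread(True, True)       # async child in async parent
assert child_runs_in_parent_thread(False, True)      # async child in sync parent (changed in 2.15.0)
assert not child_runs_in_parent_thread(True, False)  # sync child in async parent -> worker thread
```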
@@ -1341,12 +1347,13 @@ async def resume_flow_run(flow_run_id, run_input: Optional[Dict] = None):
         run_input: a dictionary of inputs to provide to the flow run.
     """
     client = get_client()
-    flow_run = await client.read_flow_run(flow_run_id)
+    async with client:
+        flow_run = await client.read_flow_run(flow_run_id)

-    if not flow_run.state.is_paused():
-        raise NotPausedError("Cannot resume a run that isn't paused!")
+        if not flow_run.state.is_paused():
+            raise NotPausedError("Cannot resume a run that isn't paused!")

-    response = await client.resume_flow_run(flow_run_id, run_input=run_input)
+        response = await client.resume_flow_run(flow_run_id, run_input=run_input)

     if response.status == SetStateStatus.REJECT:
         if response.state.type == StateType.FAILED:
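The change scopes the client calls to an `async with` block so the client's underlying HTTP resources are opened and closed deterministically. A minimal usage sketch of the same pattern, assuming only the public `get_client()` helper:

```python
from prefect import get_client

async def read_run_state(flow_run_id):
    # The client's connection pool is open only inside this block and is
    # cleaned up on exit, even if an exception is raised.
    async with get_client() as client:
        flow_run = await client.read_flow_run(flow_run_id)
    return flow_run.state
```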
@@ -1369,7 +1376,17 @@ def enter_task_run_engine(

     if not flow_run_context:
         if PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING.value():
-            return _create_autonomous_task_run(task=task, parameters=parameters)
+            create_autonomous_task_run = create_call(
+                _create_autonomous_task_run, task=task, parameters=parameters
+            )
+            if task.isasync:
+                return from_async.wait_for_call_in_loop_thread(
+                    create_autonomous_task_run
+                )
+            else:
+                return from_sync.wait_for_call_in_loop_thread(
+                    create_autonomous_task_run
+                )

         raise RuntimeError(
             "Tasks cannot be run outside of a flow"
@@ -1394,6 +1411,7 @@ def enter_task_run_engine(
         wait_for=wait_for,
         return_type=return_type,
         task_runner=task_runner,
+        user_thread=threading.current_thread(),
     )

     if task.isasync and flow_run_context.flow.isasync:
@@ -1410,6 +1428,7 @@ async def begin_task_map(
     wait_for: Optional[Iterable[PrefectFuture]],
     return_type: EngineReturnType,
     task_runner: Optional[BaseTaskRunner],
+    user_thread: threading.Thread,
 ) -> List[Union[PrefectFuture, Awaitable[PrefectFuture]]]:
     """Async entrypoint for task mapping"""
     # We need to resolve some futures to map over their data, collect the upstream
@@ -1487,6 +1506,7 @@ async def begin_task_map(
                 return_type=return_type,
                 task_runner=task_runner,
                 extra_task_inputs=task_inputs,
+                user_thread=user_thread,
             )
         )

@@ -1551,6 +1571,7 @@ async def get_task_call_return_value(
     wait_for: Optional[Iterable[PrefectFuture]],
     return_type: EngineReturnType,
     task_runner: Optional[BaseTaskRunner],
+    user_thread: threading.Thread,
     extra_task_inputs: Optional[Dict[str, Set[TaskRunInput]]] = None,
 ):
     extra_task_inputs = extra_task_inputs or {}
@@ -1562,6 +1583,7 @@ async def get_task_call_return_value(
         wait_for=wait_for,
         task_runner=task_runner,
         extra_task_inputs=extra_task_inputs,
+        user_thread=user_thread,
     )
     if return_type == "future":
         return future
@@ -1580,6 +1602,7 @@ async def create_task_run_future(
     wait_for: Optional[Iterable[PrefectFuture]],
     task_runner: Optional[BaseTaskRunner],
     extra_task_inputs: Dict[str, Set[TaskRunInput]],
+    user_thread: threading.Thread,
 ) -> PrefectFuture:
     # Default to the flow run's task runner
     task_runner = task_runner or flow_run_context.task_runner
@@ -1617,6 +1640,7 @@ async def create_task_run_future(
             wait_for=wait_for,
             task_runner=task_runner,
             extra_task_inputs=extra_task_inputs,
+            user_thread=user_thread,
         )
     )

@@ -1640,6 +1664,7 @@ async def create_task_run_then_submit(
     wait_for: Optional[Iterable[PrefectFuture]],
     task_runner: BaseTaskRunner,
     extra_task_inputs: Dict[str, Set[TaskRunInput]],
+    user_thread: threading.Thread,
 ) -> None:
     task_run = (
         await create_task_run(
@@ -1666,6 +1691,7 @@ async def create_task_run_then_submit(
         task_run=task_run,
         wait_for=wait_for,
         task_runner=task_runner,
+        user_thread=user_thread,
     )

     future._submitted.set()
@@ -1713,6 +1739,7 @@ async def submit_task_run(
     task_run: TaskRun,
     wait_for: Optional[Iterable[PrefectFuture]],
     task_runner: BaseTaskRunner,
+    user_thread: threading.Thread,
 ) -> PrefectFuture:
     logger = get_run_logger(flow_run_context)

@@ -1722,6 +1749,10 @@ async def submit_task_run(
     ):
         logger.info(f"Executing {task_run.name!r} immediately...")

+    if not isinstance(task_runner, (ConcurrentTaskRunner, SequentialTaskRunner)):
+        # Only pass the user thread to "local" task runners
+        user_thread = None
+
     future = await task_runner.submit(
         key=future.key,
         call=partial(
@@ -1735,10 +1766,15 @@ async def submit_task_run(
             ),
             log_prints=should_log_prints(task),
             settings=prefect.context.SettingsContext.get().copy(),
+            user_thread=user_thread,
+            concurrency_type=task_runner.concurrency_type,
         ),
     )

-    if task_runner.concurrency_type != TaskConcurrencyType.SEQUENTIAL:
+    if (
+        task_runner.concurrency_type != TaskConcurrencyType.SEQUENTIAL
+        and not flow_run_context.autonomous_task_run
+    ):
         logger.info(f"Submitted task run {task_run.name!r} for execution.")

     return future
@@ -1752,6 +1788,8 @@ async def begin_task_run(
     result_factory: ResultFactory,
     log_prints: bool,
     settings: prefect.context.SettingsContext,
+    user_thread: Optional[threading.Thread],
+    concurrency_type: TaskConcurrencyType,
 ):
     """
     Entrypoint for task run execution.
@@ -1822,6 +1860,8 @@ async def begin_task_run(
             log_prints=log_prints,
             interruptible=interruptible,
             client=client,
+            user_thread=user_thread,
+            concurrency_type=concurrency_type,
         )

     if not maybe_flow_run_context:
@@ -1872,6 +1912,8 @@ async def orchestrate_task_run(
     log_prints: bool,
     interruptible: bool,
     client: PrefectClient,
+    concurrency_type: TaskConcurrencyType,
+    user_thread: Optional[threading.Thread],
 ) -> State:
     """
     Execute a task run
@@ -1903,6 +1945,7 @@ async def orchestrate_task_run(
         flow_run = flow_run_context.flow_run
     else:
         flow_run = await client.read_flow_run(task_run.flow_run_id)
+
     logger = task_run_logger(task_run, task=task, flow_run=flow_run)

     partial_task_run_context = PartialModel(
@@ -2101,9 +2144,41 @@ async def orchestrate_task_run(
                     "Beginning execution...", extra={"state_message": True}
                 )

-                call = from_async.call_soon_in_new_thread(
-                    create_call(task.fn, *args, **kwargs), timeout=task.timeout_seconds
-                )
+                call = create_call(task.fn, *args, **kwargs)
+
+                if (
+                    flow_run_context
+                    and user_thread
+                    and (
+                        # Async and sync tasks can be executed on synchronous flows
+                        # if the task runner is sequential; if the task is sync and a
+                        # concurrent task runner is used, we must execute it in a worker
+                        # thread instead.
+                        (
+                            concurrency_type == TaskConcurrencyType.SEQUENTIAL
+                            and (
+                                flow_run_context.flow
+                                and not flow_run_context.flow.isasync
+                            )
+                        )
+                        # Async tasks can always be executed on asynchronous flow; if the
+                        # flow is async we do not want to block the event loop with
+                        # synchronous tasks
+                        or (
+                            flow_run_context.flow
+                            and flow_run_context.flow.isasync
+                            and task.isasync
+                        )
+                    )
+                ):
+                    from_async.call_soon_in_waiting_thread(
+                        call, thread=user_thread, timeout=task.timeout_seconds
+                    )
+                else:
+                    from_async.call_soon_in_new_thread(
+                        call, timeout=task.timeout_seconds
+                    )
+
                 result = await call.aresult()

             except (CancelledError, asyncio.CancelledError) as exc:
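The condition above reduces to two cases in which a task may run on the thread where the user's flow is waiting. A hypothetical distillation, with simple booleans standing in for the real context objects (not part of the prefect API):

```python
def task_runs_on_user_thread(
    sequential_runner: bool, flow_is_async: bool, task_is_async: bool
) -> bool:
    # Case 1: a sequential runner in a sync flow — nothing else competes for
    # the user's thread, so any task may run there.
    # Case 2: an async task in an async flow — it can share the flow's event
    # loop; a sync task cannot, since it would block that loop.
    return (sequential_runner and not flow_is_async) or (
        flow_is_async and task_is_async
    )
```

Every other combination falls through to `call_soon_in_new_thread`, preserving the 2.14.x behavior of executing the task on a fresh worker thread.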
@@ -2200,7 +2275,6 @@ async def orchestrate_task_run(
             level=logging.INFO if state.is_completed() else logging.ERROR,
             msg=f"Finished in state {display_state}",
         )
-        logger.warning(f"Task run {task_run.name!r} finished in state {display_state}")
         return state


@@ -2433,8 +2507,7 @@ async def resolve_inputs(
             # incorrectly evaluate to false — to resolve this, we must track all
             # annotations wrapping the current expression but this is not yet
             # implemented.
-            isinstance(context.get("annotation"), allow_failure)
-            and state.is_failed()
+            isinstance(context.get("annotation"), allow_failure) and state.is_failed()
         ):
             raise UpstreamTaskError(
                 f"Upstream task run '{state.state_details.task_run_id}' did not reach a"
@@ -2914,15 +2987,14 @@ def _emit_task_run_state_change_event(
     )


-@sync_compatible
 async def _create_autonomous_task_run(
     task: Task, parameters: Dict[str, Any]
 ) -> TaskRun:
     async with get_client() as client:
-        scheduled = Scheduled()
+        state = Scheduled()
         if parameters:
             parameters_id = uuid4()
-            scheduled.state_details.task_parameters_id = parameters_id
+            state.state_details.task_parameters_id = parameters_id

             # TODO: We want to use result storage for parameters, but we'll need
             # a better way to use it than this.
@@ -2934,7 +3006,7 @@ async def _create_autonomous_task_run(
             task=task,
             flow_run_id=None,
             dynamic_key=f"{task.task_key}-{str(uuid4())[:NUM_CHARS_DYNAMIC_KEY]}",
-            state=scheduled,
+            state=state,
         )

         engine_logger.debug(f"Submitted run of task {task.name!r} for execution")
prefect/events/clients.py CHANGED
@@ -21,7 +21,7 @@ try:
     from cachetools import TTLCache
 except ImportError:
     pass
-from starlette.status import WS_1008_POLICY_VIOLATION
+from prefect._vendor.starlette.status import WS_1008_POLICY_VIOLATION
 from websockets.client import WebSocketClientProtocol, connect
 from websockets.exceptions import (
     ConnectionClosed,
prefect/events/utilities.py CHANGED
@@ -8,7 +8,7 @@ from prefect._internal.schemas.fields import DateTimeTZ

 from .clients import AssertingEventsClient, PrefectCloudEventsClient
 from .schemas import Event, RelatedResource
-from .worker import EventsWorker
+from .worker import EventsWorker, emit_events_to_cloud

 TIGHT_TIMING = timedelta(minutes=5)

@@ -42,6 +42,9 @@ def emit_event(
         The event that was emitted if worker is using a client that emit
         events, otherwise None.
     """
+    if not emit_events_to_cloud():
+        return None
+
     operational_clients = [AssertingEventsClient, PrefectCloudEventsClient]
     worker_instance = EventsWorker.instance()
prefect/events/worker.py CHANGED
@@ -14,6 +14,15 @@ from .related import related_resources_from_run_context
 from .schemas import Event


+def emit_events_to_cloud() -> bool:
+    api = PREFECT_API_URL.value()
+    return (
+        experiment_enabled("events_client")
+        and api
+        and api.startswith(PREFECT_CLOUD_API_URL.value())
+    )
+
+
 class EventsWorker(QueueService[Event]):
     def __init__(
         self, client_type: Type[EventsClient], client_options: Tuple[Tuple[str, Any]]
@@ -52,12 +61,7 @@ class EventsWorker(QueueService[Event]):

         # Select a client type for this worker based on settings
         if client_type is None:
-            api = PREFECT_API_URL.value()
-            if (
-                experiment_enabled("events_client")
-                and api
-                and api.startswith(PREFECT_CLOUD_API_URL.value())
-            ):
+            if emit_events_to_cloud():
                 client_type = PrefectCloudEventsClient
                 client_kwargs = {
                     "api_url": PREFECT_API_URL.value(),
prefect/filesystems.py CHANGED
@@ -709,13 +709,13 @@ class Azure(WritableFileSystem, WritableDeploymentStorage):
     def filesystem(self) -> RemoteFileSystem:
         settings = {}
         if self.azure_storage_connection_string:
-            settings["connection_string"] = (
-                self.azure_storage_connection_string.get_secret_value()
-            )
+            settings[
+                "connection_string"
+            ] = self.azure_storage_connection_string.get_secret_value()
         if self.azure_storage_account_name:
-            settings["account_name"] = (
-                self.azure_storage_account_name.get_secret_value()
-            )
+            settings[
+                "account_name"
+            ] = self.azure_storage_account_name.get_secret_value()
         if self.azure_storage_account_key:
             settings["account_key"] = self.azure_storage_account_key.get_secret_value()
         if self.azure_storage_tenant_id:
@@ -723,9 +723,9 @@ class Azure(WritableFileSystem, WritableDeploymentStorage):
         if self.azure_storage_client_id:
             settings["client_id"] = self.azure_storage_client_id.get_secret_value()
         if self.azure_storage_client_secret:
-            settings["client_secret"] = (
-                self.azure_storage_client_secret.get_secret_value()
-            )
+            settings[
+                "client_secret"
+            ] = self.azure_storage_client_secret.get_secret_value()
         settings["anon"] = self.azure_storage_anon
         self._remote_file_system = RemoteFileSystem(
             basepath=self.basepath, settings=settings
prefect/flow_runs.py CHANGED
@@ -7,6 +7,7 @@ from prefect.client.orchestration import PrefectClient
 from prefect.client.schemas import FlowRun
 from prefect.client.utilities import inject_client
 from prefect.exceptions import FlowRunWaitTimeout
+from prefect.logging import get_logger


 @inject_client
@@ -15,6 +16,7 @@ async def wait_for_flow_run(
     timeout: Optional[int] = 10800,
     poll_interval: int = 5,
     client: Optional[PrefectClient] = None,
+    log_states: bool = False,
 ) -> FlowRun:
     """
     Waits for the prefect flow run to finish and returns the FlowRun
@@ -71,14 +73,16 @@ async def wait_for_flow_run(
     ```
     """
     assert client is not None, "Client injection failed"
+    logger = get_logger()
     with anyio.move_on_after(timeout):
         while True:
             flow_run = await client.read_flow_run(flow_run_id)
             flow_state = flow_run.state
+            if log_states:
+                logger.info(f"Flow run is in state {flow_run.state.name!r}")
             if flow_state and flow_state.is_final():
                 return flow_run
             await anyio.sleep(poll_interval)
-
     raise FlowRunWaitTimeout(
         f"Flow run with ID {flow_run_id} exceeded watch timeout of {timeout} seconds"
     )
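A hedged usage sketch of the new `log_states` flag: while polling, each observed state is logged at INFO level, which is handy when watching a run from a script:

```python
import asyncio
from prefect.flow_runs import wait_for_flow_run

async def watch(flow_run_id):
    flow_run = await wait_for_flow_run(
        flow_run_id,
        timeout=600,       # stop waiting after 10 minutes
        poll_interval=5,   # query the API every 5 seconds
        log_states=True,   # new in 2.15.0: log each polled state
    )
    print(flow_run.state)

# asyncio.run(watch(some_flow_run_id))
```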
prefect/futures.py CHANGED
@@ -360,7 +360,7 @@ async def resolve_futures_to_data(


 async def resolve_futures_to_states(
-    expr: Union[PrefectFuture[R, Any], Any]
+    expr: Union[PrefectFuture[R, Any], Any],
 ) -> Union[State[R], Any]:
     """
     Given a Python built-in collection, recursively find `PrefectFutures` and build a
prefect/infrastructure/container.py CHANGED
@@ -398,9 +398,9 @@ class DockerContainer(Infrastructure):
             return await super().generate_work_pool_base_job_template()
         for key, value in self.dict(exclude_unset=True, exclude_defaults=True).items():
             if key == "command":
-                base_job_template["variables"]["properties"]["command"]["default"] = (
-                    shlex.join(value)
-                )
+                base_job_template["variables"]["properties"]["command"][
+                    "default"
+                ] = shlex.join(value)
             elif key == "image_registry":
                 self.logger.warning(
                     "Image registry blocks are not supported by Docker"
prefect/infrastructure/kubernetes.py CHANGED
@@ -376,9 +376,9 @@ class KubernetesJob(Infrastructure):
         ), "Failed to retrieve default base job template."
         for key, value in self.dict(exclude_unset=True, exclude_defaults=True).items():
             if key == "command":
-                base_job_template["variables"]["properties"]["command"]["default"] = (
-                    shlex.join(value)
-                )
+                base_job_template["variables"]["properties"]["command"][
+                    "default"
+                ] = shlex.join(value)
             elif key in [
                 "type",
                 "block_type_slug",
@@ -892,9 +892,7 @@ class KubernetesJob(Infrastructure):
                 prefix,
                 max_length=253,
                 regex_pattern=r"[^a-zA-Z0-9-\.]+",
-            ).strip(
-                "_-."
-            )  # Must start or end with alphanumeric characters
+            ).strip("_-.")  # Must start or end with alphanumeric characters
             or prefix
         )

prefect/infrastructure/process.py CHANGED
@@ -264,9 +264,9 @@ class Process(Infrastructure):
         ), "Failed to generate default base job template for Process worker."
         for key, value in self.dict(exclude_unset=True, exclude_defaults=True).items():
             if key == "command":
-                base_job_template["variables"]["properties"]["command"]["default"] = (
-                    shlex.join(value)
-                )
+                base_job_template["variables"]["properties"]["command"][
+                    "default"
+                ] = shlex.join(value)
             elif key in [
                 "type",
                 "block_type_slug",
prefect/input/run_input.py CHANGED
@@ -379,7 +379,7 @@ class AutomaticRunInput(RunInput, Generic[T]):


 def run_input_subclass_from_type(
-    _type: Union[Type[R], Type[T], pydantic.BaseModel]
+    _type: Union[Type[R], Type[T], pydantic.BaseModel],
 ) -> Union[Type[AutomaticRunInput[T]], Type[R]]:
     """
     Create a new `RunInput` subclass from the given type.
prefect/logging/formatters.py CHANGED
@@ -80,7 +80,7 @@ class PrefectFormatter(logging.Formatter):
         *,
         defaults=None,
         task_run_fmt: str = None,
-        flow_run_fmt: str = None
+        flow_run_fmt: str = None,
     ) -> None:
         """
         Implementation of the standard Python formatter with support for multiple
prefect/results.py CHANGED
@@ -185,10 +185,9 @@ class ResultFactory(pydantic.BaseModel):
             persist_result=(
                 flow.persist_result
                 if flow.persist_result is not None
-                else
                 # !! Child flows persist their result by default if the it or the
                 # parent flow uses a feature that requires it
-                (
+                else (
                     flow_features_require_result_persistence(flow)
                     or flow_features_require_child_result_persistence(ctx.flow)
                     or get_default_persist_setting()
@@ -209,10 +208,9 @@ class ResultFactory(pydantic.BaseModel):
             persist_result=(
                 flow.persist_result
                 if flow.persist_result is not None
-                else
                 # !! Flows persist their result by default if uses a feature that
                 # requires it
-                (
+                else (
                     flow_features_require_result_persistence(flow)
                     or get_default_persist_setting()
                 )
@@ -246,10 +244,9 @@ class ResultFactory(pydantic.BaseModel):
         persist_result = (
             task.persist_result
             if task.persist_result is not None
-            else
             # !! Tasks persist their result by default if their parent flow uses a
             # feature that requires it or the task uses a feature that requires it
-            (
+            else (
                 (
                     flow_features_require_child_result_persistence(ctx.flow)
                     if ctx
prefect/runner/server.py CHANGED
@@ -87,7 +87,7 @@ async def _build_endpoint_for_deployment(
     deployment: "DeploymentResponse", runner: "Runner"
 ) -> Callable:
     async def _create_flow_run_for_deployment(
-        body: Optional[Dict[Any, Any]] = None
+        body: Optional[Dict[Any, Any]] = None,
     ) -> JSONResponse:
         body = body or {}
         if deployment.enforce_parameter_schema and deployment.parameter_openapi_schema:
@@ -139,9 +139,9 @@ async def get_deployment_router(
         )

         # Used for updating the route schemas later on
-        schemas[f"{deployment.name}-{deployment_id}"] = (
-            deployment.parameter_openapi_schema
-        )
+        schemas[
+            f"{deployment.name}-{deployment_id}"
+        ] = deployment.parameter_openapi_schema
         schemas[deployment_id] = deployment.name
     return router, schemas

prefect/settings.py CHANGED
@@ -39,6 +39,7 @@ settings to be dynamically modified on retrieval. This allows us to make setting
 dependent on the value of other settings or perform other dynamic effects.

 """
+
 import logging
 import os
 import string
@@ -420,9 +421,7 @@ def warn_on_misconfigured_api_url(values):
         )

     if warnings_list:
-        example = (
-            'e.g. PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]"'
-        )
+        example = 'e.g. PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]"'
         warnings_list.append(example)

         warnings.warn("\n".join(warnings_list), stacklevel=2)
@@ -1416,6 +1415,22 @@ PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS = Setting(
 Whether or not to delete failed task submissions from the database.
 """

+PREFECT_TASK_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE = Setting(
+    int,
+    default=1000,
+)
+"""
+The maximum number of scheduled tasks to queue for submission.
+"""
+
+PREFECT_TASK_SCHEDULING_MAX_RETRY_QUEUE_SIZE = Setting(
+    int,
+    default=100,
+)
+"""
+The maximum number of retries to queue for submission.
+"""
+
 PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS = Setting(bool, default=False)
 """
 Whether or not to enable experimental worker webserver endpoints.
@@ -1447,6 +1462,11 @@ PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING = Setting(bool, default=False)
 Whether or not to enable experimental task scheduling.
 """

+PREFECT_EXPERIMENTAL_ENABLE_WORK_QUEUE_STATUS = Setting(bool, default=True)
+"""
+Whether or not to enable experimental work queue status in-place of work queue health.
+"""
+
 # Defaults -----------------------------------------------------------------------------

 PREFECT_DEFAULT_RESULT_STORAGE_BLOCK = Setting(
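The new settings follow the module's existing pattern: module-level `Setting` objects resolved with `.value()` and overridable via environment variables of the same name. A short sketch of reading them (assumes prefect 2.15.0 is installed):

```python
from prefect.settings import (
    PREFECT_TASK_SCHEDULING_MAX_RETRY_QUEUE_SIZE,
    PREFECT_TASK_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE,
)

# Defaults apply unless overridden in the environment, e.g.
# PREFECT_TASK_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE=500
print(PREFECT_TASK_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE.value())  # 1000
print(PREFECT_TASK_SCHEDULING_MAX_RETRY_QUEUE_SIZE.value())      # 100
```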
prefect/software/pip.py CHANGED
@@ -68,7 +68,7 @@ def _is_editable_install(dist: "importlib_metadata.Distribution") -> bool:


 def _remove_distributions_required_by_others(
-    dists: Dict[str, "importlib_metadata.Distribution"]
+    dists: Dict[str, "importlib_metadata.Distribution"],
 ) -> Dict[str, "importlib_metadata.Distribution"]:
     # Collect all child requirements
     child_requirement_names = set()