prefect 3.6.5.dev3__py3-none-any.whl → 3.6.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (147)
  1. prefect/_build_info.py +3 -3
  2. prefect/_states.py +292 -0
  3. prefect/blocks/notifications.py +71 -0
  4. prefect/cli/deploy/_core.py +7 -1
  5. prefect/cli/deploy/_models.py +4 -2
  6. prefect/client/orchestration/_flow_runs/client.py +1 -0
  7. prefect/client/schemas/objects.py +12 -0
  8. prefect/concurrency/services.py +71 -6
  9. prefect/deployments/base.py +3 -0
  10. prefect/deployments/runner.py +39 -28
  11. prefect/flows.py +17 -8
  12. prefect/input/__init__.py +10 -0
  13. prefect/input/actions.py +185 -34
  14. prefect/input/run_input.py +10 -28
  15. prefect/results.py +1 -1
  16. prefect/server/api/background_workers.py +38 -6
  17. prefect/server/api/flow_runs.py +2 -5
  18. prefect/server/api/server.py +5 -1
  19. prefect/server/api/task_runs.py +2 -4
  20. prefect/server/database/configurations.py +11 -3
  21. prefect/server/database/dependencies.py +2 -2
  22. prefect/server/events/pipeline.py +3 -1
  23. prefect/server/events/services/event_persister.py +77 -32
  24. prefect/server/events/services/triggers.py +4 -1
  25. prefect/server/models/task_runs.py +1 -0
  26. prefect/server/orchestration/core_policy.py +16 -3
  27. prefect/server/schemas/core.py +6 -0
  28. prefect/server/schemas/filters.py +8 -5
  29. prefect/server/schemas/sorting.py +37 -23
  30. prefect/server/services/base.py +0 -4
  31. prefect/server/services/cancellation_cleanup.py +143 -172
  32. prefect/server/services/pause_expirations.py +54 -67
  33. prefect/server/services/perpetual_services.py +170 -0
  34. prefect/server/services/task_run_recorder.py +213 -77
  35. prefect/server/ui/assets/404-D7bhkKwY.js +2 -0
  36. prefect/server/ui/assets/{404-cNTfarkM.js.map → 404-D7bhkKwY.js.map} +1 -1
  37. prefect/server/ui/assets/{AppRouterView-CsqzqHZP.js → AppRouterView-aiEFVVag.js} +2 -2
  38. prefect/server/ui/assets/{AppRouterView-CsqzqHZP.js.map → AppRouterView-aiEFVVag.js.map} +1 -1
  39. prefect/server/ui/assets/{Artifact-D9pQzQmC.js → Artifact-17nGoOVj.js} +2 -2
  40. prefect/server/ui/assets/{Artifact-D9pQzQmC.js.map → Artifact-17nGoOVj.js.map} +1 -1
  41. prefect/server/ui/assets/{ArtifactKey-Dci9S885.js → ArtifactKey-BFJWL_nY.js} +2 -2
  42. prefect/server/ui/assets/{ArtifactKey-Dci9S885.js.map → ArtifactKey-BFJWL_nY.js.map} +1 -1
  43. prefect/server/ui/assets/{Artifacts-3_tnsqcP.js → Artifacts-BzeaNa_a.js} +2 -2
  44. prefect/server/ui/assets/{Artifacts-3_tnsqcP.js.map → Artifacts-BzeaNa_a.js.map} +1 -1
  45. prefect/server/ui/assets/{Automation-WWzefaIP.js → Automation-D94bPqzI.js} +2 -2
  46. prefect/server/ui/assets/{Automation-WWzefaIP.js.map → Automation-D94bPqzI.js.map} +1 -1
  47. prefect/server/ui/assets/{AutomationCreate-BXntaZwK.js → AutomationCreate-BylCGTI5.js} +2 -2
  48. prefect/server/ui/assets/{AutomationCreate-BXntaZwK.js.map → AutomationCreate-BylCGTI5.js.map} +1 -1
  49. prefect/server/ui/assets/{AutomationEdit-Nmj5pgV7.js → AutomationEdit-DJlQDw7m.js} +2 -2
  50. prefect/server/ui/assets/{AutomationEdit-Nmj5pgV7.js.map → AutomationEdit-DJlQDw7m.js.map} +1 -1
  51. prefect/server/ui/assets/{AutomationWizard.vue_vue_type_script_setup_true_lang-C0eUtYtR.js → AutomationWizard.vue_vue_type_script_setup_true_lang-B83dSBsj.js} +2 -2
  52. prefect/server/ui/assets/{AutomationWizard.vue_vue_type_script_setup_true_lang-C0eUtYtR.js.map → AutomationWizard.vue_vue_type_script_setup_true_lang-B83dSBsj.js.map} +1 -1
  53. prefect/server/ui/assets/{Automations-BTByQSIa.js → Automations-Bg-224tL.js} +2 -2
  54. prefect/server/ui/assets/{Automations-BTByQSIa.js.map → Automations-Bg-224tL.js.map} +1 -1
  55. prefect/server/ui/assets/{BlockEdit-TXsu808p.js → BlockEdit-DWKQOAki.js} +2 -2
  56. prefect/server/ui/assets/{BlockEdit-TXsu808p.js.map → BlockEdit-DWKQOAki.js.map} +1 -1
  57. prefect/server/ui/assets/{BlockView-ClO3cjvJ.js → BlockView-C9V2cBug.js} +2 -2
  58. prefect/server/ui/assets/{BlockView-ClO3cjvJ.js.map → BlockView-C9V2cBug.js.map} +1 -1
  59. prefect/server/ui/assets/{Blocks-BkExfYYT.js → Blocks-CisDeGmU.js} +2 -2
  60. prefect/server/ui/assets/{Blocks-BkExfYYT.js.map → Blocks-CisDeGmU.js.map} +1 -1
  61. prefect/server/ui/assets/{BlocksCatalog-EKrfg03a.js → BlocksCatalog-xzMNomFw.js} +2 -2
  62. prefect/server/ui/assets/{BlocksCatalog-EKrfg03a.js.map → BlocksCatalog-xzMNomFw.js.map} +1 -1
  63. prefect/server/ui/assets/{BlocksCatalogCreate-BkEaFm81.js → BlocksCatalogCreate-BqygaI8K.js} +2 -2
  64. prefect/server/ui/assets/{BlocksCatalogCreate-BkEaFm81.js.map → BlocksCatalogCreate-BqygaI8K.js.map} +1 -1
  65. prefect/server/ui/assets/{BlocksCatalogView-Dra7rjpU.js → BlocksCatalogView-f-XjWttv.js} +2 -2
  66. prefect/server/ui/assets/{BlocksCatalogView-Dra7rjpU.js.map → BlocksCatalogView-f-XjWttv.js.map} +1 -1
  67. prefect/server/ui/assets/{ConcurrencyLimit-Dja_e7Qi.js → ConcurrencyLimit-Bkx91lbj.js} +2 -2
  68. prefect/server/ui/assets/{ConcurrencyLimit-Dja_e7Qi.js.map → ConcurrencyLimit-Bkx91lbj.js.map} +1 -1
  69. prefect/server/ui/assets/{ConcurrencyLimits-BhgfLozN.js → ConcurrencyLimits-CMBJj1KT.js} +2 -2
  70. prefect/server/ui/assets/{ConcurrencyLimits-BhgfLozN.js.map → ConcurrencyLimits-CMBJj1KT.js.map} +1 -1
  71. prefect/server/ui/assets/{Dashboard-DU4NaIUe.js → Dashboard-DseemlCu.js} +2 -2
  72. prefect/server/ui/assets/{Dashboard-DU4NaIUe.js.map → Dashboard-DseemlCu.js.map} +1 -1
  73. prefect/server/ui/assets/{Deployment-26av4B2n.js → Deployment-BB75yWUV.js} +2 -2
  74. prefect/server/ui/assets/{Deployment-26av4B2n.js.map → Deployment-BB75yWUV.js.map} +1 -1
  75. prefect/server/ui/assets/{DeploymentDuplicate-DrHdbfGO.js → DeploymentDuplicate-DYe295hC.js} +2 -2
  76. prefect/server/ui/assets/{DeploymentDuplicate-DrHdbfGO.js.map → DeploymentDuplicate-DYe295hC.js.map} +1 -1
  77. prefect/server/ui/assets/{DeploymentEdit-CTlfod0i.js → DeploymentEdit-CsdoTemd.js} +2 -2
  78. prefect/server/ui/assets/{DeploymentEdit-CTlfod0i.js.map → DeploymentEdit-CsdoTemd.js.map} +1 -1
  79. prefect/server/ui/assets/{Deployments-BqQB3LD3.js → Deployments-Chbfi5B0.js} +2 -2
  80. prefect/server/ui/assets/{Deployments-BqQB3LD3.js.map → Deployments-Chbfi5B0.js.map} +1 -1
  81. prefect/server/ui/assets/{Event-Ddqkp5Eb.js → Event-D0I8Mhm1.js} +2 -2
  82. prefect/server/ui/assets/{Event-Ddqkp5Eb.js.map → Event-D0I8Mhm1.js.map} +1 -1
  83. prefect/server/ui/assets/{Events-DVhuODUR.js → Events-CFZYmsfq.js} +2 -2
  84. prefect/server/ui/assets/{Events-DVhuODUR.js.map → Events-CFZYmsfq.js.map} +1 -1
  85. prefect/server/ui/assets/{Flow-CAl7lKeq.js → Flow-0PldtPn3.js} +2 -2
  86. prefect/server/ui/assets/{Flow-CAl7lKeq.js.map → Flow-0PldtPn3.js.map} +1 -1
  87. prefect/server/ui/assets/{FlowRun-h0x5CxKG.js → FlowRun-BO3KFay_.js} +2 -2
  88. prefect/server/ui/assets/{FlowRun-h0x5CxKG.js.map → FlowRun-BO3KFay_.js.map} +1 -1
  89. prefect/server/ui/assets/{FlowRunCreate-DLYa_njJ.js → FlowRunCreate-7xFw1kRY.js} +2 -2
  90. prefect/server/ui/assets/{FlowRunCreate-DLYa_njJ.js.map → FlowRunCreate-7xFw1kRY.js.map} +1 -1
  91. prefect/server/ui/assets/{Flows-DJ-cJREs.js → Flows-DFGeV1nS.js} +2 -2
  92. prefect/server/ui/assets/{Flows-DJ-cJREs.js.map → Flows-DFGeV1nS.js.map} +1 -1
  93. prefect/server/ui/assets/{Runs-rPCeL6qJ.js → Runs-ByONPbWf.js} +2 -2
  94. prefect/server/ui/assets/{Runs-rPCeL6qJ.js.map → Runs-ByONPbWf.js.map} +1 -1
  95. prefect/server/ui/assets/{RunsPageWithDefaultFilter-CceYGNey-BtMS3SJk.js → RunsPageWithDefaultFilter-CceYGNey-I7yMqcWT.js} +2 -2
  96. prefect/server/ui/assets/{RunsPageWithDefaultFilter-CceYGNey-BtMS3SJk.js.map → RunsPageWithDefaultFilter-CceYGNey-I7yMqcWT.js.map} +1 -1
  97. prefect/server/ui/assets/{Settings-w2bn-3ns.js → Settings-DQhcjTrh.js} +2 -2
  98. prefect/server/ui/assets/{Settings-w2bn-3ns.js.map → Settings-DQhcjTrh.js.map} +1 -1
  99. prefect/server/ui/assets/{TaskRun-CPjqF2yp.js → TaskRun-DSYwpbcC.js} +2 -2
  100. prefect/server/ui/assets/{TaskRun-CPjqF2yp.js.map → TaskRun-DSYwpbcC.js.map} +1 -1
  101. prefect/server/ui/assets/{Unauthenticated-Yg8XqjIn.js → Unauthenticated-CnEOt160.js} +2 -2
  102. prefect/server/ui/assets/{Unauthenticated-Yg8XqjIn.js.map → Unauthenticated-CnEOt160.js.map} +1 -1
  103. prefect/server/ui/assets/{Variables-CaD3bA0b.js → Variables-CCBjlP6W.js} +2 -2
  104. prefect/server/ui/assets/{Variables-CaD3bA0b.js.map → Variables-CCBjlP6W.js.map} +1 -1
  105. prefect/server/ui/assets/{WorkPool-BCBo-n5K.js → WorkPool-pCNoImTW.js} +2 -2
  106. prefect/server/ui/assets/{WorkPool-BCBo-n5K.js.map → WorkPool-pCNoImTW.js.map} +1 -1
  107. prefect/server/ui/assets/{WorkPoolCreate-DZbqjYpg.js → WorkPoolCreate-BhhKigkx.js} +2 -2
  108. prefect/server/ui/assets/{WorkPoolCreate-DZbqjYpg.js.map → WorkPoolCreate-BhhKigkx.js.map} +1 -1
  109. prefect/server/ui/assets/{WorkPoolEdit-BNqs1-RP.js → WorkPoolEdit-CBnjME_A.js} +2 -2
  110. prefect/server/ui/assets/{WorkPoolEdit-BNqs1-RP.js.map → WorkPoolEdit-CBnjME_A.js.map} +1 -1
  111. prefect/server/ui/assets/{WorkPoolQueue-CpbBSFRw.js → WorkPoolQueue-T6GXjAPq.js} +2 -2
  112. prefect/server/ui/assets/{WorkPoolQueue-CpbBSFRw.js.map → WorkPoolQueue-T6GXjAPq.js.map} +1 -1
  113. prefect/server/ui/assets/{WorkPoolQueueCreate-DavJ3kww.js → WorkPoolQueueCreate-Do2WmkDc.js} +2 -2
  114. prefect/server/ui/assets/{WorkPoolQueueCreate-DavJ3kww.js.map → WorkPoolQueueCreate-Do2WmkDc.js.map} +1 -1
  115. prefect/server/ui/assets/{WorkPoolQueueEdit-B-UNXtv9.js → WorkPoolQueueEdit-BWIHSa27.js} +2 -2
  116. prefect/server/ui/assets/{WorkPoolQueueEdit-B-UNXtv9.js.map → WorkPoolQueueEdit-BWIHSa27.js.map} +1 -1
  117. prefect/server/ui/assets/{WorkPools-8Aq4RA_W.js → WorkPools-CGyqQS4z.js} +2 -2
  118. prefect/server/ui/assets/{WorkPools-8Aq4RA_W.js.map → WorkPools-CGyqQS4z.js.map} +1 -1
  119. prefect/server/ui/assets/{WorkQueueToWorkPoolQueueRedirect-C9AtKf4r-ByNdkwZz.js → WorkQueueToWorkPoolQueueRedirect-C9AtKf4r-DHG155CP.js} +2 -2
  120. prefect/server/ui/assets/{WorkQueueToWorkPoolQueueRedirect-C9AtKf4r-ByNdkwZz.js.map → WorkQueueToWorkPoolQueueRedirect-C9AtKf4r-DHG155CP.js.map} +1 -1
  121. prefect/server/ui/assets/{index-B2Mqn-oh.js → index-Be5epLbh.js} +13 -81
  122. prefect/server/ui/assets/{index-B2Mqn-oh.js.map → index-Be5epLbh.js.map} +1 -1
  123. prefect/server/ui/assets/{mapper-Dc-EvePY.js → mapper-B6NMel0b.js} +2 -2
  124. prefect/server/ui/assets/{mapper-Dc-EvePY.js.map → mapper-B6NMel0b.js.map} +1 -1
  125. prefect/server/ui/assets/useCan-DoEZ83qN.js +2 -0
  126. prefect/server/ui/assets/{useCan-UjdHjQ2R.js.map → useCan-DoEZ83qN.js.map} +1 -1
  127. prefect/server/ui/assets/{usePageTitle-CuMQMQX5.js → usePageTitle-C1HwtkkQ.js} +2 -2
  128. prefect/server/ui/assets/{usePageTitle-CuMQMQX5.js.map → usePageTitle-C1HwtkkQ.js.map} +1 -1
  129. prefect/server/ui/assets/{usePrefectApi-CVNyESB6.js → usePrefectApi-CdfH7GYe.js} +2 -2
  130. prefect/server/ui/assets/{usePrefectApi-CVNyESB6.js.map → usePrefectApi-CdfH7GYe.js.map} +1 -1
  131. prefect/server/ui/index.html +1 -1
  132. prefect/server/utilities/postgres_listener.py +8 -2
  133. prefect/settings/AGENTS.md +82 -0
  134. prefect/settings/base.py +2 -2
  135. prefect/settings/models/server/database.py +10 -0
  136. prefect/settings/models/server/services.py +36 -0
  137. prefect/task_engine.py +138 -32
  138. prefect/tasks.py +22 -6
  139. prefect/workers/base.py +23 -1
  140. prefect/workers/server.py +5 -6
  141. {prefect-3.6.5.dev3.dist-info → prefect-3.6.6.dist-info}/METADATA +1 -1
  142. {prefect-3.6.5.dev3.dist-info → prefect-3.6.6.dist-info}/RECORD +145 -142
  143. {prefect-3.6.5.dev3.dist-info → prefect-3.6.6.dist-info}/WHEEL +1 -1
  144. prefect/server/ui/assets/404-cNTfarkM.js +0 -2
  145. prefect/server/ui/assets/useCan-UjdHjQ2R.js +0 -2
  146. {prefect-3.6.5.dev3.dist-info → prefect-3.6.6.dist-info}/entry_points.txt +0 -0
  147. {prefect-3.6.5.dev3.dist-info → prefect-3.6.6.dist-info}/licenses/LICENSE +0 -0
prefect/_build_info.py CHANGED
@@ -1,5 +1,5 @@
  # Generated by versioningit
- __version__ = "3.6.5.dev3"
- __build_date__ = "2025-11-26 08:10:05.347240+00:00"
- __git_commit__ = "a9d0a54f5ed7685cbc9184771351439d97517d56"
+ __version__ = "3.6.6"
+ __build_date__ = "2025-12-11 20:20:23.556858+00:00"
+ __git_commit__ = "f7d4baf4e04fc31b7a7949b9bc6f2e59d848272b"
  __dirty__ = False
prefect/_states.py ADDED
@@ -0,0 +1,292 @@
+ """
+ Private module containing sync versions of state functions.
+
+ These functions are used internally by the sync task engine to avoid
+ run_coro_as_sync overhead on Windows.
+ """
+
+ from __future__ import annotations
+
+ import datetime
+ import sys
+ import uuid
+ from types import GeneratorType
+ from typing import TYPE_CHECKING, Any, Optional
+
+ import anyio
+ import httpx
+
+ from prefect.client.schemas.objects import State, StateType
+ from prefect.exceptions import MissingContextError, TerminationSignal
+ from prefect.logging.loggers import get_logger, get_run_logger
+ from prefect.states import (
+     Completed,
+     Crashed,
+     Failed,
+     StateGroup,
+     format_exception,
+     is_state_iterable,
+ )
+ from prefect.utilities.collections import ensure_iterable
+
+ if TYPE_CHECKING:
+     import logging
+
+     from prefect.results import (
+         R,
+         ResultStore,
+     )
+
+ logger: "logging.Logger" = get_logger("states")
+
+
+ def exception_to_crashed_state_sync(
+     exc: BaseException,
+     result_store: Optional["ResultStore"] = None,
+ ) -> State:
+     """
+     Sync version of exception_to_crashed_state.
+
+     Takes an exception that occurs _outside_ of user code and converts it to a
+     'Crash' exception with a 'Crashed' state.
+     """
+     state_message = None
+
+     if isinstance(exc, anyio.get_cancelled_exc_class()):
+         state_message = "Execution was cancelled by the runtime environment."
+
+     elif isinstance(exc, KeyboardInterrupt):
+         state_message = "Execution was aborted by an interrupt signal."
+
+     elif isinstance(exc, TerminationSignal):
+         state_message = "Execution was aborted by a termination signal."
+
+     elif isinstance(exc, SystemExit):
+         state_message = "Execution was aborted by Python system exit call."
+
+     elif isinstance(exc, (httpx.TimeoutException, httpx.ConnectError)):
+         try:
+             request: httpx.Request = exc.request
+         except RuntimeError:
+             # The request property is not set
+             state_message = (
+                 "Request failed while attempting to contact the server:"
+                 f" {format_exception(exc)}"
+             )
+         else:
+             # TODO: We can check if this is actually our API url
+             state_message = f"Request to {request.url} failed: {format_exception(exc)}."
+
+     else:
+         state_message = (
+             "Execution was interrupted by an unexpected exception:"
+             f" {format_exception(exc)}"
+         )
+
+     if result_store:
+         key = uuid.uuid4().hex
+         data = result_store.create_result_record(exc, key=key)
+     else:
+         # Attach the exception for local usage, will not be available when retrieved
+         # from the API
+         data = exc
+
+     return Crashed(message=state_message, data=data)
+
+
+ def exception_to_failed_state_sync(
+     exc: Optional[BaseException] = None,
+     result_store: Optional["ResultStore"] = None,
+     write_result: bool = False,
+     **kwargs: Any,
+ ) -> State[BaseException]:
+     """
+     Sync version of exception_to_failed_state.
+
+     Convenience function for creating `Failed` states from exceptions
+     """
+     try:
+         local_logger = get_run_logger()
+     except MissingContextError:
+         local_logger = logger
+
+     if not exc:
+         _, exc, _ = sys.exc_info()
+         if exc is None:
+             raise ValueError(
+                 "Exception was not passed and no active exception could be found."
+             )
+     else:
+         pass
+
+     if result_store:
+         key = uuid.uuid4().hex
+         data = result_store.create_result_record(exc, key=key)
+         if write_result:
+             try:
+                 result_store.persist_result_record(data)
+             except Exception as nested_exc:
+                 local_logger.warning(
+                     "Failed to write result: %s Execution will continue, but the result has not been written",
+                     nested_exc,
+                 )
+     else:
+         # Attach the exception for local usage, will not be available when retrieved
+         # from the API
+         data = exc
+
+     existing_message = kwargs.pop("message", "")
+     if existing_message and not existing_message.endswith(" "):
+         existing_message += " "
+
+     # TODO: Consider if we want to include traceback information, it is intentionally
+     # excluded from messages for now
+     message = existing_message + format_exception(exc)
+
+     state = Failed(data=data, message=message, **kwargs)
+     state.state_details.retriable = False
+
+     return state
+
+
+ def return_value_to_state_sync(
+     retval: "R",
+     result_store: "ResultStore",
+     key: Optional[str] = None,
+     expiration: Optional[datetime.datetime] = None,
+     write_result: bool = False,
+ ) -> "State[R]":
+     """
+     Sync version of return_value_to_state.
+
+     Given a return value from a user's function, create a `State` the run should
+     be placed in.
+
+     - If data is returned, we create a 'COMPLETED' state with the data
+     - If a single, manually created state is returned, we use that state as given
+       (manual creation is determined by the lack of ids)
+     - If an upstream state or iterable of upstream states is returned, we apply the
+       aggregate rule
+
+     The aggregate rule says that given multiple states we will determine the final state
+     such that:
+
+     - If any states are not COMPLETED the final state is FAILED
+     - If all of the states are COMPLETED the final state is COMPLETED
+     - The states will be placed in the final state `data` attribute
+
+     Callers should resolve all futures into states before passing return values to this
+     function.
+     """
+     from prefect.results import (
+         ResultRecord,
+         ResultRecordMetadata,
+     )
+
+     try:
+         local_logger = get_run_logger()
+     except MissingContextError:
+         local_logger = logger
+
+     if (
+         isinstance(retval, State)
+         # Check for manual creation
+         and not retval.state_details.flow_run_id
+         and not retval.state_details.task_run_id
+     ):
+         state = retval
+         # Unless the user has already constructed a result explicitly, use the store
+         # to update the data to the correct type
+         if not isinstance(state.data, (ResultRecord, ResultRecordMetadata)):
+             result_record = result_store.create_result_record(
+                 state.data,
+                 key=key,
+                 expiration=expiration,
+             )
+             if write_result:
+                 try:
+                     result_store.persist_result_record(result_record)
+                 except Exception as exc:
+                     local_logger.warning(
+                         "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
+                         exc,
+                     )
+             state.data = result_record
+         return state
+
+     # Determine a new state from the aggregate of contained states
+     if isinstance(retval, State) or is_state_iterable(retval):
+         states = StateGroup(ensure_iterable(retval))
+
+         # Determine the new state type
+         if states.all_completed():
+             new_state_type = StateType.COMPLETED
+         elif states.any_cancelled():
+             new_state_type = StateType.CANCELLED
+         elif states.any_paused():
+             new_state_type = StateType.PAUSED
+         else:
+             new_state_type = StateType.FAILED
+
+         # Generate a nice message for the aggregate
+         if states.all_completed():
+             message = "All states completed."
+         elif states.any_cancelled():
+             message = f"{states.cancelled_count}/{states.total_count} states cancelled."
+         elif states.any_paused():
+             message = f"{states.paused_count}/{states.total_count} states paused."
+         elif states.any_failed():
+             message = f"{states.fail_count}/{states.total_count} states failed."
+         elif not states.all_final():
+             message = (
+                 f"{states.not_final_count}/{states.total_count} states are not final."
+             )
+         else:
+             message = "Given states: " + states.counts_message()
+
+         # TODO: We may actually want to set the data to a `StateGroup` object and just
+         # allow it to be unpacked into a tuple and such so users can interact with
+         # it
+         result_record = result_store.create_result_record(
+             retval,
+             key=key,
+             expiration=expiration,
+         )
+         if write_result:
+             try:
+                 result_store.persist_result_record(result_record)
+             except Exception as exc:
+                 local_logger.warning(
+                     "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
+                     exc,
+                 )
+         return State(
+             type=new_state_type,
+             message=message,
+             data=result_record,
+         )
+
+     # Generators aren't portable, implicitly convert them to a list.
+     if isinstance(retval, GeneratorType):
+         data = list(retval)
+     else:
+         data = retval
+
+     # Otherwise, they just gave data and this is a completed retval
+     if isinstance(data, ResultRecord):
+         return Completed(data=data)
+     else:
+         result_record = result_store.create_result_record(
+             data,
+             key=key,
+             expiration=expiration,
+         )
+         if write_result:
+             try:
+                 result_store.persist_result_record(result_record)
+             except Exception as exc:
+                 local_logger.warning(
+                     "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
+                     exc,
+                 )
+         return Completed(data=result_record)
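
A quick orientation, not part of the package: a sync engine could map a call's outcome onto these helpers roughly as sketched below. `outcome_to_state`, `run_user_fn`, and `store` are hypothetical stand-ins; only the two imported helpers come from the new module.

```python
from prefect._states import (
    exception_to_failed_state_sync,
    return_value_to_state_sync,
)


def outcome_to_state(run_user_fn, store):
    """Hypothetical wrapper: call a user function and map its outcome to a State."""
    try:
        retval = run_user_fn()
    except Exception as exc:
        # Exceptions become a Failed state; the exception is attached as data
        # (and can be persisted through the result store).
        return exception_to_failed_state_sync(exc, result_store=store)
    # Plain return values become Completed; returned states follow the
    # aggregate rule described in return_value_to_state_sync's docstring.
    return return_value_to_state_sync(retval, result_store=store)
```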
prefect/blocks/notifications.py CHANGED
@@ -1,6 +1,7 @@
  from __future__ import annotations

  import logging
+ import re
  from abc import ABC
  from typing import Any, Optional, cast

@@ -105,6 +106,9 @@ class SlackWebhook(AppriseNotificationBlock):
      """
      Enables sending notifications via a provided Slack webhook.

+     Supports both standard Slack webhooks (hooks.slack.com) and Slack GovCloud
+     webhooks (hooks.slack-gov.com).
+
      Examples:
          Load a saved Slack webhook and send a message:
          ```python
@@ -130,6 +134,73 @@ class SlackWebhook(AppriseNotificationBlock):
          examples=["https://hooks.slack.com/XXX"],
      )

+     _SLACK_WEBHOOK_URL_PATTERN: re.Pattern[str] = re.compile(
+         r"^https?://(?P<host>hooks\.slack(?:-gov)?\.com)/services/"
+         r"(?P<token_a>[A-Z0-9]+)/"
+         r"(?P<token_b>[A-Z0-9]+)/"
+         r"(?P<token_c>[A-Z0-9]+)/?$",
+         re.I,
+     )
+
+     def block_initialization(self) -> None:
+         """Initialize the Slack webhook client.
+
+         This method handles both standard Slack webhooks and Slack GovCloud webhooks.
+         Apprise's built-in Slack plugin only supports hooks.slack.com, so we need to
+         manually construct the NotifySlack instance for slack-gov.com URLs to ensure
+         notifications are sent to the correct host.
+
+         See: https://github.com/caronc/apprise/issues/XXXX (upstream issue)
+         """
+         webhook_url = self.url.get_secret_value()
+         match = self._SLACK_WEBHOOK_URL_PATTERN.match(webhook_url)
+
+         # If it's not a recognized Slack webhook shape, delegate to the base behavior.
+         # This lets restricted-URL checks and existing Apprise validation run as before.
+         if not match:
+             self._start_apprise_client(self.url)
+             return
+
+         host = match.group("host")
+
+         # Standard Slack: let Apprise handle it like it always has.
+         if host == "hooks.slack.com":
+             self._start_apprise_client(self.url)
+             return
+
+         # GovCloud: we know it's a valid Slack webhook and host is hooks.slack-gov.com
+         # We must add the NotifySlack instance directly to the apprise client
+         # (rather than passing slack_instance.url()) because the webhook_url
+         # override is an instance attribute that would be lost if apprise
+         # re-parsed the URL string.
+         from apprise import Apprise, AppriseAsset
+
+         try:
+             from apprise.plugins.slack import NotifySlack
+         except ImportError:
+             from apprise.plugins.NotifySlack import (
+                 NotifySlack,  # pyright: ignore[reportMissingImports]
+             )
+
+         token_a = match.group("token_a")
+         token_b = match.group("token_b")
+         token_c = match.group("token_c")
+
+         slack_instance = NotifySlack(
+             token_a=token_a,
+             token_b=token_b,
+             token_c=token_c,
+         )
+         slack_instance.webhook_url = f"https://{host}/services"
+
+         prefect_app_data = AppriseAsset(
+             app_id="Prefect Notifications",
+             app_desc="Prefect Notifications",
+             app_url="https://prefect.io",
+         )
+         self._apprise_client = Apprise(asset=prefect_app_data)
+         self._apprise_client.add(slack_instance)
+

  class MicrosoftTeamsWebhook(AppriseNotificationBlock):
      """
prefect/cli/deploy/_core.py CHANGED
@@ -327,11 +327,17 @@ async def _run_single_deploy(
          triggers = []

      if isinstance(deploy_config.get("concurrency_limit"), dict):
-         deploy_config["concurrency_options"] = {
+         concurrency_options = {
              "collision_strategy": get_from_dict(
                  deploy_config, "concurrency_limit.collision_strategy"
              )
          }
+         grace_period_seconds = get_from_dict(
+             deploy_config, "concurrency_limit.grace_period_seconds"
+         )
+         if grace_period_seconds is not None:
+             concurrency_options["grace_period_seconds"] = grace_period_seconds
+         deploy_config["concurrency_options"] = concurrency_options
          deploy_config["concurrency_limit"] = get_from_dict(
              deploy_config, "concurrency_limit.limit"
          )
prefect/cli/deploy/_models.py CHANGED
@@ -7,7 +7,6 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator, model_valida
  from prefect._experimental.sla.objects import SlaTypes
  from prefect.client.schemas.actions import DeploymentScheduleCreate
  from prefect.client.schemas.schedules import SCHEDULE_TYPES
- from prefect.events import DeploymentTriggerTypes


  class WorkPoolConfig(BaseModel):
@@ -52,7 +51,9 @@ class DeploymentConfig(BaseModel):
      work_pool: Optional[WorkPoolConfig] = None

      # automations metadata
-     triggers: Optional[List[DeploymentTriggerTypes]] = None
+     # Triggers are stored as raw dicts to allow Jinja templating (e.g., enabled: "{{ prefect.variables.is_prod }}")
+     # Strict validation happens later in _initialize_deployment_triggers after template resolution
+     triggers: Optional[List[Dict[str, Any]]] = None
      sla: Optional[List[SlaTypes]] = None


@@ -99,6 +100,7 @@ class ConcurrencyLimitSpec(BaseModel):

      limit: Optional[int] = None
      collision_strategy: Optional[str] = None
+     grace_period_seconds: Optional[int] = None


  class RawScheduleConfig(BaseModel):
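
For illustration only, the kind of deployment config fragment this model now accepts might look like the following; the trigger keys shown are assumptions here, since strict validation is deferred until after template resolution.

```python
# Hypothetical parsed prefect.yaml fragment: the trigger stays a raw dict so the
# templated "enabled" value survives until Prefect variables are resolved.
deployment_config = {
    "name": "my-deployment",
    "triggers": [
        {
            "enabled": "{{ prefect.variables.is_prod }}",
            "expect": ["external.resource.pinged"],
            "parameters": {"source": "{{ event.resource.id }}"},
        }
    ],
}
```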
prefect/client/orchestration/_flow_runs/client.py CHANGED
@@ -456,6 +456,7 @@ class FlowRunClient(BaseClient):
              value: The input value.
              sender: The sender of the input.
          """
+         from prefect.client.schemas.objects import FlowRunInput

          # Initialize the input to ensure that the key is valid.
          FlowRunInput(flow_run_id=flow_run_id, key=key, value=value)
prefect/client/schemas/objects.py CHANGED
@@ -155,6 +155,12 @@ class ConcurrencyOptions(PrefectBaseModel):
      """

      collision_strategy: ConcurrencyLimitStrategy
+     grace_period_seconds: Optional[int] = Field(
+         default=None,
+         ge=60,
+         le=86400,
+         description="Grace period in seconds for infrastructure to start before concurrency slots are revoked. If not set, falls back to server setting.",
+     )


  class ConcurrencyLimitConfig(PrefectBaseModel):
@@ -164,6 +170,12 @@ class ConcurrencyLimitConfig(PrefectBaseModel):

      limit: int
      collision_strategy: ConcurrencyLimitStrategy = ConcurrencyLimitStrategy.ENQUEUE
+     grace_period_seconds: Optional[int] = Field(
+         default=None,
+         ge=60,
+         le=86400,
+         description="Grace period in seconds for infrastructure to start before concurrency slots are revoked",
+     )


  class ConcurrencyLeaseHolder(PrefectBaseModel):
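
A minimal client-side sketch of the new field with illustrative values; the 60-86400 second bounds come from the Field definitions above.

```python
from prefect.client.schemas.objects import (
    ConcurrencyLimitConfig,
    ConcurrencyLimitStrategy,
)

# grace_period_seconds is optional; when unset, the server-side setting applies.
config = ConcurrencyLimitConfig(
    limit=2,
    collision_strategy=ConcurrencyLimitStrategy.ENQUEUE,
    grace_period_seconds=300,
)
```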
prefect/concurrency/services.py CHANGED
@@ -1,8 +1,12 @@
  import asyncio
+ import json
  from collections.abc import AsyncGenerator
  from contextlib import asynccontextmanager
+ from threading import Lock
  from typing import TYPE_CHECKING, Literal, Optional
+ from uuid import uuid4

+ import cachetools
  import httpx
  from starlette import status
  from typing_extensions import TypeAlias, Unpack
@@ -16,6 +20,14 @@ if TYPE_CHECKING:
      from prefect.client.orchestration import PrefectClient
      from prefect.client.schemas.objects import ConcurrencyLeaseHolder

+ # Shared cache for tags with no concurrency limits.
+ # When a set of tags is known to have no limits, we cache that result
+ # to avoid unnecessary API calls.
+ _no_limits_cache: cachetools.TTLCache[frozenset[str], bool] = cachetools.TTLCache(
+     maxsize=1000, ttl=5.0
+ )
+ _cache_lock = Lock()
+
  _Item: TypeAlias = tuple[
      int, Literal["concurrency", "rate_limit"], Optional[float], Optional[int]
  ]
@@ -127,16 +139,37 @@ class ConcurrencySlotAcquisitionWithLeaseService(
              httpx.HTTPStatusError: If the server returns an error other than 423 LOCKED
              TimeoutError: If acquisition times out
          """
+         use_cache = _should_use_cache(self.concurrency_limit_names, holder)
+         cache_key = frozenset(self.concurrency_limit_names)
+
          with timeout_async(seconds=timeout_seconds):
              while True:
                  try:
-                     return await self._client.increment_concurrency_slots_with_lease(
-                         names=self.concurrency_limit_names,
-                         slots=slots,
-                         mode=mode,
-                         lease_duration=lease_duration,
-                         holder=holder,
+                     if use_cache:
+                         with _cache_lock:
+                             if _no_limits_cache.get(cache_key, False):
+                                 return _create_empty_limits_response()
+
+                     response = (
+                         await self._client.increment_concurrency_slots_with_lease(
+                             names=self.concurrency_limit_names,
+                             slots=slots,
+                             mode=mode,
+                             lease_duration=lease_duration,
+                             holder=holder,
+                         )
                      )
+
+                     if use_cache:
+                         try:
+                             response_data = response.json()
+                             if not response_data.get("limits"):
+                                 with _cache_lock:
+                                     _no_limits_cache[cache_key] = True
+                         except Exception:
+                             pass
+
+                     return response
                  except httpx.HTTPStatusError as exc:
                      if exc.response.status_code != status.HTTP_423_LOCKED:
                          raise
@@ -151,3 +184,35 @@
                  await asyncio.sleep(retry_after)
                  if max_retries is not None:
                      max_retries -= 1
+
+
+ def _should_use_cache(
+     names: list[str], holder: Optional["ConcurrencyLeaseHolder"]
+ ) -> bool:
+     """Determine if caching should be used for this concurrency acquisition.
+
+     Caching is only enabled for task-run tag-based concurrency checks to avoid
+     unnecessary API calls when no limits exist for those tags. This specifically
+     targets the task engine path that uses names like "tag:..." with a task_run holder.
+     """
+     if holder is None:
+         return False
+     if getattr(holder, "type", None) != "task_run":
+         return False
+     if not names:
+         return False
+     return all(name.startswith("tag:") for name in names)
+
+
+ def _create_empty_limits_response() -> httpx.Response:
+     """Create a synthetic httpx.Response indicating no concurrency limits exist.
+
+     This is used when we've cached that a set of tags has no limits,
+     allowing us to skip the API call entirely.
+     """
+     response_data = {"lease_id": str(uuid4()), "limits": []}
+     return httpx.Response(
+         status_code=200,
+         content=json.dumps(response_data).encode(),
+         headers={"content-type": "application/json"},
+     )
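
These are private helpers and subject to change, but the gating behavior of `_should_use_cache` can be summarized with a small sketch; `FakeHolder` is a stand-in for the real `ConcurrencyLeaseHolder`.

```python
from prefect.concurrency.services import _should_use_cache


class FakeHolder:
    """Stand-in exposing only the attribute the check inspects."""

    type = "task_run"


# Only task-run holders acquiring purely tag-based limits use the cache.
assert _should_use_cache(["tag:etl", "tag:prod"], FakeHolder())
assert not _should_use_cache(["my-global-limit"], FakeHolder())
assert not _should_use_cache(["tag:etl"], None)
```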
prefect/deployments/base.py CHANGED
@@ -251,6 +251,9 @@ def _format_deployment_for_saving_to_prefect_file(
          concurrency_limit["collision_strategy"] = str(
              concurrency_limit["collision_strategy"].value
          )
+         concurrency_limit = {
+             k: v for k, v in concurrency_limit.items() if v is not None
+         }
          deployment["concurrency_limit"] = concurrency_limit

      return deployment
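
The effect of the new filter when the limit is written back to prefect.yaml, sketched with illustrative values:

```python
concurrency_limit = {
    "limit": 2,
    "collision_strategy": "ENQUEUE",
    "grace_period_seconds": None,
}
# Dropping None values keeps optional keys like grace_period_seconds out of
# the saved prefect.yaml unless they were explicitly set.
cleaned = {k: v for k, v in concurrency_limit.items() if v is not None}
assert cleaned == {"limit": 2, "collision_strategy": "ENQUEUE"}
```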