prefect 3.6.6__py3-none-any.whl → 3.6.7__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- prefect/__init__.py +8 -5
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles/__init__.py +8 -4
- prefect/_experimental/plugins/spec.py +20 -1
- prefect/blocks/notifications.py +1 -1
- prefect/cli/server.py +18 -2
- prefect/client/orchestration/_deployments/client.py +12 -0
- prefect/events/clients.py +24 -12
- prefect/flow_runs.py +31 -10
- prefect/logging/logging.yml +2 -0
- prefect/main.py +12 -6
- prefect/runner/storage.py +30 -1
- prefect/serializers.py +17 -1
- prefect/server/api/background_workers.py +5 -2
- prefect/server/api/server.py +1 -0
- prefect/server/database/configurations.py +34 -0
- prefect/server/events/services/triggers.py +17 -21
- prefect/server/models/events.py +67 -0
- prefect/server/models/work_queues.py +74 -11
- prefect/server/models/workers.py +107 -10
- prefect/server/orchestration/core_policy.py +111 -7
- prefect/server/schemas/responses.py +0 -8
- prefect/server/services/base.py +1 -218
- prefect/server/services/foreman.py +175 -201
- prefect/server/services/late_runs.py +56 -113
- prefect/server/services/perpetual_services.py +1 -1
- prefect/server/services/repossessor.py +66 -49
- prefect/server/services/scheduler.py +276 -326
- prefect/server/services/task_run_recorder.py +28 -4
- prefect/server/services/telemetry.py +86 -115
- prefect/settings/models/_defaults.py +0 -1
- prefect/tasks.py +18 -18
- prefect/testing/utilities.py +22 -3
- prefect/variables.py +59 -6
- prefect/workers/base.py +0 -8
- {prefect-3.6.6.dist-info → prefect-3.6.7.dist-info}/METADATA +3 -2
- {prefect-3.6.6.dist-info → prefect-3.6.7.dist-info}/RECORD +40 -40
- {prefect-3.6.6.dist-info → prefect-3.6.7.dist-info}/WHEEL +0 -0
- {prefect-3.6.6.dist-info → prefect-3.6.7.dist-info}/entry_points.txt +0 -0
- {prefect-3.6.6.dist-info → prefect-3.6.7.dist-info}/licenses/LICENSE +0 -0
prefect/server/services/task_run_recorder.py CHANGED

```diff
@@ -43,6 +43,8 @@ if TYPE_CHECKING:
 
 logger: "logging.Logger" = get_logger(__name__)
 
+DEFAULT_PERSIST_MAX_RETRIES = 5
+
 
 @db_injector
 async def _insert_task_run_states(
@@ -300,7 +302,9 @@ class RetryableEvent(BaseModel):
 
 @asynccontextmanager
 async def consumer(
-    write_batch_size: int,
+    write_batch_size: int,
+    flush_every: int,
+    max_persist_retries: int = DEFAULT_PERSIST_MAX_RETRIES,
 ) -> AsyncGenerator[MessageHandler, None]:
     logger.info(
         f"Creating TaskRunRecorder consumer with batch size {write_batch_size} and flush every {flush_every} seconds"
@@ -323,8 +327,25 @@ async def consumer
         try:
             await handle_task_run_events([batch.event for batch in batch])
         except Exception:
-
-
+            dropped = 0
+            to_retry = 0
+            for item in batch:
+                item.persist_attempts += 1
+                if item.persist_attempts <= max_persist_retries:
+                    to_retry += 1
+                    await queue.put(item)
+                else:
+                    dropped += 1
+                    logger.error(
+                        f"Dropping event {item.event.id} after {item.persist_attempts} failed attempts"
+                    )
+            logger.error(
+                f"Error flushing {len(batch)} events ({to_retry} to retry, {dropped} dropped)",
+                exc_info=True,
+            )
+
+            if dropped > 0:
+                raise
 
     async def flush_periodically():
         try:
@@ -396,7 +417,9 @@ class TaskRunRecorder(RunInEphemeralServers, Service):
     def started_event(self, value: asyncio.Event) -> None:
         self._started_event = value
 
-    async def start(
+    async def start(
+        self, max_persist_retries: int = DEFAULT_PERSIST_MAX_RETRIES
+    ) -> NoReturn:
         assert self.consumer_task is None, "TaskRunRecorder already started"
         self.consumer: Consumer = create_consumer(
             "events",
@@ -408,6 +431,7 @@ class TaskRunRecorder(RunInEphemeralServers, Service):
         async with consumer(
             write_batch_size=self.service_settings().batch_size,
             flush_every=int(self.service_settings().flush_interval),
+            max_persist_retries=max_persist_retries,
         ) as handler:
             self.consumer_task = asyncio.create_task(self.consumer.run(handler))
             self.metrics_task = asyncio.create_task(log_metrics_periodically())
```
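The new `consumer` behavior above bounds how many times a failed batch item is re-queued before it is dropped. Below is a minimal, self-contained sketch of that bounded-retry pattern; the `Item` dataclass and the `persist` callable are stand-ins for illustration, not Prefect APIs.

```python
import asyncio
from dataclasses import dataclass
from typing import Awaitable, Callable

MAX_RETRIES = 5  # plays the role of DEFAULT_PERSIST_MAX_RETRIES


@dataclass
class Item:
    payload: str
    persist_attempts: int = 0


async def flush(
    batch: list[Item],
    queue: "asyncio.Queue[Item]",
    persist: Callable[[list[Item]], Awaitable[None]],
) -> None:
    """Persist a batch; re-queue failures up to MAX_RETRIES, then drop them."""
    try:
        await persist(batch)
    except Exception:
        dropped = 0
        for item in batch:
            item.persist_attempts += 1
            if item.persist_attempts <= MAX_RETRIES:
                await queue.put(item)  # retry on a later flush
            else:
                dropped += 1  # retries exhausted; give up on this item
        if dropped:
            raise  # only re-raise once items have actually been lost
```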
prefect/server/services/telemetry.py CHANGED

```diff
@@ -1,143 +1,114 @@
 """
-The Telemetry service.
+The Telemetry service. Sends anonymous data to Prefect to help us improve.
 """
 
-import
+import logging
 import os
 import platform
-from
+from datetime import timedelta
 from uuid import uuid4
 
 import httpx
+from docket import Perpetual
 
 import prefect
-from prefect.
-from prefect.server.database
+from prefect.logging import get_logger
+from prefect.server.database import PrefectDBInterface, provide_database_interface
 from prefect.server.models import configuration
 from prefect.server.schemas.core import Configuration
-from prefect.server.services.
-    LoopService,
-    RunInEphemeralServers,
-    RunInWebservers,
-)
+from prefect.server.services.perpetual_services import perpetual_service
 from prefect.settings import PREFECT_DEBUG_MODE
 from prefect.settings.context import get_current_settings
-from prefect.settings.models.server.services import ServicesBaseSetting
 from prefect.types._datetime import now
 
+logger: logging.Logger = get_logger(__name__)
+
 
-
+async def _fetch_or_set_telemetry_session(
+    db: PrefectDBInterface,
+) -> tuple[str, str]:
     """
-
+    Fetch or create a telemetry session in the configuration table.
 
-
+    Returns:
+        tuple of (session_start_timestamp, session_id)
     """
+    async with db.session_context(begin_transaction=True) as session:
+        telemetry_session = await configuration.read_configuration(
+            session, "TELEMETRY_SESSION"
+        )
 
-
+        if telemetry_session is None:
+            logger.debug("No telemetry session found, setting")
+            session_id = str(uuid4())
+            session_start_timestamp = now("UTC").isoformat()
+
+            telemetry_session = Configuration(
+                key="TELEMETRY_SESSION",
+                value={
+                    "session_id": session_id,
+                    "session_start_timestamp": session_start_timestamp,
+                },
+            )
 
-
-
-
+            await configuration.write_configuration(session, telemetry_session)
+        else:
+            logger.debug("Session information retrieved from database")
+            session_id = telemetry_session.value["session_id"]
+            session_start_timestamp = telemetry_session.value["session_start_timestamp"]
 
-
-
-        return "PREFECT_SERVER_ANALYTICS_ENABLED"
+    logger.debug(f"Telemetry Session: {session_id}, {session_start_timestamp}")
+    return (session_start_timestamp, session_id)
 
-    @classmethod
-    def enabled(cls) -> bool:
-        return get_current_settings().server.analytics_enabled
 
-
-
-
-
-
+@perpetual_service(
+    enabled_getter=lambda: get_current_settings().server.analytics_enabled,
+    run_in_ephemeral=True,
+    run_in_webserver=True,
+)
+async def send_telemetry_heartbeat(
+    perpetual: Perpetual = Perpetual(automatic=True, every=timedelta(seconds=600)),
+) -> None:
+    """
+    Sends anonymous telemetry data to Prefect to help us improve.
 
-
-
-
-
-
-
-
-
-
-
-
-
+    It can be toggled off with the PREFECT_SERVER_ANALYTICS_ENABLED setting.
+    """
+    from prefect.client.constants import SERVER_API_VERSION
+
+    db = provide_database_interface()
+    session_start_timestamp, session_id = await _fetch_or_set_telemetry_session(db=db)
+    telemetry_environment = os.environ.get(
+        "PREFECT_API_TELEMETRY_ENVIRONMENT", "production"
+    )
+
+    heartbeat = {
+        "source": "prefect_server",
+        "type": "heartbeat",
+        "payload": {
+            "platform": platform.system(),
+            "architecture": platform.machine(),
+            "python_version": platform.python_version(),
+            "python_implementation": platform.python_implementation(),
+            "environment": telemetry_environment,
+            "ephemeral_server": bool(os.getenv("PREFECT__SERVER_EPHEMERAL", False)),
+            "api_version": SERVER_API_VERSION,
+            "prefect_version": prefect.__version__,
+            "session_id": session_id,
+            "session_start_timestamp": session_start_timestamp,
+        },
+    }
+
+    try:
+        async with httpx.AsyncClient() as client:
+            result = await client.post(
+                "https://sens-o-matic.prefect.io/",
+                json=heartbeat,
+                headers={"x-prefect-event": "prefect_server"},
             )
-
-
-
-
-
-
-            telemetry_session = Configuration(
-                key="TELEMETRY_SESSION",
-                value={
-                    "session_id": session_id,
-                    "session_start_timestamp": session_start_timestamp,
-                },
-            )
-
-            await configuration.write_configuration(session, telemetry_session)
-
-            self.session_id = session_id
-            self.session_start_timestamp = session_start_timestamp
-        else:
-            self.logger.debug("Session information retrieved from database")
-            self.session_id: str = telemetry_session.value["session_id"]
-            self.session_start_timestamp: str = telemetry_session.value[
-                "session_start_timestamp"
-            ]
-            self.logger.debug(
-                f"Telemetry Session: {self.session_id}, {self.session_start_timestamp}"
+        result.raise_for_status()
+    except Exception as exc:
+        logger.error(
+            f"Failed to send telemetry: {exc}",
+            exc_info=PREFECT_DEBUG_MODE.value(),
         )
-        return (self.session_start_timestamp, self.session_id)
-
-    async def run_once(self) -> None:
-        """
-        Sends a heartbeat to the sens-o-matic
-        """
-        from prefect.client.constants import SERVER_API_VERSION
-
-        if not hasattr(self, "session_id"):
-            await self._fetch_or_set_telemetry_session()
-
-        heartbeat = {
-            "source": "prefect_server",
-            "type": "heartbeat",
-            "payload": {
-                "platform": platform.system(),
-                "architecture": platform.machine(),
-                "python_version": platform.python_version(),
-                "python_implementation": platform.python_implementation(),
-                "environment": self.telemetry_environment,
-                "ephemeral_server": bool(os.getenv("PREFECT__SERVER_EPHEMERAL", False)),
-                "api_version": SERVER_API_VERSION,
-                "prefect_version": prefect.__version__,
-                "session_id": self.session_id,
-                "session_start_timestamp": self.session_start_timestamp,
-            },
-        }
-
-        try:
-            async with httpx.AsyncClient() as client:
-                result = await client.post(
-                    "https://sens-o-matic.prefect.io/",
-                    json=heartbeat,
-                    headers={"x-prefect-event": "prefect_server"},
-                )
-                result.raise_for_status()
-        except Exception as exc:
-            self.logger.error(
-                f"Failed to send telemetry: {exc}\nShutting down telemetry service...",
-                # The traceback is only needed if doing deeper debugging, otherwise
-                # this looks like an impactful server error
-                exc_info=PREFECT_DEBUG_MODE.value(),
-            )
-            await self.stop(block=False)
-
-
-if __name__ == "__main__":
-    asyncio.run(Telemetry(handle_signals=True).start())
```
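For orientation, the telemetry service is no longer a `LoopService` subclass; it is a module-level coroutine registered with the `perpetual_service` decorator and scheduled through a `Perpetual` default argument. Here is a sketch of another job written in the same shape; it reuses only the call signature visible in the diff above, and the `report_uptime` job itself (and its reuse of the analytics toggle as the enabled getter) is hypothetical.

```python
from datetime import timedelta

from docket import Perpetual

from prefect.server.services.perpetual_services import perpetual_service
from prefect.settings.context import get_current_settings


@perpetual_service(
    enabled_getter=lambda: get_current_settings().server.analytics_enabled,
    run_in_ephemeral=True,
    run_in_webserver=True,
)
async def report_uptime(
    # automatic=True plus every=... re-runs the job on a fixed cadence
    perpetual: Perpetual = Perpetual(automatic=True, every=timedelta(minutes=5)),
) -> None:
    print("server is up")
```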
prefect/tasks.py CHANGED

```diff
@@ -1088,17 +1088,17 @@ class Task(Generic[P, R]):
     # These preserve full parameter type checking when users call tasks normally
     @overload
     def __call__(
-        self: "Task[P, Coroutine[Any, Any,
+        self: "Task[P, Coroutine[Any, Any, T]]",
         *args: P.args,
         **kwargs: P.kwargs,
-    ) -> Coroutine[Any, Any,
+    ) -> Coroutine[Any, Any, T]: ...
 
     @overload
     def __call__(
-        self: "Task[P,
+        self: "Task[P, T]",
         *args: P.args,
         **kwargs: P.kwargs,
-    ) ->
+    ) -> T: ...
 
     @overload
     def __call__(
@@ -1116,65 +1116,65 @@ class Task(Generic[P, R]):
     # are advanced use cases.
     @overload
     def __call__(
-        self: "Task[..., Coroutine[Any, Any,
+        self: "Task[..., Coroutine[Any, Any, T]]",
         *args: Any,
         return_state: Literal[False],
         wait_for: Optional[OneOrManyFutureOrResult[Any]] = None,
         **kwargs: Any,
-    ) -> Coroutine[Any, Any,
+    ) -> Coroutine[Any, Any, T]: ...
 
     @overload
     def __call__(
-        self: "Task[..., Coroutine[Any, Any,
+        self: "Task[..., Coroutine[Any, Any, T]]",
         *args: Any,
         return_state: Literal[True],
         wait_for: Optional[OneOrManyFutureOrResult[Any]] = None,
         **kwargs: Any,
-    ) -> State[
+    ) -> State[T]: ...
 
     @overload
     def __call__(
-        self: "Task[...,
+        self: "Task[..., T]",
         *args: Any,
         return_state: Literal[False],
         wait_for: Optional[OneOrManyFutureOrResult[Any]] = None,
         **kwargs: Any,
-    ) ->
+    ) -> T: ...
 
     @overload
     def __call__(
-        self: "Task[...,
+        self: "Task[..., T]",
         *args: Any,
         return_state: Literal[True],
         wait_for: Optional[OneOrManyFutureOrResult[Any]] = None,
         **kwargs: Any,
-    ) -> State[
+    ) -> State[T]: ...
 
     @overload
     def __call__(
-        self: "Task[..., Coroutine[Any, Any,
+        self: "Task[..., Coroutine[Any, Any, T]]",
         *args: Any,
         wait_for: OneOrManyFutureOrResult[Any],
         return_state: Literal[False] = False,
         **kwargs: Any,
-    ) -> Coroutine[Any, Any,
+    ) -> Coroutine[Any, Any, T]: ...
 
     @overload
     def __call__(
-        self: "Task[...,
+        self: "Task[..., T]",
         *args: Any,
         wait_for: OneOrManyFutureOrResult[Any],
         return_state: Literal[False] = False,
         **kwargs: Any,
-    ) ->
+    ) -> T: ...
 
     def __call__(
-        self: "Union[Task[...,
+        self: "Union[Task[..., T], Task[..., NoReturn]]",
         *args: Any,
         return_state: bool = False,
         wait_for: Optional[OneOrManyFutureOrResult[Any]] = None,
         **kwargs: Any,
-    ) -> Union[
+    ) -> Union[T, State[T], None]:
         """
         Run the task and return the result. If `return_state` is True returns
         the result is wrapped in a Prefect State which provides error handling.
```
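The completed type parameter in these overloads lets a type checker propagate a task's return annotation through direct calls. A small sketch of the effect, assuming an ordinary async task called from a flow:

```python
from prefect import flow, task


@task
async def add(x: int, y: int) -> int:
    return x + y


@flow
async def my_flow() -> int:
    # With the completed overloads, add(1, 2) is typed as
    # Coroutine[Any, Any, int], so `total` is inferred as int.
    total = await add(1, 2)
    return total
```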
prefect/testing/utilities.py CHANGED

```diff
@@ -5,6 +5,7 @@ Internal utilities for tests.
 from __future__ import annotations
 
 import atexit
+import inspect
 import shutil
 import warnings
 from contextlib import ExitStack, contextmanager
@@ -31,6 +32,7 @@ from prefect.results import (
 from prefect.serializers import Serializer
 from prefect.server.api.server import SubprocessASGIServer
 from prefect.states import State
+from prefect.utilities.asyncutils import run_coro_as_sync
 
 if TYPE_CHECKING:
     from prefect.client.orchestration import PrefectClient
@@ -172,9 +174,26 @@ def prefect_test_harness(server_startup_timeout: int | None = 30):
         )
         yield
         # drain the logs before stopping the server to avoid connection errors on shutdown
-
-        #
-
+        # When running in an async context, drain() and drain_all() return awaitables.
+        # We use a wrapper coroutine passed to run_coro_as_sync to ensure the awaitable
+        # is created and awaited on the same loop, avoiding cross-loop issues (issue #19762)
+
+        async def drain_workers():
+            try:
+                result = APILogWorker.instance().drain()
+                if inspect.isawaitable(result):
+                    await result
+            except RuntimeError:
+                # Worker may not have been started
+                pass
+
+            # drain events to prevent stale events from leaking into subsequent test harnesses
+            result = EventsWorker.drain_all()
+            if inspect.isawaitable(result):
+                await result
+
+        run_coro_as_sync(drain_workers())
+
         test_server.stop()
 
 
```
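The fix wraps both drain calls in a single coroutine so the awaitables are created and awaited on the same event loop. A minimal sketch of that pattern for any worker-like object whose `drain()` may be synchronous or return an awaitable (`worker` here is a stand-in, not a Prefect class):

```python
import inspect

from prefect.utilities.asyncutils import run_coro_as_sync


def drain_maybe_async(worker) -> None:
    async def _drain() -> None:
        result = worker.drain()
        if inspect.isawaitable(result):
            # The awaitable was created on this loop, so awaiting it here is safe.
            await result

    # Build and await the coroutine on one loop, even when called from sync code.
    run_coro_as_sync(_drain())
```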
prefect/variables.py CHANGED

````diff
@@ -1,6 +1,7 @@
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Generic, Optional, overload
 
 from pydantic import BaseModel, Field
+from typing_extensions import TypeVar
 
 from prefect._internal.compatibility.async_dispatch import async_dispatch
 from prefect._internal.compatibility.migration import getattr_migration
@@ -10,8 +11,10 @@ from prefect.client.utilities import get_or_create_client
 from prefect.exceptions import ObjectNotFound
 from prefect.types import MAX_VARIABLE_NAME_LENGTH, StrictVariableValue
 
+T = TypeVar("T", bound=StrictVariableValue, default=StrictVariableValue)
 
-
+
+class Variable(BaseModel, Generic[T]):
     """
     Variables are named, mutable JSON values that can be shared across tasks and flows.
 
@@ -135,12 +138,27 @@
 
         return cls.model_validate(var_dict)
 
+    @overload
+    @classmethod
+    async def aget(
+        cls,
+        name: str,
+    ) -> T | None: ...
+
+    @overload
     @classmethod
     async def aget(
         cls,
         name: str,
-        default:
-    ) ->
+        default: T,
+    ) -> T: ...
+
+    @classmethod
+    async def aget(
+        cls,
+        name: str,
+        default: StrictVariableValue | None = None,
+    ) -> StrictVariableValue | None:
         """
         Asynchronously get a variable's value by name.
 
@@ -160,19 +178,44 @@
         async def my_flow():
             var = await Variable.aget("my_var")
         ```
+
+        Get a variable's value with explicit typing.
+        ```python
+        from prefect import flow
+        from prefect.variables import Variable
+
+        @flow
+        async def my_flow():
+            var = await Variable[str].aget("my_var")
+        ```
         """
         client, _ = get_or_create_client()
         variable = await client.read_variable_by_name(name)
 
         return variable.value if variable else default
 
+    @overload
+    @classmethod
+    def get(
+        cls,
+        name: str,
+    ) -> T | None: ...
+
+    @overload
+    @classmethod
+    def get(
+        cls,
+        name: str,
+        default: T,
+    ) -> T: ...
+
     @classmethod
     @async_dispatch(aget)
     def get(
         cls,
         name: str,
-        default: StrictVariableValue = None,
-    ) -> StrictVariableValue:
+        default: StrictVariableValue | None = None,
+    ) -> StrictVariableValue | None:
         """
         Get a variable's value by name.
 
@@ -192,6 +235,16 @@
         def my_flow():
             var = Variable.get("my_var")
         ```
+
+        Get a variable's value with explicit typing.
+        ```python
+        from prefect import flow
+        from prefect.variables import Variable
+
+        @flow
+        def my_flow():
+            var = Variable[str].get("my_var")
+        ```
         """
         with get_client(sync_client=True) as client:
             variable = client.read_variable_by_name(name)
````
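The new `Generic[T]` parameter and the `get`/`aget` overloads let callers narrow the value type at the call site. A short usage sketch based on the docstring examples above; the variable names are arbitrary:

```python
from prefect.variables import Variable

# Unparametrized access keeps the old behavior: StrictVariableValue | None.
value = Variable.get("my_var")

# Parametrizing the class narrows the inferred type for type checkers:
api_url = Variable[str].get("api_url")              # inferred as str | None
port = Variable[int].get("api_port", default=4200)  # inferred as int
```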
prefect/workers/base.py CHANGED

```diff
@@ -536,7 +536,6 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
         self._limit = limit
         self._limiter: Optional[anyio.CapacityLimiter] = None
         self._submitting_flow_run_ids: set[UUID] = set()
-        self._cancelling_flow_run_ids: set[UUID] = set()
         self._scheduled_task_scopes: set[anyio.CancelScope] = set()
         self._worker_metadata_sent = False
 
@@ -1539,13 +1538,6 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             f"Flow run '{flow_run.id}' was deleted before it could be marked as cancelled"
         )
 
-        # Do not remove the flow run from the cancelling set immediately because
-        # the API caches responses for the `read_flow_runs` and we do not want to
-        # duplicate cancellations.
-        await self._schedule_task(
-            60 * 10, self._cancelling_flow_run_ids.remove, flow_run.id
-        )
-
     async def _set_work_pool_template(
         self, work_pool: "WorkPool", job_template: dict[str, Any]
     ):
```
{prefect-3.6.6.dist-info → prefect-3.6.7.dist-info}/METADATA CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prefect
-Version: 3.6.6
+Version: 3.6.7
 Summary: Workflow orchestration and management.
 Project-URL: Changelog, https://github.com/PrefectHQ/prefect/releases
 Project-URL: Documentation, https://docs.prefect.io
@@ -72,7 +72,6 @@ Requires-Dist: sqlalchemy[asyncio]<3.0.0,>=2.0
 Requires-Dist: toml>=0.10.0
 Requires-Dist: typer<0.20.0,>=0.16.0
 Requires-Dist: typing-extensions<5.0.0,>=4.10.0
-Requires-Dist: uv>=0.6.0
 Requires-Dist: uvicorn!=0.29.0,>=0.14.0
 Requires-Dist: websockets<16.0,>=15.0.1
 Requires-Dist: whenever<0.10.0,>=0.7.3; python_version >= '3.13'
@@ -82,6 +81,8 @@ Provides-Extra: azure
 Requires-Dist: prefect-azure>=0.4.0; extra == 'azure'
 Provides-Extra: bitbucket
 Requires-Dist: prefect-bitbucket>=0.3.0; extra == 'bitbucket'
+Provides-Extra: bundles
+Requires-Dist: uv>=0.6.0; extra == 'bundles'
 Provides-Extra: dask
 Requires-Dist: prefect-dask>=0.3.0; extra == 'dask'
 Provides-Extra: databricks
```