prefect-3.6.7.dev3-py3-none-any.whl → prefect-3.6.8.dev3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +8 -5
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles/__init__.py +8 -4
- prefect/blocks/notifications.py +1 -1
- prefect/cache_policies.py +12 -0
- prefect/cli/server.py +18 -2
- prefect/client/orchestration/_deployments/client.py +12 -0
- prefect/events/clients.py +24 -12
- prefect/flow_runs.py +31 -10
- prefect/logging/logging.yml +2 -0
- prefect/main.py +12 -6
- prefect/runner/storage.py +30 -1
- prefect/serializers.py +17 -1
- prefect/server/api/background_workers.py +16 -3
- prefect/server/events/schemas/events.py +7 -0
- prefect/server/events/services/triggers.py +17 -21
- prefect/server/models/events.py +67 -0
- prefect/server/models/work_queues.py +74 -11
- prefect/server/models/workers.py +107 -10
- prefect/server/orchestration/core_policy.py +111 -7
- prefect/server/schemas/responses.py +0 -8
- prefect/server/services/base.py +1 -210
- prefect/server/services/perpetual_services.py +1 -1
- prefect/server/services/scheduler.py +276 -326
- prefect/server/services/task_run_recorder.py +28 -4
- prefect/tasks.py +26 -18
- prefect/testing/utilities.py +22 -3
- prefect/utilities/schema_tools/validation.py +1 -1
- prefect/utilities/urls.py +7 -3
- prefect/workers/base.py +0 -8
- {prefect-3.6.7.dev3.dist-info → prefect-3.6.8.dev3.dist-info}/METADATA +4 -3
- {prefect-3.6.7.dev3.dist-info → prefect-3.6.8.dev3.dist-info}/RECORD +35 -35
- {prefect-3.6.7.dev3.dist-info → prefect-3.6.8.dev3.dist-info}/WHEEL +0 -0
- {prefect-3.6.7.dev3.dist-info → prefect-3.6.8.dev3.dist-info}/entry_points.txt +0 -0
- {prefect-3.6.7.dev3.dist-info → prefect-3.6.8.dev3.dist-info}/licenses/LICENSE +0 -0
prefect/__init__.py
CHANGED
@@ -12,21 +12,22 @@ if TYPE_CHECKING:
     from importlib.machinery import ModuleSpec
     from .main import (
         allow_failure,
+        aresume_flow_run,
+        aserve,
         flow,
         Flow,
         get_client,
         get_run_logger,
+        pause_flow_run,
+        resume_flow_run,
+        serve,
         State,
+        suspend_flow_run,
         tags,
         task,
         Task,
         Transaction,
         unmapped,
-        serve,
-        aserve,
-        pause_flow_run,
-        resume_flow_run,
-        suspend_flow_run,
     )
     from prefect.deployments.runner import deploy

@@ -118,6 +119,7 @@ _initialize_plugins()

 _public_api: dict[str, tuple[Optional[str], str]] = {
     "allow_failure": (__spec__.parent, ".main"),
+    "aresume_flow_run": (__spec__.parent, ".main"),
     "aserve": (__spec__.parent, ".main"),
     "deploy": (__spec__.parent, ".deployments.runner"),
     "flow": (__spec__.parent, ".main"),

@@ -140,6 +142,7 @@ _public_api: dict[str, tuple[Optional[str], str]] = {
 __all__ = [
     "__version__",
     "allow_failure",
+    "aresume_flow_run",
     "aserve",
     "deploy",
     "flow",
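The new names are registered in both `__all__` and the `_public_api` mapping, which is what lets `prefect/__init__.py` resolve exported attributes lazily instead of importing `.main` eagerly. A minimal sketch of that general pattern (PEP 562 module-level `__getattr__`, intended for a package's `__init__.py`; the names here are illustrative, not Prefect's exact implementation):

from importlib import import_module

_public_api = {
    "resume_flow_run": ".main",   # exported name -> submodule that defines it
    "aresume_flow_run": ".main",
}

def __getattr__(name: str):
    # Called only when `name` isn't found normally; defers the submodule
    # import until the attribute is first accessed.
    try:
        submodule = _public_api[name]
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(import_module(submodule, package=__package__), name)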
prefect/_build_info.py
CHANGED
@@ -1,5 +1,5 @@
 # Generated by versioningit
-__version__ = "3.6.7.dev3"
-__build_date__ = "2025-12-
-__git_commit__ = "
+__version__ = "3.6.8.dev3"
+__build_date__ = "2025-12-24 08:10:40.420068+00:00"
+__git_commit__ = "4baedb2efecfb4d82ce18b339ce16383da4a5754"
 __dirty__ = False
prefect/_experimental/bundles/__init__.py
CHANGED

@@ -38,14 +38,18 @@ logger: logging.Logger = get_logger(__name__)


 def _get_uv_path() -> str:
+    """
+    Get the path to the uv binary.
+
+    First tries to use the uv Python package to find the binary.
+    Falls back to "uv" string (assumes uv is in PATH).
+    """
     try:
         import uv

-
+        return uv.find_uv_bin()
     except (ImportError, ModuleNotFoundError, FileNotFoundError):
-
-
-        return uv_path
+        return "uv"


 class SerializedBundle(TypedDict):
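The docstring spells out the fallback contract: prefer the binary bundled with the `uv` wheel, otherwise rely on PATH resolution at execution time. A hedged usage sketch (the `--version` invocation is illustrative):

import subprocess

def _get_uv_path() -> str:
    try:
        import uv
        return uv.find_uv_bin()
    except (ImportError, ModuleNotFoundError, FileNotFoundError):
        return "uv"

# Either an absolute path from the uv package or the bare "uv" command;
# subprocess resolves the latter against PATH.
subprocess.run([_get_uv_path(), "--version"], check=True)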
prefect/blocks/notifications.py
CHANGED
@@ -679,7 +679,7 @@ class MattermostWebhook(AbstractAppriseNotificationBlock):
             token=self.token.get_secret_value(),
             fullpath=self.path,
             host=self.hostname,
-
+            user=self.botname,
             channels=self.channels,
             include_image=self.include_image,
             port=self.port,
prefect/cache_policies.py
CHANGED
@@ -1,6 +1,7 @@
 import inspect
 from copy import deepcopy
 from dataclasses import dataclass, field
+from logging import Logger
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,

@@ -16,6 +17,7 @@ from typing_extensions import Self

 from prefect.context import TaskRunContext
 from prefect.exceptions import HashError
+from prefect.logging import get_logger
 from prefect.utilities.hashing import hash_objects

 if TYPE_CHECKING:

@@ -25,6 +27,8 @@ if TYPE_CHECKING:

 STABLE_TRANSFORMS: dict[type, Callable[[Any], Any]] = {}

+logger: Logger = get_logger(__name__)
+

 def _register_stable_transforms() -> None:
     """

@@ -291,6 +295,13 @@ class TaskSource(CachePolicy):
     ) -> Optional[str]:
         if not task_ctx:
             return None
+
+        # Use stored source code if available (works after cloudpickle serialization)
+        lines = getattr(task_ctx.task, "source_code", None)
+        if lines is not None:
+            return hash_objects(lines, raise_on_failure=True)
+
+        # Fall back to inspect.getsource for local execution
         try:
             lines = inspect.getsource(task_ctx.task)
         except TypeError:

@@ -300,6 +311,7 @@ class TaskSource(CachePolicy):
                 lines = task_ctx.task.fn.__code__.co_code
             else:
                 raise
+
         return hash_objects(lines, raise_on_failure=True)
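The new branch exists because `inspect.getsource` stops working once a function has been shipped through cloudpickle: the defining source file is not available on the executing machine. Capturing the source at definition time and reading it back from the object avoids that. A self-contained sketch with a hypothetical `remember_source` helper (Prefect stores the attribute through its own machinery):

import inspect

def remember_source(fn):
    # Hypothetical helper: capture source while the defining file is still
    # readable, so a stable hash can be computed after (de)serialization.
    try:
        fn.source_code = inspect.getsource(fn)
    except (OSError, TypeError):
        fn.source_code = None
    return fn

@remember_source
def my_task() -> int:
    return 42

# Mirrors the cache policy's lookup order: stored source first, then inspect.
lines = getattr(my_task, "source_code", None) or inspect.getsource(my_task)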
prefect/cli/server.py
CHANGED
@@ -709,13 +709,29 @@ def run_manager_process():

     logger.debug("Manager process started. Starting services...")
     try:
-        asyncio.run(
+        asyncio.run(_run_all_services())
     except KeyboardInterrupt:
         pass
     finally:
         logger.debug("Manager process has exited.")


+async def _run_all_services() -> None:
+    """Run Service-based services and docket-based perpetual services."""
+    from docket import Docket
+
+    from prefect.server.api.background_workers import background_worker
+    from prefect.server.services.base import Service
+    from prefect.settings.context import get_current_settings
+
+    docket_url = get_current_settings().server.docket.url
+
+    async with Docket(name="prefect", url=docket_url) as docket:
+        async with background_worker(docket, ephemeral=False, webserver_only=False):
+            # Run Service-based services (will block until shutdown)
+            await Service.run_services()
+
+
 # public, user-facing `prefect server services` commands
 @services_app.command(aliases=["ls"])
 def list_services():

@@ -772,7 +788,7 @@ def start_services(
     if not background:
         app.console.print("\n[blue]Starting services... Press CTRL+C to stop[/]\n")
         try:
-            asyncio.run(
+            asyncio.run(_run_all_services())
        except KeyboardInterrupt:
             pass
         app.console.print("\n[green]All services stopped.[/]")
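`_run_all_services` layers the docket connection and the background worker as nested async context managers around a single blocking call, so both are torn down in reverse order when that call exits or is interrupted. A generic sketch of that shape (the resource names are placeholders):

import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def resource(name: str):
    print(f"open {name}")
    try:
        yield
    finally:
        print(f"close {name}")  # runs on CTRL+C / cancellation too

async def main() -> None:
    async with resource("docket"):
        async with resource("background worker"):
            await asyncio.sleep(0)  # stand-in for Service.run_services()

asyncio.run(main())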
prefect/client/orchestration/_deployments/client.py
CHANGED

@@ -157,6 +157,12 @@ class DeploymentClient(BaseClient):
            payload["version_info"] = deployment_create.version_info.model_dump(
                mode="json"
            )
+        if deployment_create.concurrency_options:
+            payload["concurrency_options"] = (
+                deployment_create.concurrency_options.model_dump(
+                    mode="json", exclude_unset=True
+                )
+            )

         try:
             response = self.request("POST", "/deployments/", json=payload)

@@ -823,6 +829,12 @@ class DeploymentAsyncClient(BaseAsyncClient):
            payload["version_info"] = deployment_create.version_info.model_dump(
                mode="json"
            )
+        if deployment_create.concurrency_options:
+            payload["concurrency_options"] = (
+                deployment_create.concurrency_options.model_dump(
+                    mode="json", exclude_unset=True
+                )
+            )

         try:
             response = await self.request("POST", "/deployments/", json=payload)
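`exclude_unset=True` matters here: fields the caller never set stay out of the request body, so server-side defaults still apply. A quick illustration with a stand-in Pydantic model (the field names are hypothetical, not the real schema):

from typing import Optional

from pydantic import BaseModel

class ConcurrencyOptions(BaseModel):
    collision_strategy: str = "ENQUEUE"  # stand-in fields for illustration
    limit: Optional[int] = None

opts = ConcurrencyOptions(collision_strategy="CANCEL_NEW")
print(opts.model_dump(mode="json", exclude_unset=True))
# {'collision_strategy': 'CANCEL_NEW'} - `limit` was never set, so it is omitted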
prefect/events/clients.py
CHANGED
@@ -281,7 +281,11 @@ class PrefectEventsClient(EventsClient):
         # Don't handle any errors in the initial connection, because these are most
         # likely a permission or configuration issue that should propagate
         await super().__aenter__()
-
+        try:
+            await self._reconnect()
+        except Exception as e:
+            self._log_connection_error(e)
+            raise
         return self

     async def __aexit__(

@@ -298,6 +302,18 @@ class PrefectEventsClient(EventsClient):
         message = f"EventsClient(id={id(self)}): " + message
         logger.debug(message, *args, **kwargs)

+    def _log_connection_error(self, error: Exception) -> None:
+        logger.warning(
+            "Unable to connect to %r. "
+            "Please check your network settings to ensure websocket connections "
+            "to the API are allowed. Otherwise event data (including task run data) may be lost. "
+            "Reason: %s. "
+            "Set PREFECT_DEBUG_MODE=1 to see the full error.",
+            self._events_socket_url,
+            str(error),
+            exc_info=PREFECT_DEBUG_MODE.value(),
+        )
+
     async def _reconnect(self) -> None:
         logger.debug("Reconnecting websocket connection.")

@@ -315,15 +331,10 @@ class PrefectEventsClient(EventsClient):
             await pong
             logger.debug("Pong received. Websocket connected.")
         except Exception as e:
-            #
-            #
-
-
-                "Unable to connect to %r. "
-                "Please check your network settings to ensure websocket connections "
-                "to the API are allowed. Otherwise event data (including task run data) may be lost. "
-                "Reason: %s. "
-                "Set PREFECT_DEBUG_MODE=1 to see the full error.",
+            # Log at debug level during reconnection attempts - the warning will
+            # only be logged if all reconnection attempts fail (in _emit)
+            logger.debug(
+                "Unable to connect to %r, will retry. Reason: %s",
                 self._events_socket_url,
                 str(e),
                 exc_info=PREFECT_DEBUG_MODE.value(),

@@ -391,10 +402,11 @@ class PrefectEventsClient(EventsClient):
                 await self._checkpoint()

                 return
-            except ConnectionClosed:
+            except ConnectionClosed as e:
                 self._log_debug("Got ConnectionClosed error.")
                 if i == self._reconnection_attempts:
-                    # this was our final chance,
+                    # this was our final chance, log warning and raise
+                    self._log_connection_error(e)
                     raise

                 if i > 2:
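The net effect is a retry-then-warn policy: transient connection failures during retries are logged at DEBUG, and the user-facing WARNING fires only on the initial connection or once the final retry fails. Reduced to its shape (a sketch, not the client's actual code):

import logging

logger = logging.getLogger("events")

async def emit_with_retry(send, event, attempts: int = 3) -> None:
    for i in range(1, attempts + 1):
        try:
            await send(event)
            return
        except ConnectionError as e:
            if i == attempts:
                # Final attempt: surface the failure loudly, then propagate.
                logger.warning("Unable to connect, giving up: %s", e)
                raise
            logger.debug("Attempt %d failed, will retry: %s", i, e)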
prefect/flow_runs.py
CHANGED
@@ -11,6 +11,7 @@ from uuid import UUID, uuid4

 import anyio

+from prefect._internal.compatibility.async_dispatch import async_dispatch
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas import FlowRun
 from prefect.client.schemas.objects import (

@@ -42,9 +43,7 @@ from prefect.states import (
     Paused,
     Suspended,
 )
-from prefect.utilities.asyncutils import (
-    sync_compatible,
-)
+from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.engine import (
     propose_state,
 )

@@ -459,28 +458,50 @@ async def suspend_flow_run(
         raise Pause(state=state)


-
-async def resume_flow_run(
+async def aresume_flow_run(
     flow_run_id: UUID, run_input: dict[str, Any] | None = None
 ) -> None:
     """
-    Resumes a paused flow.
+    Resumes a paused flow asynchronously.

     Args:
         flow_run_id: the flow_run_id to resume
         run_input: a dictionary of inputs to provide to the flow run.
     """
-
-    async with client:
+    async with get_client() as client:
         flow_run = await client.read_flow_run(flow_run_id)

-        if not flow_run.state.is_paused():
+        if not flow_run.state or not flow_run.state.is_paused():
             raise NotPausedError("Cannot resume a run that isn't paused!")

         response = await client.resume_flow_run(flow_run_id, run_input=run_input)

         if response.status == SetStateStatus.REJECT:
-            if response.state.type == StateType.FAILED:
+            if response.state and response.state.type == StateType.FAILED:
+                raise FlowPauseTimeout("Flow run can no longer be resumed.")
+            else:
+                raise RuntimeError(f"Cannot resume this run: {response.details.reason}")
+
+
+@async_dispatch(aresume_flow_run)
+def resume_flow_run(flow_run_id: UUID, run_input: dict[str, Any] | None = None) -> None:
+    """
+    Resumes a paused flow.
+
+    Args:
+        flow_run_id: the flow_run_id to resume
+        run_input: a dictionary of inputs to provide to the flow run.
+    """
+    with get_client(sync_client=True) as client:
+        flow_run = client.read_flow_run(flow_run_id)
+
+        if not flow_run.state or not flow_run.state.is_paused():
+            raise NotPausedError("Cannot resume a run that isn't paused!")
+
+        response = client.resume_flow_run(flow_run_id, run_input=run_input)
+
+        if response.status == SetStateStatus.REJECT:
+            if response.state and response.state.type == StateType.FAILED:
                 raise FlowPauseTimeout("Flow run can no longer be resumed.")
             else:
                 raise RuntimeError(f"Cannot resume this run: {response.details.reason}")
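With `@async_dispatch`, calling `resume_flow_run` from a coroutine transparently routes to `aresume_flow_run`, while sync callers get the blocking client; the explicit `a`-prefixed name stays available for code that wants to be unambiguous. Usage sketch (the UUID is a placeholder):

from uuid import UUID

from prefect import aresume_flow_run, resume_flow_run

run_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder ID

# Sync context: uses the synchronous client under the hood
resume_flow_run(run_id, run_input={"approved": True})

# Async context: await the explicit async variant
async def approve() -> None:
    await aresume_flow_run(run_id, run_input={"approved": True})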
prefect/logging/logging.yml
CHANGED
@@ -43,6 +43,7 @@ handlers:
        level: 0
        class: prefect.logging.handlers.PrefectConsoleHandler
        formatter: standard
+       stream: ext://sys.stderr
        styles:
            log.web_url: bright_blue
            log.local_url: bright_blue

@@ -69,6 +70,7 @@ handlers:
        level: 0
        class: logging.StreamHandler
        formatter: debug
+       stream: ext://sys.stderr

    worker_api:
        level: 0
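`ext://sys.stderr` is standard `logging.config` syntax for resolving an external Python object; pointing both handlers at stderr keeps stdout clean for actual program output. A minimal `dictConfig` demonstration of the same key:

import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",  # resolved to the sys.stderr object
        }
    },
    "root": {"handlers": ["console"], "level": "INFO"},
})
logging.getLogger(__name__).info("written to stderr, not stdout")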
prefect/main.py
CHANGED
@@ -8,7 +8,12 @@ from prefect.tasks import task, Task
 from prefect.context import tags
 from prefect.utilities.annotations import unmapped, allow_failure
 from prefect._result_records import ResultRecordMetadata
-from prefect.flow_runs import
+from prefect.flow_runs import (
+    aresume_flow_run,
+    pause_flow_run,
+    resume_flow_run,
+    suspend_flow_run,
+)
 from prefect.client.orchestration import get_client
 from prefect.client.cloud import get_cloud_client
 import prefect.variables  # pyright: ignore[reportUnusedImport] # TODO: Does this need to be imported here?

@@ -58,20 +63,21 @@ flow: FlowDecorator
 # Declare API for type-checkers
 __all__ = [
     "allow_failure",
+    "aresume_flow_run",
+    "aserve",
     "flow",
     "Flow",
     "get_client",
     "get_cloud_client",
     "get_run_logger",
+    "pause_flow_run",
+    "resume_flow_run",
+    "serve",
     "State",
+    "suspend_flow_run",
     "tags",
     "task",
     "Task",
     "Transaction",
     "unmapped",
-    "serve",
-    "aserve",
-    "pause_flow_run",
-    "resume_flow_run",
-    "suspend_flow_run",
 ]
prefect/runner/storage.py
CHANGED
@@ -919,7 +919,36 @@ def _format_token_from_credentials(
     if username:
         return f"{username}:{user_provided_token}"

-    #
+    # Netloc-based provider detection for dict credentials (e.g., from YAML block references).
+    # When credentials come from deployment YAML like:
+    #   credentials: "{{ prefect.blocks.gitlab-credentials.my-block }}"
+    # they resolve to dicts, not Block instances, so the protocol check above doesn't apply.
+    # This provides sensible defaults for common git providers.
+    if "bitbucketserver" in netloc:
+        if ":" not in user_provided_token:
+            raise ValueError(
+                "Please provide a `username` and a `password` or `token` in your"
+                " BitBucketCredentials block to clone a repo from BitBucket Server."
+            )
+        return user_provided_token
+
+    elif "bitbucket" in netloc:
+        if (
+            user_provided_token.startswith("x-token-auth:")
+            or ":" in user_provided_token
+        ):
+            return user_provided_token
+        return f"x-token-auth:{user_provided_token}"
+
+    elif "gitlab" in netloc:
+        if user_provided_token.startswith("oauth2:"):
+            return user_provided_token
+        # Deploy tokens contain ":" (username:token format) and should not get oauth2: prefix
+        if ":" in user_provided_token:
+            return user_provided_token
+        return f"oauth2:{user_provided_token}"
+
+    # GitHub and other providers: plain token
     return user_provided_token
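The provider-specific prefixes end up embedded in the HTTPS clone URL. Illustrative outcomes derived from the branches above (all tokens are placeholders):

# GitLab PAT:        "glpat-abc"        -> "oauth2:glpat-abc"
# GitLab deploy:     "user:deploytoken" -> unchanged (already user:token form)
# Bitbucket Cloud:   "app-password"     -> "x-token-auth:app-password"
# GitHub and others: "ghp_abc"          -> unchanged
formatted = "oauth2:glpat-abc"
print(f"https://{formatted}@gitlab.com/org/repo.git")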
prefect/serializers.py
CHANGED
@@ -41,6 +41,21 @@ D = TypeVar("D", default=Any)
 _TYPE_ADAPTER_CACHE: dict[str, TypeAdapter[Any]] = {}


+def _get_importable_class(cls: type) -> type:
+    """
+    Get an importable class from a potentially parameterized generic.
+
+    For Pydantic generic models like `APIResult[str]`, the class name includes
+    type parameters (e.g., `APIResult[str]`) which cannot be imported. This
+    function extracts the origin class (e.g., `APIResult`) which can be imported.
+    """
+    if hasattr(cls, "__pydantic_generic_metadata__"):
+        origin = cls.__pydantic_generic_metadata__.get("origin")
+        if origin is not None:
+            return origin
+    return cls
+
+
 def prefect_json_object_encoder(obj: Any) -> Any:
     """
     `JSONEncoder.default` for encoding objects into JSON with extended type support.

@@ -58,8 +73,9 @@ def prefect_json_object_encoder(obj: Any) -> Any:
             ),
         }
     else:
+        importable_class = _get_importable_class(obj.__class__)
         return {
-            "__class__": to_qualified_name(
+            "__class__": to_qualified_name(importable_class),
             "data": custom_pydantic_encoder({}, obj),
         }
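Pydantic records the unparameterized class of a generic model in `__pydantic_generic_metadata__`, which is exactly what `_get_importable_class` reads. The problem and the fix, in miniature (`APIResult` is a hypothetical model):

from typing import Generic, TypeVar

from pydantic import BaseModel

T = TypeVar("T")

class APIResult(BaseModel, Generic[T]):
    value: T

parameterized = APIResult[str]
print(parameterized.__name__)  # 'APIResult[str]' - not an importable name
origin = parameterized.__pydantic_generic_metadata__["origin"]
print(origin.__name__)  # 'APIResult' - importable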
prefect/server/api/background_workers.py
CHANGED

@@ -1,15 +1,16 @@
 import asyncio
 from contextlib import asynccontextmanager
+from logging import Logger
 from typing import Any, AsyncGenerator, Callable

 from docket import Docket, Worker

+from prefect.logging import get_logger
 from prefect.server.api.flow_runs import delete_flow_run_logs
 from prefect.server.api.task_runs import delete_task_run_logs
+from prefect.server.events.services import triggers as _triggers_module  # noqa: F401
 from prefect.server.models.deployments import mark_deployments_ready
 from prefect.server.models.work_queues import mark_work_queues_ready
-
-# Import task functions that need to be registered with docket
 from prefect.server.services.cancellation_cleanup import (
     cancel_child_task_runs,
     cancel_subflow_run,

@@ -21,6 +22,8 @@ from prefect.server.services.perpetual_services import (
 )
 from prefect.server.services.repossessor import revoke_expired_lease

+logger: Logger = get_logger(__name__)
+
 # Task functions to register with docket for background processing
 task_functions: list[Callable[..., Any]] = [
     # Simple background tasks (from Alex's PR #19377)

@@ -63,6 +66,16 @@ async def background_worker(
         if worker_task:
             worker_task.cancel()
             try:
-
+                logger.debug(
+                    "Waiting for background worker to finish after cancellation..."
+                )
+                await asyncio.wait_for(worker_task, timeout=5.0)
+                logger.debug(
+                    "Background worker finished successfully after cancellation"
+                )
+            except asyncio.TimeoutError:
+                logger.debug(
+                    "Background worker did not finish within 5 seconds after cancellation. Proceeding with shutdown"
+                )
             except asyncio.CancelledError:
                 pass
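The shutdown change is the standard cancel-then-bounded-wait idiom: request cancellation, then give the task a fixed window to unwind before proceeding. In isolation (a sketch, not the module's code):

import asyncio

async def shutdown(worker_task: "asyncio.Task[None]") -> None:
    worker_task.cancel()
    try:
        # Bounded wait: don't hang shutdown on a task that ignores cancellation.
        await asyncio.wait_for(worker_task, timeout=5.0)
    except asyncio.TimeoutError:
        pass  # proceed with shutdown anyway
    except asyncio.CancelledError:
        pass  # task acknowledged the cancellation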
prefect/server/events/schemas/events.py
CHANGED

@@ -34,6 +34,7 @@ from prefect.settings import (
     PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE,
     PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES,
 )
+from prefect.utilities.urls import url_for

 if TYPE_CHECKING:
     import logging

@@ -212,6 +213,12 @@ class ReceivedEvent(Event):
         description="When the event was received by Prefect Cloud",
     )

+    @property
+    def url(self) -> Optional[str]:
+        """Returns the UI URL for this event, allowing users to link to events
+        in automation templates without parsing date strings."""
+        return url_for(self, url_type="ui")
+
     def as_database_row(self) -> dict[str, Any]:
         row = self.model_dump()
         row["resource_id"] = self.resource.id
prefect/server/events/services/triggers.py
CHANGED

@@ -1,16 +1,18 @@
 from __future__ import annotations

 import asyncio
-from typing import TYPE_CHECKING,
+from typing import TYPE_CHECKING, NoReturn
+
+from docket import Perpetual

 from prefect.logging import get_logger
 from prefect.server.events import triggers
-from prefect.server.services.base import
+from prefect.server.services.base import RunInEphemeralServers, Service
+from prefect.server.services.perpetual_services import perpetual_service
 from prefect.server.utilities.messaging import Consumer, create_consumer
 from prefect.server.utilities.messaging._consumer_names import (
     generate_unique_consumer_name,
 )
-from prefect.settings import PREFECT_EVENTS_PROACTIVE_GRANULARITY
 from prefect.settings.context import get_current_settings
 from prefect.settings.models.server.services import ServicesBaseSetting

@@ -65,21 +67,15 @@ class ReactiveTriggers(RunInEphemeralServers, Service):
         logger.debug("Reactive triggers stopped")


-
-
-
-
-
-
-
-
-
-
-
-
-            ),
-            **kwargs,
-        )
-
-    async def run_once(self) -> None:
-        await triggers.evaluate_proactive_triggers()
+@perpetual_service(
+    enabled_getter=lambda: get_current_settings().server.services.triggers.enabled,
+    run_in_ephemeral=True,
+)
+async def evaluate_proactive_triggers_periodic(
+    perpetual: Perpetual = Perpetual(
+        automatic=True,
+        every=get_current_settings().server.events.proactive_granularity,
+    ),
+) -> None:
+    """Evaluate proactive automation triggers on a periodic schedule."""
+    await triggers.evaluate_proactive_triggers()
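The `Perpetual` default parameter is docket's way of declaring a self-rescheduling task: per the diff, `automatic=True` registers it without an explicit first schedule and `every=` sets the cadence. The general shape of such a task, as a hedged sketch (registration with a docket worker omitted; the interval is illustrative):

from datetime import timedelta

from docket import Perpetual

async def heartbeat(
    perpetual: Perpetual = Perpetual(automatic=True, every=timedelta(seconds=60)),
) -> None:
    # Runs, then is re-enqueued by docket after `every`, behaving like a
    # loop service without a long-lived asyncio task.
    ...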
prefect/server/models/events.py
CHANGED
@@ -419,6 +419,73 @@ async def work_pool_status_event(
     )


+async def work_pool_updated_event(
+    session: AsyncSession,
+    work_pool: "ORMWorkPool",
+    changed_fields: Dict[
+        str, Dict[str, Any]
+    ],  # {"field_name": {"old": value, "new": value}}
+    occurred: DateTime,
+) -> Event:
+    """Create an event for work pool field updates (non-status)."""
+    return Event(
+        occurred=occurred,
+        event="prefect.work-pool.updated",
+        resource={
+            "prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
+            "prefect.resource.name": work_pool.name,
+            "prefect.work-pool.type": work_pool.type,
+            "prefect.resource.role": "work-pool",
+        },
+        payload={
+            "updated_fields": list(changed_fields.keys()),
+            "updates": changed_fields,
+        },
+        id=uuid7(),
+    )
+
+
+async def work_queue_updated_event(
+    session: AsyncSession,
+    work_queue: "ORMWorkQueue",
+    changed_fields: Dict[str, Dict[str, Any]],
+    occurred: DateTime,
+) -> Event:
+    """Create an event for work queue field updates (non-status)."""
+    related_work_pool_info: List[Dict[str, Any]] = []
+
+    if work_queue.work_pool_id:
+        work_pool = await models.workers.read_work_pool(
+            session=session,
+            work_pool_id=work_queue.work_pool_id,
+        )
+        if work_pool and work_pool.id and work_pool.name:
+            related_work_pool_info.append(
+                {
+                    "prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
+                    "prefect.resource.name": work_pool.name,
+                    "prefect.work-pool.type": work_pool.type,
+                    "prefect.resource.role": "work-pool",
+                }
+            )
+
+    return Event(
+        occurred=occurred,
+        event="prefect.work-queue.updated",
+        resource={
+            "prefect.resource.id": f"prefect.work-queue.{work_queue.id}",
+            "prefect.resource.name": work_queue.name,
+            "prefect.resource.role": "work-queue",
+        },
+        related=related_work_pool_info,
+        payload={
+            "updated_fields": list(changed_fields.keys()),
+            "updates": changed_fields,
+        },
+        id=uuid7(),
+    )
+
+
 def _get_recent_preceding_work_pool_event_id(
     work_pool: Optional["ORMWorkPool"],
 ) -> Optional[UUID]: