prefect 3.6.16.dev4__py3-none-any.whl → 3.6.16.dev5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +5 -4
- prefect/_build_info.py +3 -3
- prefect/_experimental/plugins/__init__.py +1 -1
- prefect/cli/deploy/_core.py +8 -1
- prefect/client/orchestration/routes.py +0 -3
- prefect/server/api/admin.py +0 -60
- prefect/server/api/deployments.py +267 -1
- prefect/server/api/flow_runs.py +139 -0
- prefect/server/api/flows.py +49 -1
- prefect/server/models/deployments.py +53 -0
- prefect/server/models/flow_runs.py +42 -0
- prefect/server/models/flows.py +40 -1
- prefect/server/schemas/filters.py +5 -0
- prefect/server/schemas/responses.py +50 -0
- prefect/server/ui/assets/404-DG3AGnT0.js +2 -0
- prefect/server/ui/assets/{404-K6PaZiye.js.map → 404-DG3AGnT0.js.map} +1 -1
- prefect/server/ui/assets/{AppRouterView-DwDQtyvQ.js → AppRouterView-C0FcJnhj.js} +2 -2
- prefect/server/ui/assets/{AppRouterView-DwDQtyvQ.js.map → AppRouterView-C0FcJnhj.js.map} +1 -1
- prefect/server/ui/assets/{Artifact-C7r-iu_n.js → Artifact-sGuFYSde.js} +2 -2
- prefect/server/ui/assets/{Artifact-C7r-iu_n.js.map → Artifact-sGuFYSde.js.map} +1 -1
- prefect/server/ui/assets/{ArtifactKey-BEMo4yoK.js → ArtifactKey-9Q9Vx-fc.js} +2 -2
- prefect/server/ui/assets/{ArtifactKey-BEMo4yoK.js.map → ArtifactKey-9Q9Vx-fc.js.map} +1 -1
- prefect/server/ui/assets/{Artifacts-jFQdgIdh.js → Artifacts-B53vn988.js} +2 -2
- prefect/server/ui/assets/{Artifacts-jFQdgIdh.js.map → Artifacts-B53vn988.js.map} +1 -1
- prefect/server/ui/assets/{Automation-CoPk3WhK.js → Automation-BaE0He4N.js} +2 -2
- prefect/server/ui/assets/{Automation-CoPk3WhK.js.map → Automation-BaE0He4N.js.map} +1 -1
- prefect/server/ui/assets/{AutomationCreate-DC7QapQA.js → AutomationCreate-CYfB5QDE.js} +2 -2
- prefect/server/ui/assets/{AutomationCreate-DC7QapQA.js.map → AutomationCreate-CYfB5QDE.js.map} +1 -1
- prefect/server/ui/assets/{AutomationEdit-DEqdGB6r.js → AutomationEdit-CUrFmfaN.js} +2 -2
- prefect/server/ui/assets/{AutomationEdit-DEqdGB6r.js.map → AutomationEdit-CUrFmfaN.js.map} +1 -1
- prefect/server/ui/assets/{AutomationWizard.vue_vue_type_script_setup_true_lang-Cx_Qey3p.js → AutomationWizard.vue_vue_type_script_setup_true_lang-CRRm91Zq.js} +2 -2
- prefect/server/ui/assets/{AutomationWizard.vue_vue_type_script_setup_true_lang-Cx_Qey3p.js.map → AutomationWizard.vue_vue_type_script_setup_true_lang-CRRm91Zq.js.map} +1 -1
- prefect/server/ui/assets/{Automations-BSljFu5U.js → Automations--PyqO2IS.js} +2 -2
- prefect/server/ui/assets/{Automations-BSljFu5U.js.map → Automations--PyqO2IS.js.map} +1 -1
- prefect/server/ui/assets/{BlockEdit-PBHxWs2I.js → BlockEdit-BnyuldVn.js} +2 -2
- prefect/server/ui/assets/{BlockEdit-PBHxWs2I.js.map → BlockEdit-BnyuldVn.js.map} +1 -1
- prefect/server/ui/assets/{BlockView-D0GWTwFv.js → BlockView-B2WHGgyQ.js} +2 -2
- prefect/server/ui/assets/{BlockView-D0GWTwFv.js.map → BlockView-B2WHGgyQ.js.map} +1 -1
- prefect/server/ui/assets/{Blocks-D_9GkxFk.js → Blocks-CCtj7wrz.js} +2 -2
- prefect/server/ui/assets/{Blocks-D_9GkxFk.js.map → Blocks-CCtj7wrz.js.map} +1 -1
- prefect/server/ui/assets/{BlocksCatalog-kqMThRld.js → BlocksCatalog-CMTvZPfa.js} +2 -2
- prefect/server/ui/assets/{BlocksCatalog-kqMThRld.js.map → BlocksCatalog-CMTvZPfa.js.map} +1 -1
- prefect/server/ui/assets/{BlocksCatalogCreate-DMhVoHqt.js → BlocksCatalogCreate-BvAnp5Nv.js} +2 -2
- prefect/server/ui/assets/{BlocksCatalogCreate-DMhVoHqt.js.map → BlocksCatalogCreate-BvAnp5Nv.js.map} +1 -1
- prefect/server/ui/assets/{BlocksCatalogView-30zvf2Zf.js → BlocksCatalogView-dqnPzpRL.js} +2 -2
- prefect/server/ui/assets/{BlocksCatalogView-30zvf2Zf.js.map → BlocksCatalogView-dqnPzpRL.js.map} +1 -1
- prefect/server/ui/assets/{ConcurrencyLimit-BW7Bnl17.js → ConcurrencyLimit-ontTM1oT.js} +2 -2
- prefect/server/ui/assets/{ConcurrencyLimit-BW7Bnl17.js.map → ConcurrencyLimit-ontTM1oT.js.map} +1 -1
- prefect/server/ui/assets/{ConcurrencyLimits-Ct9fPcPd.js → ConcurrencyLimits-Ctczt53J.js} +2 -2
- prefect/server/ui/assets/{ConcurrencyLimits-Ct9fPcPd.js.map → ConcurrencyLimits-Ctczt53J.js.map} +1 -1
- prefect/server/ui/assets/{Dashboard-C9HbsW9C.js → Dashboard-o-S3xTqz.js} +2 -2
- prefect/server/ui/assets/{Dashboard-C9HbsW9C.js.map → Dashboard-o-S3xTqz.js.map} +1 -1
- prefect/server/ui/assets/{Deployment-BnhoLgTP.js → Deployment-DY8bGLAc.js} +2 -2
- prefect/server/ui/assets/{Deployment-BnhoLgTP.js.map → Deployment-DY8bGLAc.js.map} +1 -1
- prefect/server/ui/assets/{DeploymentDuplicate-B8_LrQLl.js → DeploymentDuplicate-CMMz8lrd.js} +2 -2
- prefect/server/ui/assets/{DeploymentDuplicate-B8_LrQLl.js.map → DeploymentDuplicate-CMMz8lrd.js.map} +1 -1
- prefect/server/ui/assets/{DeploymentEdit-B4W9AHKw.js → DeploymentEdit-BlRSxdKn.js} +2 -2
- prefect/server/ui/assets/{DeploymentEdit-B4W9AHKw.js.map → DeploymentEdit-BlRSxdKn.js.map} +1 -1
- prefect/server/ui/assets/Deployments-lcIIbm1i.js +2 -0
- prefect/server/ui/assets/Deployments-lcIIbm1i.js.map +1 -0
- prefect/server/ui/assets/{Event-DQwLIzdd.js → Event-0j-HIfsu.js} +2 -2
- prefect/server/ui/assets/{Event-DQwLIzdd.js.map → Event-0j-HIfsu.js.map} +1 -1
- prefect/server/ui/assets/{Events-D62-llVt.js → Events-UuQjwz0L.js} +2 -2
- prefect/server/ui/assets/{Events-D62-llVt.js.map → Events-UuQjwz0L.js.map} +1 -1
- prefect/server/ui/assets/{Flow-Cq4u3Hyv.js → Flow-RmpH3LjK.js} +2 -2
- prefect/server/ui/assets/{Flow-Cq4u3Hyv.js.map → Flow-RmpH3LjK.js.map} +1 -1
- prefect/server/ui/assets/{FlowRun-3dhHtN2i.js → FlowRun-C3yAz7L4.js} +2 -2
- prefect/server/ui/assets/{FlowRun-3dhHtN2i.js.map → FlowRun-C3yAz7L4.js.map} +1 -1
- prefect/server/ui/assets/{FlowRunCreate-BwLIWkPS.js → FlowRunCreate-UNkalFWW.js} +2 -2
- prefect/server/ui/assets/{FlowRunCreate-BwLIWkPS.js.map → FlowRunCreate-UNkalFWW.js.map} +1 -1
- prefect/server/ui/assets/{Flows-DDhFukcU.js → Flows-BrSJzZzX.js} +2 -2
- prefect/server/ui/assets/{Flows-DDhFukcU.js.map → Flows-BrSJzZzX.js.map} +1 -1
- prefect/server/ui/assets/{Runs-CAwq35x9.js → Runs-DlwKZ4nz.js} +2 -2
- prefect/server/ui/assets/{Runs-CAwq35x9.js.map → Runs-DlwKZ4nz.js.map} +1 -1
- prefect/server/ui/assets/{RunsPageWithDefaultFilter-VUJP7e64-CnkYar0q.js → RunsPageWithDefaultFilter-VUJP7e64-C4FVTl9j.js} +2 -2
- prefect/server/ui/assets/{RunsPageWithDefaultFilter-VUJP7e64-CnkYar0q.js.map → RunsPageWithDefaultFilter-VUJP7e64-C4FVTl9j.js.map} +1 -1
- prefect/server/ui/assets/{Settings-DBdmXrdg.js → Settings-CrsUm3c9.js} +2 -2
- prefect/server/ui/assets/{Settings-DBdmXrdg.js.map → Settings-CrsUm3c9.js.map} +1 -1
- prefect/server/ui/assets/{TaskRun-DWfpXthA.js → TaskRun-BPnbcZPW.js} +2 -2
- prefect/server/ui/assets/{TaskRun-DWfpXthA.js.map → TaskRun-BPnbcZPW.js.map} +1 -1
- prefect/server/ui/assets/{Unauthenticated-C8GRLLPD.js → Unauthenticated-2pBvXwGA.js} +2 -2
- prefect/server/ui/assets/{Unauthenticated-C8GRLLPD.js.map → Unauthenticated-2pBvXwGA.js.map} +1 -1
- prefect/server/ui/assets/{Variables-EylxYrv9.js → Variables-Bjlboyhp.js} +2 -2
- prefect/server/ui/assets/{Variables-EylxYrv9.js.map → Variables-Bjlboyhp.js.map} +1 -1
- prefect/server/ui/assets/{WorkPool-CfgEuRe7.js → WorkPool-lRyupHlB.js} +2 -2
- prefect/server/ui/assets/{WorkPool-CfgEuRe7.js.map → WorkPool-lRyupHlB.js.map} +1 -1
- prefect/server/ui/assets/{WorkPoolCreate-C4DC7Wtp.js → WorkPoolCreate-_lyGs-1E.js} +2 -2
- prefect/server/ui/assets/{WorkPoolCreate-C4DC7Wtp.js.map → WorkPoolCreate-_lyGs-1E.js.map} +1 -1
- prefect/server/ui/assets/{WorkPoolEdit-D1PFei0l.js → WorkPoolEdit-FAAVeANy.js} +2 -2
- prefect/server/ui/assets/{WorkPoolEdit-D1PFei0l.js.map → WorkPoolEdit-FAAVeANy.js.map} +1 -1
- prefect/server/ui/assets/{WorkPoolQueue-CisaZ0pF.js → WorkPoolQueue-CRctEElk.js} +2 -2
- prefect/server/ui/assets/{WorkPoolQueue-CisaZ0pF.js.map → WorkPoolQueue-CRctEElk.js.map} +1 -1
- prefect/server/ui/assets/{WorkPoolQueueCreate-30kVnfeo.js → WorkPoolQueueCreate-BDTuIRGA.js} +2 -2
- prefect/server/ui/assets/{WorkPoolQueueCreate-30kVnfeo.js.map → WorkPoolQueueCreate-BDTuIRGA.js.map} +1 -1
- prefect/server/ui/assets/{WorkPoolQueueEdit-DBsgToTG.js → WorkPoolQueueEdit-qqk5OXB2.js} +2 -2
- prefect/server/ui/assets/{WorkPoolQueueEdit-DBsgToTG.js.map → WorkPoolQueueEdit-qqk5OXB2.js.map} +1 -1
- prefect/server/ui/assets/{WorkPools-Dt8Bj0YS.js → WorkPools-CLW1U3iu.js} +2 -2
- prefect/server/ui/assets/{WorkPools-Dt8Bj0YS.js.map → WorkPools-CLW1U3iu.js.map} +1 -1
- prefect/server/ui/assets/{WorkQueueToWorkPoolQueueRedirect-DJZTvjVT-D9qSz7kl.js → WorkQueueToWorkPoolQueueRedirect-DJZTvjVT-B__6dQYl.js} +2 -2
- prefect/server/ui/assets/{WorkQueueToWorkPoolQueueRedirect-DJZTvjVT-D9qSz7kl.js.map → WorkQueueToWorkPoolQueueRedirect-DJZTvjVT-B__6dQYl.js.map} +1 -1
- prefect/server/ui/assets/{index-C9FkjDpZ.js → index-DS86r6aC.js} +4 -4
- prefect/server/ui/assets/{index-C9FkjDpZ.js.map → index-DS86r6aC.js.map} +1 -1
- prefect/server/ui/assets/{mapper-NwaeXCHc.js → mapper-Bm5aN_dh.js} +2 -2
- prefect/server/ui/assets/{mapper-NwaeXCHc.js.map → mapper-Bm5aN_dh.js.map} +1 -1
- prefect/server/ui/assets/useCan-DzvhQ1fT.js +2 -0
- prefect/server/ui/assets/{useCan-DDmEnNFg.js.map → useCan-DzvhQ1fT.js.map} +1 -1
- prefect/server/ui/assets/{usePageTitle-B0Hz_El0.js → usePageTitle-BXVkHaWQ.js} +2 -2
- prefect/server/ui/assets/{usePageTitle-B0Hz_El0.js.map → usePageTitle-BXVkHaWQ.js.map} +1 -1
- prefect/server/ui/assets/{usePrefectApi-B2fKZYDa.js → usePrefectApi-Cn-hX9ms.js} +2 -2
- prefect/server/ui/assets/{usePrefectApi-B2fKZYDa.js.map → usePrefectApi-Cn-hX9ms.js.map} +1 -1
- prefect/server/ui/index.html +1 -1
- prefect/utilities/collections.py +5 -1
- {prefect-3.6.16.dev4.dist-info → prefect-3.6.16.dev5.dist-info}/METADATA +1 -1
- {prefect-3.6.16.dev4.dist-info → prefect-3.6.16.dev5.dist-info}/RECORD +117 -117
- prefect/server/ui/assets/404-K6PaZiye.js +0 -2
- prefect/server/ui/assets/Deployments-DAHDVCIs.js +0 -2
- prefect/server/ui/assets/Deployments-DAHDVCIs.js.map +0 -1
- prefect/server/ui/assets/useCan-DDmEnNFg.js +0 -2
- {prefect-3.6.16.dev4.dist-info → prefect-3.6.16.dev5.dist-info}/WHEEL +0 -0
- {prefect-3.6.16.dev4.dist-info → prefect-3.6.16.dev5.dist-info}/entry_points.txt +0 -0
- {prefect-3.6.16.dev4.dist-info → prefect-3.6.16.dev5.dist-info}/licenses/LICENSE +0 -0
prefect/__init__.py
CHANGED
|
@@ -127,10 +127,6 @@ def _initialize_plugins() -> None:
|
|
|
127
127
|
print(f"Failed to initialize plugins: {e}", file=sys.stderr)
|
|
128
128
|
|
|
129
129
|
|
|
130
|
-
# Initialize plugins on import if enabled
|
|
131
|
-
_initialize_plugins()
|
|
132
|
-
|
|
133
|
-
|
|
134
130
|
def _initialize_sdk_analytics() -> None:
|
|
135
131
|
"""
|
|
136
132
|
Initialize SDK analytics for telemetry.
|
|
@@ -214,3 +210,8 @@ def __getattr__(attr_name: str) -> Any:
|
|
|
214
210
|
mname, _, attr = (ex.name or "").rpartition(".")
|
|
215
211
|
ctx = {"name": mname, "obj": attr} if sys.version_info >= (3, 10) else {}
|
|
216
212
|
raise AttributeError(f"module {mname} has no attribute {attr}", **ctx) from ex
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
# Initialize plugins on import if enabled
|
|
216
|
+
# Must be after __getattr__ so lazy imports work when plugins import from prefect
|
|
217
|
+
_initialize_plugins()
|
prefect/_build_info.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
# Generated by versioningit
|
|
2
|
-
__version__ = "3.6.16.
|
|
3
|
-
__build_date__ = "2026-02-
|
|
4
|
-
__git_commit__ = "
|
|
2
|
+
__version__ = "3.6.16.dev5"
|
|
3
|
+
__build_date__ = "2026-02-03 08:18:27.573940+00:00"
|
|
4
|
+
__git_commit__ = "2c9a7cfbeaeefcf69d03a2f98b68baa7fe794ef2"
|
|
5
5
|
__dirty__ = False
|
|
@@ -135,5 +135,5 @@ async def run_startup_hooks(ctx: HookContext) -> list[SetupSummary]:
|
|
|
135
135
|
f"None instead of SetupResult."
|
|
136
136
|
)
|
|
137
137
|
|
|
138
|
-
logger.
|
|
138
|
+
logger.debug("Plugin system initialization complete (%d plugins)", len(summaries))
|
|
139
139
|
return summaries
|
prefect/cli/deploy/_core.py
CHANGED
|
@@ -6,6 +6,7 @@ from copy import deepcopy
|
|
|
6
6
|
from pathlib import Path
|
|
7
7
|
from typing import TYPE_CHECKING, Any, Optional
|
|
8
8
|
|
|
9
|
+
from rich.markup import escape
|
|
9
10
|
from rich.panel import Panel
|
|
10
11
|
|
|
11
12
|
import prefect.cli.root as root
|
|
@@ -500,5 +501,11 @@ async def _run_multi_deploy(
|
|
|
500
501
|
else:
|
|
501
502
|
app.console.print("Skipping unnamed deployment.", style="yellow")
|
|
502
503
|
continue
|
|
503
|
-
|
|
504
|
+
# Resolve env var templates in name for display purposes only
|
|
505
|
+
resolved_name = apply_values(
|
|
506
|
+
{"name": deploy_config["name"]}, os.environ, remove_notset=False
|
|
507
|
+
)["name"]
|
|
508
|
+
# Escape Rich markup to prevent brackets from being interpreted as style tags
|
|
509
|
+
display_name = escape(str(resolved_name))
|
|
510
|
+
app.console.print(Panel(f"Deploying {display_name}", style="blue"))
|
|
504
511
|
await _run_single_deploy(deploy_config, actions, prefect_file=prefect_file)
|
prefect/server/api/admin.py
CHANGED
|
@@ -2,11 +2,8 @@
|
|
|
2
2
|
Routes for admin-level interactions with the Prefect REST API.
|
|
3
3
|
"""
|
|
4
4
|
|
|
5
|
-
from fastapi import Body, Depends, Response, status
|
|
6
|
-
|
|
7
5
|
import prefect
|
|
8
6
|
import prefect.settings
|
|
9
|
-
from prefect.server.database import PrefectDBInterface, provide_database_interface
|
|
10
7
|
from prefect.server.utilities.server import PrefectRouter
|
|
11
8
|
|
|
12
9
|
router: PrefectRouter = PrefectRouter(prefix="/admin", tags=["Admin"])
|
|
@@ -26,60 +23,3 @@ async def read_settings() -> prefect.settings.Settings:
|
|
|
26
23
|
async def read_version() -> str:
|
|
27
24
|
"""Returns the Prefect version number"""
|
|
28
25
|
return prefect.__version__
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
@router.post("/database/clear", status_code=status.HTTP_204_NO_CONTENT)
|
|
32
|
-
async def clear_database(
|
|
33
|
-
db: PrefectDBInterface = Depends(provide_database_interface),
|
|
34
|
-
confirm: bool = Body(
|
|
35
|
-
False,
|
|
36
|
-
embed=True,
|
|
37
|
-
description="Pass confirm=True to confirm you want to modify the database.",
|
|
38
|
-
),
|
|
39
|
-
response: Response = None, # type: ignore
|
|
40
|
-
) -> None:
|
|
41
|
-
"""Clear all database tables without dropping them."""
|
|
42
|
-
if not confirm:
|
|
43
|
-
response.status_code = status.HTTP_400_BAD_REQUEST
|
|
44
|
-
return
|
|
45
|
-
async with db.session_context(begin_transaction=True) as session:
|
|
46
|
-
# work pool has a circular dependency on pool queue; delete it first
|
|
47
|
-
await session.execute(db.WorkPool.__table__.delete())
|
|
48
|
-
for table in reversed(db.Base.metadata.sorted_tables):
|
|
49
|
-
await session.execute(table.delete())
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
@router.post("/database/drop", status_code=status.HTTP_204_NO_CONTENT)
|
|
53
|
-
async def drop_database(
|
|
54
|
-
db: PrefectDBInterface = Depends(provide_database_interface),
|
|
55
|
-
confirm: bool = Body(
|
|
56
|
-
False,
|
|
57
|
-
embed=True,
|
|
58
|
-
description="Pass confirm=True to confirm you want to modify the database.",
|
|
59
|
-
),
|
|
60
|
-
response: Response = None,
|
|
61
|
-
) -> None:
|
|
62
|
-
"""Drop all database objects."""
|
|
63
|
-
if not confirm:
|
|
64
|
-
response.status_code = status.HTTP_400_BAD_REQUEST
|
|
65
|
-
return
|
|
66
|
-
|
|
67
|
-
await db.drop_db()
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
@router.post("/database/create", status_code=status.HTTP_204_NO_CONTENT)
|
|
71
|
-
async def create_database(
|
|
72
|
-
db: PrefectDBInterface = Depends(provide_database_interface),
|
|
73
|
-
confirm: bool = Body(
|
|
74
|
-
False,
|
|
75
|
-
embed=True,
|
|
76
|
-
description="Pass confirm=True to confirm you want to modify the database.",
|
|
77
|
-
),
|
|
78
|
-
response: Response = None,
|
|
79
|
-
) -> None:
|
|
80
|
-
"""Create all database objects."""
|
|
81
|
-
if not confirm:
|
|
82
|
-
response.status_code = status.HTTP_400_BAD_REQUEST
|
|
83
|
-
return
|
|
84
|
-
|
|
85
|
-
await db.create_db()
|
|
@@ -23,7 +23,12 @@ from prefect.server.database import PrefectDBInterface, provide_database_interfa
|
|
|
23
23
|
from prefect.server.exceptions import MissingVariableError, ObjectNotFoundError
|
|
24
24
|
from prefect.server.models.deployments import mark_deployments_ready
|
|
25
25
|
from prefect.server.models.workers import DEFAULT_AGENT_WORK_POOL_NAME
|
|
26
|
-
from prefect.server.schemas.responses import
|
|
26
|
+
from prefect.server.schemas.responses import (
|
|
27
|
+
DeploymentBulkDeleteResponse,
|
|
28
|
+
DeploymentPaginationResponse,
|
|
29
|
+
FlowRunBulkCreateResponse,
|
|
30
|
+
FlowRunCreateResult,
|
|
31
|
+
)
|
|
27
32
|
from prefect.server.utilities.server import PrefectRouter
|
|
28
33
|
from prefect.types import DateTime
|
|
29
34
|
from prefect.types._datetime import now
|
|
@@ -605,6 +610,49 @@ async def delete_deployment(
|
|
|
605
610
|
)
|
|
606
611
|
|
|
607
612
|
|
|
613
|
+
BULK_OPERATION_LIMIT = 50
|
|
614
|
+
|
|
615
|
+
|
|
616
|
+
@router.post("/bulk_delete")
|
|
617
|
+
async def bulk_delete_deployments(
|
|
618
|
+
deployments: Optional[schemas.filters.DeploymentFilter] = Body(
|
|
619
|
+
None, description="Filter criteria for deployments to delete"
|
|
620
|
+
),
|
|
621
|
+
limit: int = Body(
|
|
622
|
+
BULK_OPERATION_LIMIT,
|
|
623
|
+
ge=1,
|
|
624
|
+
le=BULK_OPERATION_LIMIT,
|
|
625
|
+
description=f"Maximum number of deployments to delete. Defaults to {BULK_OPERATION_LIMIT}.",
|
|
626
|
+
),
|
|
627
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
|
628
|
+
) -> DeploymentBulkDeleteResponse:
|
|
629
|
+
"""
|
|
630
|
+
Bulk delete deployments matching the specified filter criteria.
|
|
631
|
+
|
|
632
|
+
Returns the IDs of deployments that were deleted.
|
|
633
|
+
"""
|
|
634
|
+
async with db.session_context(begin_transaction=True) as session:
|
|
635
|
+
# Query matching deployments
|
|
636
|
+
db_deployments = await models.deployments.read_deployments(
|
|
637
|
+
session=session,
|
|
638
|
+
deployment_filter=deployments,
|
|
639
|
+
limit=limit,
|
|
640
|
+
)
|
|
641
|
+
|
|
642
|
+
if not db_deployments:
|
|
643
|
+
return DeploymentBulkDeleteResponse(deleted=[])
|
|
644
|
+
|
|
645
|
+
deployment_ids = [d.id for d in db_deployments]
|
|
646
|
+
|
|
647
|
+
# Delete deployments
|
|
648
|
+
deleted_ids = await models.deployments.delete_deployments(
|
|
649
|
+
session=session,
|
|
650
|
+
deployment_ids=deployment_ids,
|
|
651
|
+
)
|
|
652
|
+
|
|
653
|
+
return DeploymentBulkDeleteResponse(deleted=deleted_ids)
|
|
654
|
+
|
|
655
|
+
|
|
608
656
|
@router.post("/{id:uuid}/schedule")
|
|
609
657
|
async def schedule_deployment(
|
|
610
658
|
deployment_id: UUID = Path(..., description="The deployment id", alias="id"),
|
|
@@ -842,6 +890,224 @@ async def create_flow_run_from_deployment(
|
|
|
842
890
|
)
|
|
843
891
|
|
|
844
892
|
|
|
893
|
+
BULK_CREATE_LIMIT = 100
|
|
894
|
+
|
|
895
|
+
|
|
896
|
+
@router.post("/{id:uuid}/create_flow_run/bulk")
|
|
897
|
+
async def bulk_create_flow_runs_from_deployment(
|
|
898
|
+
flow_runs: List[schemas.actions.DeploymentFlowRunCreate] = Body(
|
|
899
|
+
..., description="List of flow run configurations to create"
|
|
900
|
+
),
|
|
901
|
+
deployment_id: UUID = Path(..., description="The deployment id", alias="id"),
|
|
902
|
+
created_by: Optional[schemas.core.CreatedBy] = Depends(dependencies.get_created_by),
|
|
903
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
|
904
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
|
905
|
+
) -> FlowRunBulkCreateResponse:
|
|
906
|
+
"""
|
|
907
|
+
Create multiple flow runs from a deployment.
|
|
908
|
+
|
|
909
|
+
Any parameters not provided will be inferred from the deployment's parameters.
|
|
910
|
+
If tags are not provided, the deployment's tags will be used.
|
|
911
|
+
|
|
912
|
+
If no state is provided, the flow runs will be created in a SCHEDULED state.
|
|
913
|
+
"""
|
|
914
|
+
if len(flow_runs) > BULK_CREATE_LIMIT:
|
|
915
|
+
raise HTTPException(
|
|
916
|
+
status_code=status.HTTP_400_BAD_REQUEST,
|
|
917
|
+
detail=f"Cannot create more than {BULK_CREATE_LIMIT} flow runs at once.",
|
|
918
|
+
)
|
|
919
|
+
|
|
920
|
+
results: List[FlowRunCreateResult] = []
|
|
921
|
+
|
|
922
|
+
async with db.session_context(begin_transaction=True) as session:
|
|
923
|
+
# Get the deployment once - do this before the empty check so we
|
|
924
|
+
# return 404 for non-existent deployments even with an empty list
|
|
925
|
+
deployment = await models.deployments.read_deployment(
|
|
926
|
+
session=session, deployment_id=deployment_id
|
|
927
|
+
)
|
|
928
|
+
|
|
929
|
+
if not deployment:
|
|
930
|
+
raise HTTPException(
|
|
931
|
+
status_code=status.HTTP_404_NOT_FOUND, detail="Deployment not found"
|
|
932
|
+
)
|
|
933
|
+
|
|
934
|
+
# Return early for empty list, but only after verifying deployment exists
|
|
935
|
+
if not flow_runs:
|
|
936
|
+
return FlowRunBulkCreateResponse(results=[])
|
|
937
|
+
|
|
938
|
+
# Pre-create unique work queues to avoid race conditions
|
|
939
|
+
# Collect unique work queue names that differ from the deployment's default
|
|
940
|
+
unique_work_queue_names = {
|
|
941
|
+
fr.work_queue_name
|
|
942
|
+
for fr in flow_runs
|
|
943
|
+
if fr.work_queue_name and fr.work_queue_name != deployment.work_queue_name
|
|
944
|
+
}
|
|
945
|
+
|
|
946
|
+
# Pre-create work queues if needed
|
|
947
|
+
if (
|
|
948
|
+
unique_work_queue_names
|
|
949
|
+
and deployment.work_queue
|
|
950
|
+
and deployment.work_queue.work_pool
|
|
951
|
+
):
|
|
952
|
+
work_pool_name = deployment.work_queue.work_pool.name
|
|
953
|
+
for work_queue_name in unique_work_queue_names:
|
|
954
|
+
await worker_lookups._get_work_queue_id_from_name(
|
|
955
|
+
session=session,
|
|
956
|
+
work_pool_name=work_pool_name,
|
|
957
|
+
work_queue_name=work_queue_name,
|
|
958
|
+
create_queue_if_not_found=True,
|
|
959
|
+
)
|
|
960
|
+
|
|
961
|
+
# Build hydration context once
|
|
962
|
+
try:
|
|
963
|
+
ctx = await HydrationContext.build(
|
|
964
|
+
session=session,
|
|
965
|
+
raise_on_error=True,
|
|
966
|
+
render_jinja=True,
|
|
967
|
+
render_workspace_variables=True,
|
|
968
|
+
)
|
|
969
|
+
except HydrationError as exc:
|
|
970
|
+
raise HTTPException(
|
|
971
|
+
status.HTTP_400_BAD_REQUEST,
|
|
972
|
+
detail=f"Error building hydration context: {exc}",
|
|
973
|
+
)
|
|
974
|
+
|
|
975
|
+
# Process flow runs sequentially within the transaction
|
|
976
|
+
# (SQLAlchemy sessions are not safe for concurrent operations)
|
|
977
|
+
for flow_run_request in flow_runs:
|
|
978
|
+
try:
|
|
979
|
+
# Hydrate parameters
|
|
980
|
+
dehydrated_params = deployment.parameters.copy()
|
|
981
|
+
dehydrated_params.update(flow_run_request.parameters or {})
|
|
982
|
+
parameters = hydrate(dehydrated_params, ctx)
|
|
983
|
+
|
|
984
|
+
# Default and override for enforce_parameter_schema
|
|
985
|
+
enforce_parameter_schema = deployment.enforce_parameter_schema
|
|
986
|
+
if flow_run_request.enforce_parameter_schema is not None:
|
|
987
|
+
enforce_parameter_schema = flow_run_request.enforce_parameter_schema
|
|
988
|
+
|
|
989
|
+
# Validate parameters if schema enforcement is enabled
|
|
990
|
+
if enforce_parameter_schema:
|
|
991
|
+
if not isinstance(deployment.parameter_openapi_schema, dict):
|
|
992
|
+
results.append(
|
|
993
|
+
FlowRunCreateResult(
|
|
994
|
+
status="FAILED",
|
|
995
|
+
error="Parameter schema enforcement is enabled but deployment has no valid schema.",
|
|
996
|
+
)
|
|
997
|
+
)
|
|
998
|
+
continue
|
|
999
|
+
try:
|
|
1000
|
+
validate(
|
|
1001
|
+
parameters,
|
|
1002
|
+
deployment.parameter_openapi_schema,
|
|
1003
|
+
raise_on_error=True,
|
|
1004
|
+
)
|
|
1005
|
+
except ValidationError as exc:
|
|
1006
|
+
results.append(
|
|
1007
|
+
FlowRunCreateResult(
|
|
1008
|
+
status="FAILED",
|
|
1009
|
+
error=f"Parameter validation failed: {exc}",
|
|
1010
|
+
)
|
|
1011
|
+
)
|
|
1012
|
+
continue
|
|
1013
|
+
except CircularSchemaRefError:
|
|
1014
|
+
results.append(
|
|
1015
|
+
FlowRunCreateResult(
|
|
1016
|
+
status="FAILED",
|
|
1017
|
+
error="Invalid schema: circular references detected.",
|
|
1018
|
+
)
|
|
1019
|
+
)
|
|
1020
|
+
continue
|
|
1021
|
+
|
|
1022
|
+
# Validate job variables
|
|
1023
|
+
try:
|
|
1024
|
+
await validate_job_variables_for_deployment_flow_run(
|
|
1025
|
+
session, deployment, flow_run_request
|
|
1026
|
+
)
|
|
1027
|
+
except HTTPException as exc:
|
|
1028
|
+
results.append(
|
|
1029
|
+
FlowRunCreateResult(
|
|
1030
|
+
status="FAILED",
|
|
1031
|
+
error=str(exc.detail),
|
|
1032
|
+
)
|
|
1033
|
+
)
|
|
1034
|
+
continue
|
|
1035
|
+
|
|
1036
|
+
# Determine work queue
|
|
1037
|
+
work_queue_name = deployment.work_queue_name
|
|
1038
|
+
work_queue_id = deployment.work_queue_id
|
|
1039
|
+
|
|
1040
|
+
if flow_run_request.work_queue_name:
|
|
1041
|
+
if (
|
|
1042
|
+
deployment.work_queue is None
|
|
1043
|
+
or deployment.work_queue.work_pool is None
|
|
1044
|
+
):
|
|
1045
|
+
results.append(
|
|
1046
|
+
FlowRunCreateResult(
|
|
1047
|
+
status="FAILED",
|
|
1048
|
+
error=f"Cannot create flow run in work queue {flow_run_request.work_queue_name} because deployment is not associated with a work pool.",
|
|
1049
|
+
)
|
|
1050
|
+
)
|
|
1051
|
+
continue
|
|
1052
|
+
|
|
1053
|
+
work_queue_id = await worker_lookups._get_work_queue_id_from_name(
|
|
1054
|
+
session=session,
|
|
1055
|
+
work_pool_name=deployment.work_queue.work_pool.name,
|
|
1056
|
+
work_queue_name=flow_run_request.work_queue_name,
|
|
1057
|
+
create_queue_if_not_found=True,
|
|
1058
|
+
)
|
|
1059
|
+
work_queue_name = flow_run_request.work_queue_name
|
|
1060
|
+
|
|
1061
|
+
# Create the flow run model
|
|
1062
|
+
flow_run_model = schemas.core.FlowRun(
|
|
1063
|
+
**flow_run_request.model_dump(
|
|
1064
|
+
exclude={
|
|
1065
|
+
"parameters",
|
|
1066
|
+
"tags",
|
|
1067
|
+
"infrastructure_document_id",
|
|
1068
|
+
"work_queue_name",
|
|
1069
|
+
"enforce_parameter_schema",
|
|
1070
|
+
}
|
|
1071
|
+
),
|
|
1072
|
+
flow_id=deployment.flow_id,
|
|
1073
|
+
deployment_id=deployment.id,
|
|
1074
|
+
deployment_version=deployment.version,
|
|
1075
|
+
parameters=parameters,
|
|
1076
|
+
tags=set(deployment.tags).union(flow_run_request.tags),
|
|
1077
|
+
infrastructure_document_id=(
|
|
1078
|
+
flow_run_request.infrastructure_document_id
|
|
1079
|
+
or deployment.infrastructure_document_id
|
|
1080
|
+
),
|
|
1081
|
+
work_queue_name=work_queue_name,
|
|
1082
|
+
work_queue_id=work_queue_id,
|
|
1083
|
+
created_by=created_by,
|
|
1084
|
+
)
|
|
1085
|
+
|
|
1086
|
+
if not flow_run_model.state:
|
|
1087
|
+
flow_run_model.state = schemas.states.Scheduled()
|
|
1088
|
+
|
|
1089
|
+
model = await models.flow_runs.create_flow_run(
|
|
1090
|
+
session=session, flow_run=flow_run_model
|
|
1091
|
+
)
|
|
1092
|
+
|
|
1093
|
+
results.append(
|
|
1094
|
+
FlowRunCreateResult(
|
|
1095
|
+
flow_run_id=model.id,
|
|
1096
|
+
status="CREATED",
|
|
1097
|
+
)
|
|
1098
|
+
)
|
|
1099
|
+
|
|
1100
|
+
except Exception as exc:
|
|
1101
|
+
results.append(
|
|
1102
|
+
FlowRunCreateResult(
|
|
1103
|
+
status="FAILED",
|
|
1104
|
+
error=str(exc),
|
|
1105
|
+
)
|
|
1106
|
+
)
|
|
1107
|
+
|
|
1108
|
+
return FlowRunBulkCreateResponse(results=results)
|
|
1109
|
+
|
|
1110
|
+
|
|
845
1111
|
# DEPRECATED
|
|
846
1112
|
@router.get("/{id:uuid}/work_queue_check", deprecated=True)
|
|
847
1113
|
async def work_queue_check_for_deployment(
|
prefect/server/api/flow_runs.py
CHANGED
|
@@ -46,6 +46,9 @@ from prefect.server.orchestration.policies import (
|
|
|
46
46
|
)
|
|
47
47
|
from prefect.server.schemas.graph import Graph
|
|
48
48
|
from prefect.server.schemas.responses import (
|
|
49
|
+
FlowRunBulkDeleteResponse,
|
|
50
|
+
FlowRunBulkSetStateResponse,
|
|
51
|
+
FlowRunOrchestrationResult,
|
|
49
52
|
FlowRunPaginationResponse,
|
|
50
53
|
OrchestrationResult,
|
|
51
54
|
)
|
|
@@ -609,6 +612,142 @@ async def delete_flow_run_logs(
|
|
|
609
612
|
)
|
|
610
613
|
|
|
611
614
|
|
|
615
|
+
BULK_OPERATION_LIMIT = 50
|
|
616
|
+
|
|
617
|
+
|
|
618
|
+
@router.post("/bulk_delete")
|
|
619
|
+
async def bulk_delete_flow_runs(
|
|
620
|
+
docket: dependencies.Docket,
|
|
621
|
+
flow_runs: Optional[schemas.filters.FlowRunFilter] = Body(
|
|
622
|
+
None, description="Filter criteria for flow runs to delete"
|
|
623
|
+
),
|
|
624
|
+
limit: int = Body(
|
|
625
|
+
BULK_OPERATION_LIMIT,
|
|
626
|
+
ge=1,
|
|
627
|
+
le=BULK_OPERATION_LIMIT,
|
|
628
|
+
description=f"Maximum number of flow runs to delete. Defaults to {BULK_OPERATION_LIMIT}.",
|
|
629
|
+
),
|
|
630
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
|
631
|
+
) -> FlowRunBulkDeleteResponse:
|
|
632
|
+
"""
|
|
633
|
+
Bulk delete flow runs matching the specified filter criteria.
|
|
634
|
+
|
|
635
|
+
Returns the IDs of flow runs that were deleted.
|
|
636
|
+
"""
|
|
637
|
+
async with db.session_context(begin_transaction=True) as session:
|
|
638
|
+
# Query matching flow runs
|
|
639
|
+
db_flow_runs = await models.flow_runs.read_flow_runs(
|
|
640
|
+
session=session,
|
|
641
|
+
flow_run_filter=flow_runs,
|
|
642
|
+
limit=limit,
|
|
643
|
+
)
|
|
644
|
+
|
|
645
|
+
if not db_flow_runs:
|
|
646
|
+
return FlowRunBulkDeleteResponse(deleted=[])
|
|
647
|
+
|
|
648
|
+
flow_run_ids = [fr.id for fr in db_flow_runs]
|
|
649
|
+
|
|
650
|
+
# Delete flow runs
|
|
651
|
+
deleted_ids = await models.flow_runs.delete_flow_runs(
|
|
652
|
+
session=session,
|
|
653
|
+
flow_run_ids=flow_run_ids,
|
|
654
|
+
)
|
|
655
|
+
|
|
656
|
+
# Queue log cleanup for each deleted flow run
|
|
657
|
+
for flow_run_id in deleted_ids:
|
|
658
|
+
await docket.add(
|
|
659
|
+
delete_flow_run_logs,
|
|
660
|
+
key=f"delete_flow_run_logs:{flow_run_id}",
|
|
661
|
+
)(flow_run_id=flow_run_id)
|
|
662
|
+
|
|
663
|
+
return FlowRunBulkDeleteResponse(deleted=deleted_ids)
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
@router.post("/bulk_set_state")
async def bulk_set_flow_run_state(
    flow_runs: Optional[schemas.filters.FlowRunFilter] = Body(
        None, description="Filter criteria for flow runs to update"
    ),
    state: schemas.actions.StateCreate = Body(..., description="The state to set"),
    force: bool = Body(
        False,
        description=(
            "If false, orchestration rules will be applied that may alter or prevent"
            " the state transition. If True, orchestration rules are not applied."
        ),
    ),
    limit: int = Body(
        BULK_OPERATION_LIMIT,
        ge=1,
        le=BULK_OPERATION_LIMIT,
        description=f"Maximum number of flow runs to update. Defaults to {BULK_OPERATION_LIMIT}.",
    ),
    db: PrefectDBInterface = Depends(provide_database_interface),
    flow_policy: type[FlowRunOrchestrationPolicy] = Depends(
        orchestration_dependencies.provide_flow_policy
    ),
    orchestration_parameters: Dict[str, Any] = Depends(
        orchestration_dependencies.provide_flow_orchestration_parameters
    ),
    api_version: str = Depends(dependencies.provide_request_api_version),
    client_version: Optional[str] = Depends(dependencies.get_prefect_client_version),
) -> FlowRunBulkSetStateResponse:
    """
    Bulk set state for flow runs matching the specified filter criteria.

    At most ``limit`` (capped at BULK_OPERATION_LIMIT) matching flow runs are
    selected.  Each run's transition goes through the normal orchestration
    policy unless ``force`` is True.  A failure on one run does not abort the
    others: the exception is captured per-run as an ABORT result.

    Returns the orchestration results for each flow run.
    """
    # Propagate the caller's API version into the orchestration context so
    # version-dependent rules can see it.
    orchestration_parameters.update({"api-version": api_version})

    # Read the candidate runs in a short-lived session; only their ids are
    # needed afterwards.
    async with db.session_context() as session:
        # Query matching flow runs
        db_flow_runs = await models.flow_runs.read_flow_runs(
            session=session,
            flow_run_filter=flow_runs,
            limit=limit,
        )

    if not db_flow_runs:
        return FlowRunBulkSetStateResponse(results=[])

    results: List[FlowRunOrchestrationResult] = []

    # Process flow runs sequentially to avoid session conflicts.
    # NOTE(review): each run gets its own transaction with row locking
    # (with_for_update=True), so one run's failure rolls back only its own
    # transition — TODO confirm the original nesting matched this layout;
    # indentation was not recoverable from the diff rendering.
    for flow_run in db_flow_runs:
        async with db.session_context(
            begin_transaction=True, with_for_update=True
        ) as session:
            try:
                orchestration_result = await models.flow_runs.set_flow_run_state(
                    session=session,
                    flow_run_id=flow_run.id,
                    state=schemas.states.State.model_validate(state),
                    force=force,
                    flow_policy=flow_policy,
                    orchestration_parameters=orchestration_parameters,
                    client_version=client_version,
                )
                results.append(
                    FlowRunOrchestrationResult(
                        flow_run_id=flow_run.id,
                        status=orchestration_result.status,
                        state=orchestration_result.state,
                        details=orchestration_result.details,
                    )
                )
            except Exception as e:
                # Best-effort bulk semantics: report the failure for this run
                # instead of failing the whole request.
                results.append(
                    FlowRunOrchestrationResult(
                        flow_run_id=flow_run.id,
                        status=schemas.responses.SetStateStatus.ABORT,
                        state=None,
                        details=schemas.responses.StateAbortDetails(reason=str(e)),
                    )
                )

    return FlowRunBulkSetStateResponse(results=results)
|
|
749
|
+
|
|
750
|
+
|
|
612
751
|
@router.post("/{id:uuid}/set_state")
|
|
613
752
|
async def set_flow_run_state(
|
|
614
753
|
response: Response,
|
prefect/server/api/flows.py
CHANGED
|
@@ -12,7 +12,10 @@ import prefect.server.api.dependencies as dependencies
|
|
|
12
12
|
import prefect.server.models as models
|
|
13
13
|
import prefect.server.schemas as schemas
|
|
14
14
|
from prefect.server.database import PrefectDBInterface, provide_database_interface
|
|
15
|
-
from prefect.server.schemas.responses import
|
|
15
|
+
from prefect.server.schemas.responses import (
|
|
16
|
+
FlowBulkDeleteResponse,
|
|
17
|
+
FlowPaginationResponse,
|
|
18
|
+
)
|
|
16
19
|
from prefect.server.utilities.server import PrefectRouter
|
|
17
20
|
from prefect.types._datetime import now
|
|
18
21
|
|
|
@@ -164,6 +167,51 @@ async def delete_flow(
|
|
|
164
167
|
)
|
|
165
168
|
|
|
166
169
|
|
|
170
|
+
BULK_OPERATION_LIMIT = 50


@router.post("/bulk_delete")
async def bulk_delete_flows(
    flows: Optional[schemas.filters.FlowFilter] = Body(
        None, description="Filter criteria for flows to delete"
    ),
    limit: int = Body(
        BULK_OPERATION_LIMIT,
        ge=1,
        le=BULK_OPERATION_LIMIT,
        description=f"Maximum number of flows to delete. Defaults to {BULK_OPERATION_LIMIT}.",
    ),
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> FlowBulkDeleteResponse:
    """
    Delete up to ``limit`` flows that match the given filter.

    Deleting a flow also removes its associated deployments.  The whole
    operation runs in a single transaction.

    Returns the IDs of the flows that were deleted.
    """
    async with db.session_context(begin_transaction=True) as session:
        # Find the flows targeted by the filter, bounded by the request limit.
        matching_flows = await models.flows.read_flows(
            session=session,
            flow_filter=flows,
            limit=limit,
        )

        # Nothing matched — report an empty deletion rather than an error.
        if not matching_flows:
            return FlowBulkDeleteResponse(deleted=[])

        target_ids = [flow.id for flow in matching_flows]

        # Remove the flows (cascading to their deployments) and echo back
        # the ids the model layer confirms as deleted.
        removed_ids = await models.flows.delete_flows(
            session=session,
            flow_ids=target_ids,
        )

        return FlowBulkDeleteResponse(deleted=removed_ids)
|
|
213
|
+
|
|
214
|
+
|
|
167
215
|
@router.post("/paginate")
|
|
168
216
|
async def paginate_flows(
|
|
169
217
|
limit: int = dependencies.LimitBody(),
|