prefect-client 3.2.1__py3-none-any.whl → 3.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +15 -8
- prefect/_build_info.py +5 -0
- prefect/_internal/schemas/bases.py +4 -7
- prefect/_internal/schemas/validators.py +5 -6
- prefect/_result_records.py +6 -1
- prefect/client/orchestration/__init__.py +18 -6
- prefect/client/schemas/schedules.py +2 -2
- prefect/concurrency/asyncio.py +4 -3
- prefect/concurrency/sync.py +3 -3
- prefect/concurrency/v1/asyncio.py +3 -3
- prefect/concurrency/v1/sync.py +3 -3
- prefect/deployments/flow_runs.py +2 -2
- prefect/docker/docker_image.py +2 -3
- prefect/engine.py +1 -1
- prefect/events/clients.py +4 -3
- prefect/events/related.py +3 -5
- prefect/flows.py +11 -5
- prefect/locking/filesystem.py +8 -8
- prefect/logging/handlers.py +7 -11
- prefect/main.py +0 -2
- prefect/runtime/flow_run.py +10 -17
- prefect/server/api/__init__.py +34 -0
- prefect/server/api/admin.py +85 -0
- prefect/server/api/artifacts.py +224 -0
- prefect/server/api/automations.py +239 -0
- prefect/server/api/block_capabilities.py +25 -0
- prefect/server/api/block_documents.py +164 -0
- prefect/server/api/block_schemas.py +153 -0
- prefect/server/api/block_types.py +211 -0
- prefect/server/api/clients.py +246 -0
- prefect/server/api/collections.py +75 -0
- prefect/server/api/concurrency_limits.py +286 -0
- prefect/server/api/concurrency_limits_v2.py +269 -0
- prefect/server/api/csrf_token.py +38 -0
- prefect/server/api/dependencies.py +196 -0
- prefect/server/api/deployments.py +941 -0
- prefect/server/api/events.py +300 -0
- prefect/server/api/flow_run_notification_policies.py +120 -0
- prefect/server/api/flow_run_states.py +52 -0
- prefect/server/api/flow_runs.py +867 -0
- prefect/server/api/flows.py +210 -0
- prefect/server/api/logs.py +43 -0
- prefect/server/api/middleware.py +73 -0
- prefect/server/api/root.py +35 -0
- prefect/server/api/run_history.py +170 -0
- prefect/server/api/saved_searches.py +99 -0
- prefect/server/api/server.py +891 -0
- prefect/server/api/task_run_states.py +52 -0
- prefect/server/api/task_runs.py +342 -0
- prefect/server/api/task_workers.py +31 -0
- prefect/server/api/templates.py +35 -0
- prefect/server/api/ui/__init__.py +3 -0
- prefect/server/api/ui/flow_runs.py +128 -0
- prefect/server/api/ui/flows.py +173 -0
- prefect/server/api/ui/schemas.py +63 -0
- prefect/server/api/ui/task_runs.py +175 -0
- prefect/server/api/validation.py +382 -0
- prefect/server/api/variables.py +181 -0
- prefect/server/api/work_queues.py +230 -0
- prefect/server/api/workers.py +656 -0
- prefect/settings/sources.py +18 -5
- prefect/states.py +3 -3
- prefect/task_engine.py +3 -3
- prefect/types/_datetime.py +82 -3
- prefect/utilities/dockerutils.py +2 -2
- prefect/workers/base.py +5 -5
- {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info}/METADATA +10 -15
- {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info}/RECORD +70 -32
- {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info}/WHEEL +1 -2
- prefect/_version.py +0 -21
- prefect_client-3.2.1.dist-info/top_level.txt +0 -1
- {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,656 @@
|
|
1
|
+
"""
|
2
|
+
Routes for interacting with work queue objects.
|
3
|
+
"""
|
4
|
+
|
5
|
+
from typing import TYPE_CHECKING, List, Optional
|
6
|
+
from uuid import UUID, uuid4
|
7
|
+
|
8
|
+
import sqlalchemy as sa
|
9
|
+
from fastapi import (
|
10
|
+
BackgroundTasks,
|
11
|
+
Body,
|
12
|
+
Depends,
|
13
|
+
HTTPException,
|
14
|
+
Path,
|
15
|
+
status,
|
16
|
+
)
|
17
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
18
|
+
|
19
|
+
import prefect.server.api.dependencies as dependencies
|
20
|
+
import prefect.server.models as models
|
21
|
+
import prefect.server.schemas as schemas
|
22
|
+
from prefect.server.api.validation import validate_job_variable_defaults_for_work_pool
|
23
|
+
from prefect.server.database import PrefectDBInterface, provide_database_interface
|
24
|
+
from prefect.server.models.deployments import mark_deployments_ready
|
25
|
+
from prefect.server.models.work_queues import (
|
26
|
+
emit_work_queue_status_event,
|
27
|
+
mark_work_queues_ready,
|
28
|
+
)
|
29
|
+
from prefect.server.models.workers import emit_work_pool_status_event
|
30
|
+
from prefect.server.schemas.statuses import WorkQueueStatus
|
31
|
+
from prefect.server.utilities.server import PrefectRouter
|
32
|
+
from prefect.types import DateTime
|
33
|
+
from prefect.types._datetime import now
|
34
|
+
|
35
|
+
if TYPE_CHECKING:
|
36
|
+
from prefect.server.database.orm_models import ORMWorkQueue
|
37
|
+
|
38
|
+
# Single router for all work pool, work queue, and worker routes in this
# module, mounted under the "/work_pools" URL prefix.
router: PrefectRouter = PrefectRouter(
    prefix="/work_pools",
    tags=["Work Pools"],
)
|
42
|
+
|
43
|
+
|
44
|
+
# -----------------------------------------------------
|
45
|
+
# --
|
46
|
+
# --
|
47
|
+
# -- Utility functions & dependencies
|
48
|
+
# --
|
49
|
+
# --
|
50
|
+
# -----------------------------------------------------
|
51
|
+
|
52
|
+
|
53
|
+
class WorkerLookups:
    """Translate the name-based identifiers used by the public API into the
    internal, id-based identifiers used by the model layer."""

    async def _get_work_pool_id_from_name(
        self, session: AsyncSession, work_pool_name: str
    ) -> UUID:
        """Resolve a work pool name to its ID, raising a 404 if unknown."""
        pool = await models.workers.read_work_pool_by_name(
            session=session,
            work_pool_name=work_pool_name,
        )
        if not pool:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f'Work pool "{work_pool_name}" not found.',
            )
        return pool.id

    async def _get_default_work_queue_id_from_work_pool_name(
        self, session: AsyncSession, work_pool_name: str
    ):
        """Resolve a work pool name to the ID of the pool's default queue,
        raising a 404 if the pool is unknown."""
        pool = await models.workers.read_work_pool_by_name(
            session=session,
            work_pool_name=work_pool_name,
        )
        if not pool:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f'Work pool "{work_pool_name}" not found.',
            )
        return pool.default_queue_id

    async def _get_work_queue_from_name(
        self,
        session: AsyncSession,
        work_pool_name: str,
        work_queue_name: str,
        create_queue_if_not_found: bool = False,
    ) -> "ORMWorkQueue":
        """Resolve a pool name / queue name pair to the queue's ORM record.

        Raises a 404 when the queue does not exist, unless
        ``create_queue_if_not_found`` is set, in which case the queue is
        created on the fly.
        """
        queue = await models.workers.read_work_queue_by_name(
            session=session,
            work_pool_name=work_pool_name,
            work_queue_name=work_queue_name,
        )
        if queue:
            return queue

        if not create_queue_if_not_found:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=(
                    f"Work pool queue '{work_pool_name}/{work_queue_name}' not"
                    " found."
                ),
            )

        pool_id = await self._get_work_pool_id_from_name(
            session=session, work_pool_name=work_pool_name
        )
        return await models.workers.create_work_queue(
            session=session,
            work_pool_id=pool_id,
            work_queue=schemas.actions.WorkQueueCreate(name=work_queue_name),
        )

    async def _get_work_queue_id_from_name(
        self,
        session: AsyncSession,
        work_pool_name: str,
        work_queue_name: str,
        create_queue_if_not_found: bool = False,
    ) -> UUID:
        """Resolve a pool name / queue name pair to the queue's ID."""
        record = await self._get_work_queue_from_name(
            session=session,
            work_pool_name=work_pool_name,
            work_queue_name=work_queue_name,
            create_queue_if_not_found=create_queue_if_not_found,
        )
        return record.id
|
145
|
+
|
146
|
+
|
147
|
+
# -----------------------------------------------------
|
148
|
+
# --
|
149
|
+
# --
|
150
|
+
# -- Worker Pools
|
151
|
+
# --
|
152
|
+
# --
|
153
|
+
# -----------------------------------------------------
|
154
|
+
|
155
|
+
|
156
|
+
@router.post("/", status_code=status.HTTP_201_CREATED)
|
157
|
+
async def create_work_pool(
|
158
|
+
work_pool: schemas.actions.WorkPoolCreate,
|
159
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
160
|
+
) -> schemas.core.WorkPool:
|
161
|
+
"""
|
162
|
+
Creates a new work pool. If a work pool with the same
|
163
|
+
name already exists, an error will be raised.
|
164
|
+
"""
|
165
|
+
if work_pool.name.lower().startswith("prefect"):
|
166
|
+
raise HTTPException(
|
167
|
+
status_code=status.HTTP_403_FORBIDDEN,
|
168
|
+
detail="Work pools starting with 'Prefect' are reserved for internal use.",
|
169
|
+
)
|
170
|
+
|
171
|
+
try:
|
172
|
+
async with db.session_context(begin_transaction=True) as session:
|
173
|
+
await validate_job_variable_defaults_for_work_pool(
|
174
|
+
session, work_pool.name, work_pool.base_job_template
|
175
|
+
)
|
176
|
+
model = await models.workers.create_work_pool(
|
177
|
+
session=session, work_pool=work_pool
|
178
|
+
)
|
179
|
+
|
180
|
+
await emit_work_pool_status_event(
|
181
|
+
event_id=uuid4(),
|
182
|
+
occurred=now("UTC"),
|
183
|
+
pre_update_work_pool=None,
|
184
|
+
work_pool=model,
|
185
|
+
)
|
186
|
+
|
187
|
+
return schemas.core.WorkPool.model_validate(model, from_attributes=True)
|
188
|
+
|
189
|
+
except sa.exc.IntegrityError:
|
190
|
+
raise HTTPException(
|
191
|
+
status_code=status.HTTP_409_CONFLICT,
|
192
|
+
detail="A work pool with this name already exists.",
|
193
|
+
)
|
194
|
+
|
195
|
+
|
196
|
+
@router.get("/{name}")
|
197
|
+
async def read_work_pool(
|
198
|
+
work_pool_name: str = Path(..., description="The work pool name", alias="name"),
|
199
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
200
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
201
|
+
) -> schemas.core.WorkPool:
|
202
|
+
"""
|
203
|
+
Read a work pool by name
|
204
|
+
"""
|
205
|
+
|
206
|
+
async with db.session_context() as session:
|
207
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
208
|
+
session=session, work_pool_name=work_pool_name
|
209
|
+
)
|
210
|
+
orm_work_pool = await models.workers.read_work_pool(
|
211
|
+
session=session, work_pool_id=work_pool_id
|
212
|
+
)
|
213
|
+
return schemas.core.WorkPool.model_validate(orm_work_pool, from_attributes=True)
|
214
|
+
|
215
|
+
|
216
|
+
@router.post("/filter")
|
217
|
+
async def read_work_pools(
|
218
|
+
work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
|
219
|
+
limit: int = dependencies.LimitBody(),
|
220
|
+
offset: int = Body(0, ge=0),
|
221
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
222
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
223
|
+
) -> List[schemas.core.WorkPool]:
|
224
|
+
"""
|
225
|
+
Read multiple work pools
|
226
|
+
"""
|
227
|
+
async with db.session_context() as session:
|
228
|
+
orm_work_pools = await models.workers.read_work_pools(
|
229
|
+
session=session,
|
230
|
+
work_pool_filter=work_pools,
|
231
|
+
offset=offset,
|
232
|
+
limit=limit,
|
233
|
+
)
|
234
|
+
return [
|
235
|
+
schemas.core.WorkPool.model_validate(w, from_attributes=True)
|
236
|
+
for w in orm_work_pools
|
237
|
+
]
|
238
|
+
|
239
|
+
|
240
|
+
@router.post("/count")
|
241
|
+
async def count_work_pools(
|
242
|
+
work_pools: Optional[schemas.filters.WorkPoolFilter] = Body(None, embed=True),
|
243
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
244
|
+
) -> int:
|
245
|
+
"""
|
246
|
+
Count work pools
|
247
|
+
"""
|
248
|
+
async with db.session_context() as session:
|
249
|
+
return await models.workers.count_work_pools(
|
250
|
+
session=session, work_pool_filter=work_pools
|
251
|
+
)
|
252
|
+
|
253
|
+
|
254
|
+
@router.patch("/{name}", status_code=status.HTTP_204_NO_CONTENT)
|
255
|
+
async def update_work_pool(
|
256
|
+
work_pool: schemas.actions.WorkPoolUpdate,
|
257
|
+
work_pool_name: str = Path(..., description="The work pool name", alias="name"),
|
258
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
259
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
260
|
+
) -> None:
|
261
|
+
"""
|
262
|
+
Update a work pool
|
263
|
+
"""
|
264
|
+
|
265
|
+
# Reserved pools can only updated pause / concurrency
|
266
|
+
update_values = work_pool.model_dump(exclude_unset=True)
|
267
|
+
if work_pool_name.lower().startswith("prefect") and (
|
268
|
+
set(update_values).difference({"is_paused", "concurrency_limit"})
|
269
|
+
):
|
270
|
+
raise HTTPException(
|
271
|
+
status_code=status.HTTP_403_FORBIDDEN,
|
272
|
+
detail=(
|
273
|
+
"Work pools starting with 'Prefect' are reserved for internal use "
|
274
|
+
"and can only be updated to set concurrency limits or pause."
|
275
|
+
),
|
276
|
+
)
|
277
|
+
|
278
|
+
async with db.session_context(begin_transaction=True) as session:
|
279
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
280
|
+
session=session, work_pool_name=work_pool_name
|
281
|
+
)
|
282
|
+
await models.workers.update_work_pool(
|
283
|
+
session=session,
|
284
|
+
work_pool_id=work_pool_id,
|
285
|
+
work_pool=work_pool,
|
286
|
+
emit_status_change=emit_work_pool_status_event,
|
287
|
+
)
|
288
|
+
|
289
|
+
|
290
|
+
@router.delete("/{name}", status_code=status.HTTP_204_NO_CONTENT)
|
291
|
+
async def delete_work_pool(
|
292
|
+
work_pool_name: str = Path(..., description="The work pool name", alias="name"),
|
293
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
294
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
295
|
+
) -> None:
|
296
|
+
"""
|
297
|
+
Delete a work pool
|
298
|
+
"""
|
299
|
+
|
300
|
+
if work_pool_name.lower().startswith("prefect"):
|
301
|
+
raise HTTPException(
|
302
|
+
status_code=status.HTTP_403_FORBIDDEN,
|
303
|
+
detail=(
|
304
|
+
"Work pools starting with 'Prefect' are reserved for internal use and"
|
305
|
+
" can not be deleted."
|
306
|
+
),
|
307
|
+
)
|
308
|
+
|
309
|
+
async with db.session_context(begin_transaction=True) as session:
|
310
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
311
|
+
session=session, work_pool_name=work_pool_name
|
312
|
+
)
|
313
|
+
|
314
|
+
await models.workers.delete_work_pool(
|
315
|
+
session=session, work_pool_id=work_pool_id
|
316
|
+
)
|
317
|
+
|
318
|
+
|
319
|
+
@router.post("/{name}/get_scheduled_flow_runs")
|
320
|
+
async def get_scheduled_flow_runs(
|
321
|
+
background_tasks: BackgroundTasks,
|
322
|
+
work_pool_name: str = Path(..., description="The work pool name", alias="name"),
|
323
|
+
work_queue_names: List[str] = Body(
|
324
|
+
None, description="The names of work pool queues"
|
325
|
+
),
|
326
|
+
scheduled_before: DateTime = Body(
|
327
|
+
None, description="The maximum time to look for scheduled flow runs"
|
328
|
+
),
|
329
|
+
scheduled_after: DateTime = Body(
|
330
|
+
None, description="The minimum time to look for scheduled flow runs"
|
331
|
+
),
|
332
|
+
limit: int = dependencies.LimitBody(),
|
333
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
334
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
335
|
+
) -> List[schemas.responses.WorkerFlowRunResponse]:
|
336
|
+
"""
|
337
|
+
Load scheduled runs for a worker
|
338
|
+
"""
|
339
|
+
async with db.session_context() as session:
|
340
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
341
|
+
session=session, work_pool_name=work_pool_name
|
342
|
+
)
|
343
|
+
|
344
|
+
if not work_queue_names:
|
345
|
+
work_queues = list(
|
346
|
+
await models.workers.read_work_queues(
|
347
|
+
session=session, work_pool_id=work_pool_id
|
348
|
+
)
|
349
|
+
)
|
350
|
+
# None here instructs get_scheduled_flow_runs to use the default behavior
|
351
|
+
# of just operating on all work queues of the pool
|
352
|
+
work_queue_ids = None
|
353
|
+
else:
|
354
|
+
work_queues = [
|
355
|
+
await worker_lookups._get_work_queue_from_name(
|
356
|
+
session=session,
|
357
|
+
work_pool_name=work_pool_name,
|
358
|
+
work_queue_name=name,
|
359
|
+
)
|
360
|
+
for name in work_queue_names
|
361
|
+
]
|
362
|
+
work_queue_ids = [wq.id for wq in work_queues]
|
363
|
+
|
364
|
+
async with db.session_context(begin_transaction=True) as session:
|
365
|
+
queue_response = await models.workers.get_scheduled_flow_runs(
|
366
|
+
session=session,
|
367
|
+
work_pool_ids=[work_pool_id],
|
368
|
+
work_queue_ids=work_queue_ids,
|
369
|
+
scheduled_before=scheduled_before,
|
370
|
+
scheduled_after=scheduled_after,
|
371
|
+
limit=limit,
|
372
|
+
)
|
373
|
+
|
374
|
+
background_tasks.add_task(
|
375
|
+
mark_work_queues_ready,
|
376
|
+
polled_work_queue_ids=[
|
377
|
+
wq.id for wq in work_queues if wq.status != WorkQueueStatus.NOT_READY
|
378
|
+
],
|
379
|
+
ready_work_queue_ids=[
|
380
|
+
wq.id for wq in work_queues if wq.status == WorkQueueStatus.NOT_READY
|
381
|
+
],
|
382
|
+
)
|
383
|
+
|
384
|
+
background_tasks.add_task(
|
385
|
+
mark_deployments_ready,
|
386
|
+
work_queue_ids=[wq.id for wq in work_queues],
|
387
|
+
)
|
388
|
+
|
389
|
+
return queue_response
|
390
|
+
|
391
|
+
|
392
|
+
# -----------------------------------------------------
|
393
|
+
# --
|
394
|
+
# --
|
395
|
+
# -- Work Pool Queues
|
396
|
+
# --
|
397
|
+
# --
|
398
|
+
# -----------------------------------------------------
|
399
|
+
|
400
|
+
|
401
|
+
@router.post("/{work_pool_name}/queues", status_code=status.HTTP_201_CREATED)
|
402
|
+
async def create_work_queue(
|
403
|
+
work_queue: schemas.actions.WorkQueueCreate,
|
404
|
+
work_pool_name: str = Path(..., description="The work pool name"),
|
405
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
406
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
407
|
+
) -> schemas.responses.WorkQueueResponse:
|
408
|
+
"""
|
409
|
+
Creates a new work pool queue. If a work pool queue with the same
|
410
|
+
name already exists, an error will be raised.
|
411
|
+
"""
|
412
|
+
|
413
|
+
try:
|
414
|
+
async with db.session_context(begin_transaction=True) as session:
|
415
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
416
|
+
session=session,
|
417
|
+
work_pool_name=work_pool_name,
|
418
|
+
)
|
419
|
+
|
420
|
+
model = await models.workers.create_work_queue(
|
421
|
+
session=session,
|
422
|
+
work_pool_id=work_pool_id,
|
423
|
+
work_queue=work_queue,
|
424
|
+
)
|
425
|
+
except sa.exc.IntegrityError:
|
426
|
+
raise HTTPException(
|
427
|
+
status_code=status.HTTP_409_CONFLICT,
|
428
|
+
detail=(
|
429
|
+
"A work queue with this name already exists in work pool"
|
430
|
+
" {work_pool_name!r}."
|
431
|
+
),
|
432
|
+
)
|
433
|
+
|
434
|
+
return schemas.responses.WorkQueueResponse.model_validate(
|
435
|
+
model, from_attributes=True
|
436
|
+
)
|
437
|
+
|
438
|
+
|
439
|
+
@router.get("/{work_pool_name}/queues/{name}")
|
440
|
+
async def read_work_queue(
|
441
|
+
work_pool_name: str = Path(..., description="The work pool name"),
|
442
|
+
work_queue_name: str = Path(
|
443
|
+
..., description="The work pool queue name", alias="name"
|
444
|
+
),
|
445
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
446
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
447
|
+
) -> schemas.responses.WorkQueueResponse:
|
448
|
+
"""
|
449
|
+
Read a work pool queue
|
450
|
+
"""
|
451
|
+
|
452
|
+
async with db.session_context(begin_transaction=True) as session:
|
453
|
+
work_queue_id = await worker_lookups._get_work_queue_id_from_name(
|
454
|
+
session=session,
|
455
|
+
work_pool_name=work_pool_name,
|
456
|
+
work_queue_name=work_queue_name,
|
457
|
+
)
|
458
|
+
|
459
|
+
model = await models.workers.read_work_queue(
|
460
|
+
session=session, work_queue_id=work_queue_id
|
461
|
+
)
|
462
|
+
|
463
|
+
return schemas.responses.WorkQueueResponse.model_validate(
|
464
|
+
model, from_attributes=True
|
465
|
+
)
|
466
|
+
|
467
|
+
|
468
|
+
@router.post("/{work_pool_name}/queues/filter")
|
469
|
+
async def read_work_queues(
|
470
|
+
work_pool_name: str = Path(..., description="The work pool name"),
|
471
|
+
work_queues: schemas.filters.WorkQueueFilter = None,
|
472
|
+
limit: int = dependencies.LimitBody(),
|
473
|
+
offset: int = Body(0, ge=0),
|
474
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
475
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
476
|
+
) -> List[schemas.responses.WorkQueueResponse]:
|
477
|
+
"""
|
478
|
+
Read all work pool queues
|
479
|
+
"""
|
480
|
+
async with db.session_context() as session:
|
481
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
482
|
+
session=session,
|
483
|
+
work_pool_name=work_pool_name,
|
484
|
+
)
|
485
|
+
wqs = await models.workers.read_work_queues(
|
486
|
+
session=session,
|
487
|
+
work_pool_id=work_pool_id,
|
488
|
+
work_queue_filter=work_queues,
|
489
|
+
limit=limit,
|
490
|
+
offset=offset,
|
491
|
+
)
|
492
|
+
|
493
|
+
return [
|
494
|
+
schemas.responses.WorkQueueResponse.model_validate(wq, from_attributes=True)
|
495
|
+
for wq in wqs
|
496
|
+
]
|
497
|
+
|
498
|
+
|
499
|
+
@router.patch("/{work_pool_name}/queues/{name}", status_code=status.HTTP_204_NO_CONTENT)
|
500
|
+
async def update_work_queue(
|
501
|
+
work_queue: schemas.actions.WorkQueueUpdate,
|
502
|
+
work_pool_name: str = Path(..., description="The work pool name"),
|
503
|
+
work_queue_name: str = Path(
|
504
|
+
..., description="The work pool queue name", alias="name"
|
505
|
+
),
|
506
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
507
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
508
|
+
) -> None:
|
509
|
+
"""
|
510
|
+
Update a work pool queue
|
511
|
+
"""
|
512
|
+
|
513
|
+
async with db.session_context(begin_transaction=True) as session:
|
514
|
+
work_queue_id = await worker_lookups._get_work_queue_id_from_name(
|
515
|
+
work_pool_name=work_pool_name,
|
516
|
+
work_queue_name=work_queue_name,
|
517
|
+
session=session,
|
518
|
+
)
|
519
|
+
|
520
|
+
await models.workers.update_work_queue(
|
521
|
+
session=session,
|
522
|
+
work_queue_id=work_queue_id,
|
523
|
+
work_queue=work_queue,
|
524
|
+
emit_status_change=emit_work_queue_status_event,
|
525
|
+
)
|
526
|
+
|
527
|
+
|
528
|
+
@router.delete(
    "/{work_pool_name}/queues/{name}", status_code=status.HTTP_204_NO_CONTENT
)
async def delete_work_queue(
    work_pool_name: str = Path(..., description="The work pool name"),
    work_queue_name: str = Path(
        ..., description="The work pool queue name", alias="name"
    ),
    worker_lookups: WorkerLookups = Depends(WorkerLookups),
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """
    Delete a work pool queue
    """

    async with db.session_context(begin_transaction=True) as session:
        # Name lookup raises a 404 when the pool/queue pair does not exist.
        queue_id = await worker_lookups._get_work_queue_id_from_name(
            session=session,
            work_pool_name=work_pool_name,
            work_queue_name=work_queue_name,
        )
        await models.workers.delete_work_queue(
            session=session, work_queue_id=queue_id
        )
|
553
|
+
|
554
|
+
|
555
|
+
# -----------------------------------------------------
|
556
|
+
# --
|
557
|
+
# --
|
558
|
+
# -- Workers
|
559
|
+
# --
|
560
|
+
# --
|
561
|
+
# -----------------------------------------------------
|
562
|
+
|
563
|
+
|
564
|
+
@router.post(
    "/{work_pool_name}/workers/heartbeat",
    status_code=status.HTTP_204_NO_CONTENT,
)
async def worker_heartbeat(
    work_pool_name: str = Path(..., description="The work pool name"),
    name: str = Body(..., description="The worker process name", embed=True),
    heartbeat_interval_seconds: Optional[int] = Body(
        None, description="The worker's heartbeat interval in seconds", embed=True
    ),
    worker_lookups: WorkerLookups = Depends(WorkerLookups),
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    # Records a liveness heartbeat for one worker process of the named pool;
    # everything below runs in a single transaction.
    async with db.session_context(begin_transaction=True) as session:
        work_pool = await models.workers.read_work_pool_by_name(
            session=session,
            work_pool_name=work_pool_name,
        )
        if not work_pool:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f'Work pool "{work_pool_name}" not found.',
            )

        # Upsert the worker record with the time of this heartbeat.
        await models.workers.worker_heartbeat(
            session=session,
            work_pool_id=work_pool.id,
            worker_name=name,
            heartbeat_interval_seconds=heartbeat_interval_seconds,
        )

        # A heartbeat proves the pool has at least one live worker: promote a
        # NOT_READY pool to READY (emitting a status event for the change).
        if work_pool.status == schemas.statuses.WorkPoolStatus.NOT_READY:
            await models.workers.update_work_pool(
                session=session,
                work_pool_id=work_pool.id,
                work_pool=schemas.internal.InternalWorkPoolUpdate(
                    status=schemas.statuses.WorkPoolStatus.READY
                ),
                emit_status_change=emit_work_pool_status_event,
            )
|
604
|
+
|
605
|
+
|
606
|
+
@router.post("/{work_pool_name}/workers/filter")
|
607
|
+
async def read_workers(
|
608
|
+
work_pool_name: str = Path(..., description="The work pool name"),
|
609
|
+
workers: Optional[schemas.filters.WorkerFilter] = None,
|
610
|
+
limit: int = dependencies.LimitBody(),
|
611
|
+
offset: int = Body(0, ge=0),
|
612
|
+
worker_lookups: WorkerLookups = Depends(WorkerLookups),
|
613
|
+
db: PrefectDBInterface = Depends(provide_database_interface),
|
614
|
+
) -> List[schemas.responses.WorkerResponse]:
|
615
|
+
"""
|
616
|
+
Read all worker processes
|
617
|
+
"""
|
618
|
+
async with db.session_context() as session:
|
619
|
+
work_pool_id = await worker_lookups._get_work_pool_id_from_name(
|
620
|
+
session=session, work_pool_name=work_pool_name
|
621
|
+
)
|
622
|
+
return await models.workers.read_workers(
|
623
|
+
session=session,
|
624
|
+
work_pool_id=work_pool_id,
|
625
|
+
worker_filter=workers,
|
626
|
+
limit=limit,
|
627
|
+
offset=offset,
|
628
|
+
)
|
629
|
+
|
630
|
+
|
631
|
+
@router.delete(
    "/{work_pool_name}/workers/{name}", status_code=status.HTTP_204_NO_CONTENT
)
async def delete_worker(
    work_pool_name: str = Path(..., description="The work pool name"),
    worker_name: str = Path(
        ..., description="The work pool's worker name", alias="name"
    ),
    worker_lookups: WorkerLookups = Depends(WorkerLookups),
    db: PrefectDBInterface = Depends(provide_database_interface),
) -> None:
    """
    Delete a work pool's worker
    """

    async with db.session_context(begin_transaction=True) as session:
        # Resolve the pool name first; raises a 404 if the pool is unknown.
        pool_id = await worker_lookups._get_work_pool_id_from_name(
            session=session, work_pool_name=work_pool_name
        )
        # delete_worker reports whether a matching worker row existed.
        if not await models.workers.delete_worker(
            session=session, work_pool_id=pool_id, worker_name=worker_name
        ):
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND, detail="Worker not found."
            )
|