fractal-server 2.14.0a13__py3-none-any.whl → 2.14.0a14__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/models/linkusergroup.py +6 -2
- fractal_server/app/models/v2/dataset.py +1 -1
- fractal_server/app/models/v2/job.py +7 -3
- fractal_server/app/models/v2/task_group.py +2 -2
- fractal_server/app/models/v2/workflow.py +1 -1
- fractal_server/app/models/v2/workflowtask.py +1 -1
- fractal_server/app/routes/admin/v2/task_group.py +0 -17
- fractal_server/app/routes/api/v2/dataset.py +0 -8
- fractal_server/app/routes/api/v2/history.py +112 -27
- fractal_server/app/routes/api/v2/images.py +16 -14
- fractal_server/app/routes/api/v2/project.py +0 -52
- fractal_server/app/routes/api/v2/task_group.py +0 -17
- fractal_server/app/routes/api/v2/workflow.py +0 -8
- fractal_server/app/routes/auth/group.py +0 -16
- fractal_server/app/runner/executors/base_runner.py +5 -0
- fractal_server/app/runner/executors/local/runner.py +15 -7
- fractal_server/app/runner/executors/slurm_common/_handle_exception_proxy.py +17 -0
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py +676 -0
- fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py +102 -0
- fractal_server/app/runner/executors/slurm_ssh/runner.py +110 -648
- fractal_server/app/runner/executors/slurm_sudo/runner.py +32 -661
- fractal_server/app/runner/task_files.py +20 -6
- fractal_server/app/runner/v2/_slurm_ssh.py +6 -6
- fractal_server/app/runner/v2/_slurm_sudo.py +4 -4
- fractal_server/app/runner/v2/runner.py +4 -0
- fractal_server/app/runner/v2/runner_functions.py +2 -2
- fractal_server/app/runner/v2/submit_workflow.py +7 -16
- fractal_server/app/schemas/v2/__init__.py +3 -1
- fractal_server/app/schemas/v2/history.py +27 -2
- fractal_server/config.py +6 -2
- fractal_server/images/tools.py +23 -0
- fractal_server/migrations/versions/5b6007027595_on_cascade.py +250 -0
- fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py +2 -2
- fractal_server/tasks/v2/utils_background.py +0 -19
- {fractal_server-2.14.0a13.dist-info → fractal_server-2.14.0a14.dist-info}/METADATA +1 -1
- {fractal_server-2.14.0a13.dist-info → fractal_server-2.14.0a14.dist-info}/RECORD +40 -41
- fractal_server/app/runner/executors/slurm_common/_check_jobs_status.py +0 -77
- fractal_server/app/runner/executors/slurm_ssh/_check_job_status_ssh.py +0 -67
- fractal_server/app/runner/executors/slurm_ssh/_executor_wait_thread.py +0 -126
- fractal_server/app/runner/executors/slurm_ssh/_slurm_job.py +0 -116
- fractal_server/app/runner/executors/slurm_ssh/executor.py +0 -1386
- {fractal_server-2.14.0a13.dist-info → fractal_server-2.14.0a14.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a13.dist-info → fractal_server-2.14.0a14.dist-info}/WHEEL +0 -0
- {fractal_server-2.14.0a13.dist-info → fractal_server-2.14.0a14.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py
CHANGED
@@ -1 +1 @@
-__VERSION__ = "2.14.0a13"
+__VERSION__ = "2.14.0a14"
fractal_server/app/models/linkusergroup.py
CHANGED
@@ -13,8 +13,12 @@ class LinkUserGroup(SQLModel, table=True):
     Crossing table between User and UserGroup
     """
 
-    group_id: int = Field(…
-    …
+    group_id: int = Field(
+        foreign_key="usergroup.id", primary_key=True, ondelete="CASCADE"
+    )
+    user_id: int = Field(
+        foreign_key="user_oauth.id", primary_key=True, ondelete="CASCADE"
+    )
 
     timestamp_created: datetime = Field(
         default_factory=get_timestamp,
fractal_server/app/models/v2/dataset.py
CHANGED
@@ -19,7 +19,7 @@ class DatasetV2(SQLModel, table=True):
     id: Optional[int] = Field(default=None, primary_key=True)
     name: str
 
-    project_id: int = Field(foreign_key="projectv2.id")
+    project_id: int = Field(foreign_key="projectv2.id", ondelete="CASCADE")
     project: "ProjectV2" = Relationship(  # noqa: F821
         sa_relationship_kwargs=dict(lazy="selectin"),
     )
fractal_server/app/models/v2/job.py
CHANGED
@@ -18,11 +18,15 @@ class JobV2(SQLModel, table=True):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     id: Optional[int] = Field(default=None, primary_key=True)
-    project_id: Optional[int] = Field(…
+    project_id: Optional[int] = Field(
+        foreign_key="projectv2.id", default=None, ondelete="SET NULL"
+    )
     workflow_id: Optional[int] = Field(
-        foreign_key="workflowv2.id", default=None
+        foreign_key="workflowv2.id", default=None, ondelete="SET NULL"
+    )
+    dataset_id: Optional[int] = Field(
+        foreign_key="datasetv2.id", default=None, ondelete="SET NULL"
     )
-    dataset_id: Optional[int] = Field(foreign_key="datasetv2.id", default=None)
 
     user_email: str = Field(nullable=False)
     slurm_account: Optional[str] = None
fractal_server/app/models/v2/task_group.py
CHANGED
@@ -23,7 +23,7 @@ class TaskGroupV2(SQLModel, table=True):
 
     user_id: int = Field(foreign_key="user_oauth.id")
     user_group_id: Optional[int] = Field(
-        foreign_key="usergroup.id", default=None
+        foreign_key="usergroup.id", default=None, ondelete="SET NULL"
     )
 
     origin: str
@@ -100,7 +100,7 @@ class TaskGroupActivityV2(SQLModel, table=True):
     id: Optional[int] = Field(default=None, primary_key=True)
     user_id: int = Field(foreign_key="user_oauth.id")
     taskgroupv2_id: Optional[int] = Field(
-        default=None, foreign_key="taskgroupv2.id"
+        default=None, foreign_key="taskgroupv2.id", ondelete="SET NULL"
     )
     timestamp_started: datetime = Field(
         default_factory=get_timestamp,
fractal_server/app/models/v2/workflow.py
CHANGED
@@ -16,7 +16,7 @@ class WorkflowV2(SQLModel, table=True):
 
     id: Optional[int] = Field(default=None, primary_key=True)
     name: str
-    project_id: int = Field(foreign_key="projectv2.id")
+    project_id: int = Field(foreign_key="projectv2.id", ondelete="CASCADE")
     project: "ProjectV2" = Relationship(  # noqa: F821
         sa_relationship_kwargs=dict(lazy="selectin"),
     )
fractal_server/app/models/v2/workflowtask.py
CHANGED
@@ -16,7 +16,7 @@ class WorkflowTaskV2(SQLModel, table=True):
 
     id: Optional[int] = Field(default=None, primary_key=True)
 
-    workflow_id: int = Field(foreign_key="workflowv2.id")
+    workflow_id: int = Field(foreign_key="workflowv2.id", ondelete="CASCADE")
     order: Optional[int] = None
     meta_parallel: Optional[dict[str, Any]] = Field(
         sa_column=Column(JSON), default=None
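
All of the model changes above move delete behavior from the API layer into the database schema: foreign keys on the V2 models now declare `ondelete="CASCADE"` or `ondelete="SET NULL"` directly on the SQLModel `Field`, and the new `5b6007027595_on_cascade.py` migration listed above updates the corresponding constraints. A minimal, self-contained sketch of what such a declaration means, assuming a SQLModel version that supports `Field(ondelete=...)` (toy models, not the fractal-server ones):

```python
# Toy sketch: `ondelete` on a SQLModel Field becomes an ON DELETE clause on
# the generated foreign-key constraint, so the database enforces the rule.
from typing import Optional

from sqlmodel import Field, SQLModel


class Parent(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)


class Child(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    # Deleting the referenced Parent row also deletes this Child row,
    # with no clean-up code needed in the API layer.
    parent_id: int = Field(foreign_key="parent.id", ondelete="CASCADE")
```

With `ondelete="SET NULL"` (as on `JobV2` and `TaskGroupActivityV2`), the column must stay nullable and is reset to `NULL` when the referenced row is deleted.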
fractal_server/app/routes/admin/v2/task_group.py
CHANGED
@@ -192,23 +192,6 @@ async def delete_task_group(
             detail=f"TaskV2 {workflow_tasks[0].task_id} is still in use",
         )
 
-    # Cascade operations: set foreign-keys to null for TaskGroupActivityV2
-    # which are in relationship with the current TaskGroupV2
-    logger.debug("Start of cascade operations on TaskGroupActivityV2.")
-    stm = select(TaskGroupActivityV2).where(
-        TaskGroupActivityV2.taskgroupv2_id == task_group_id
-    )
-    res = await db.execute(stm)
-    task_group_activity_list = res.scalars().all()
-    for task_group_activity in task_group_activity_list:
-        logger.debug(
-            f"Setting TaskGroupActivityV2[{task_group_activity.id}]"
-            ".taskgroupv2_id to None."
-        )
-        task_group_activity.taskgroupv2_id = None
-        db.add(task_group_activity)
-    logger.debug("End of cascade operations on TaskGroupActivityV2.")
-
     await db.delete(task_group)
     await db.commit()
 
fractal_server/app/routes/api/v2/dataset.py
CHANGED
@@ -212,14 +212,6 @@ async def delete_dataset(
             ),
         )
 
-    # Cascade operations: set foreign-keys to null for jobs which are in
-    # relationship with the current dataset
-    stm = select(JobV2).where(JobV2.dataset_id == dataset_id)
-    res = await db.execute(stm)
-    jobs = res.scalars().all()
-    for job in jobs:
-        job.dataset_id = None
-
     # Delete dataset
     await db.delete(dataset)
     await db.commit()
fractal_server/app/routes/api/v2/history.py
CHANGED
@@ -1,3 +1,4 @@
+from typing import Any
 from typing import Optional
 
 from fastapi import APIRouter
@@ -15,6 +16,7 @@ from ._aux_functions_history import get_history_run_or_404
 from ._aux_functions_history import get_history_unit_or_404
 from ._aux_functions_history import get_wftask_check_owner
 from ._aux_functions_history import read_log_file
+from .images import ImageQuery
 from fractal_server.app.db import AsyncSession
 from fractal_server.app.db import get_async_db
 from fractal_server.app.models import UserOAuth
|
@@ -25,15 +27,26 @@ from fractal_server.app.routes.auth import current_active_user
|
|
25
27
|
from fractal_server.app.routes.pagination import get_pagination_params
|
26
28
|
from fractal_server.app.routes.pagination import PaginationRequest
|
27
29
|
from fractal_server.app.routes.pagination import PaginationResponse
|
30
|
+
from fractal_server.app.schemas.v2 import HistoryRunRead
|
28
31
|
from fractal_server.app.schemas.v2 import HistoryRunReadAggregated
|
29
32
|
from fractal_server.app.schemas.v2 import HistoryUnitRead
|
30
33
|
from fractal_server.app.schemas.v2 import HistoryUnitStatus
|
34
|
+
from fractal_server.app.schemas.v2 import HistoryUnitStatusQuery
|
31
35
|
from fractal_server.app.schemas.v2 import ImageLogsRequest
|
32
|
-
from fractal_server.app.schemas.v2 import
|
36
|
+
from fractal_server.app.schemas.v2 import SingleImageWithStatus
|
37
|
+
from fractal_server.images.tools import aggregate_attributes
|
38
|
+
from fractal_server.images.tools import aggregate_types
|
33
39
|
from fractal_server.images.tools import filter_image_list
|
34
40
|
from fractal_server.images.tools import merge_type_filters
|
35
41
|
from fractal_server.logger import set_logger
|
36
42
|
|
43
|
+
|
44
|
+
class ImageWithStatusPage(PaginationResponse[SingleImageWithStatus]):
|
45
|
+
|
46
|
+
attributes: dict[str, list[Any]]
|
47
|
+
types: list[str]
|
48
|
+
|
49
|
+
|
37
50
|
router = APIRouter()
|
38
51
|
logger = set_logger(__name__)
|
39
52
|
|
@@ -70,8 +83,12 @@ async def get_workflow_tasks_statuses(
             .order_by(HistoryRun.timestamp_started.desc())
             .limit(1)
         )
-        latest_history_run = res.…
-        …
+        latest_history_run = res.scalar_one_or_none()
+        logger.debug(  # FIXME: remove
+            f"Given {dataset_id=} and {wftask.id}, "
+            f"found {latest_history_run=}."
+        )
+        if latest_history_run is None:
             response[wftask.id] = None
             continue
         response[wftask.id] = dict(
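
`scalar_one_or_none()` returns the single scalar from the `.limit(1)` query above, or `None` when no `HistoryRun` exists yet. A small standalone sketch of that pattern (toy model and synchronous session, not the fractal-server code):

```python
# Standalone sketch of the scalar_one_or_none pattern: a LIMIT 1 query
# yields either the latest row or None, never a "no rows" exception.
from typing import Optional

from sqlalchemy import select
from sqlmodel import Field, Session, SQLModel, create_engine


class Run(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    dataset_id: int


engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)

with Session(engine) as session:
    stmt = (
        select(Run)
        .where(Run.dataset_id == 1)
        .order_by(Run.id.desc())
        .limit(1)
    )
    latest = session.execute(stmt).scalar_one_or_none()
    print(latest)  # None: no Run rows were inserted
```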
@@ -213,15 +230,17 @@ async def get_history_run_units(
     )
 
 
-@router.…
+@router.post("/project/{project_id}/status/images/")
 async def get_history_images(
     project_id: int,
     dataset_id: int,
     workflowtask_id: int,
+    request_body: ImageQuery,
+    unit_status: Optional[HistoryUnitStatusQuery] = None,
     user: UserOAuth = Depends(current_active_user),
     db: AsyncSession = Depends(get_async_db),
     pagination: PaginationRequest = Depends(get_pagination_params),
-) -> …
+) -> ImageWithStatusPage:
 
     # Access control and object retrieval
     wftask = await get_wftask_check_owner(
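
The images/status endpoint now takes a `POST` body (`ImageQuery`, the filter model from the images routes) along with the optional `unit_status` query parameter. A hedged usage sketch with `httpx`; the base URL, IDs, token, and filter values are placeholders, and it assumes the v2 API router is mounted under `/api/v2`:

```python
# Hypothetical client call; everything concrete here (host, IDs, token,
# filter keys and values) is a placeholder, not taken from the diff.
import httpx

resp = httpx.post(
    "http://localhost:8000/api/v2/project/1/status/images/",
    params={"dataset_id": 1, "workflowtask_id": 2, "page": 1, "page_size": 50},
    json={"type_filters": {"is_3D": False}, "attribute_filters": {}},
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
page = resp.json()
print(page["total_count"], page["attributes"], page["types"])
```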
@@ -264,10 +283,15 @@ async def get_history_images(
     actual_filters.update(type_filters_patch)
     logger.debug(f"{prefix} {actual_filters=}")
     # (1D) Get all matching images from the dataset
-    …
+    pre_filtered_dataset_images = filter_image_list(
         images=dataset.images,
         type_filters=inferred_dataset_type_filters,
     )
+    filtered_dataset_images = filter_image_list(
+        pre_filtered_dataset_images,
+        type_filters=request_body.type_filters,
+        attribute_filters=request_body.attribute_filters,
+    )
     logger.debug(f"{prefix} {len(dataset.images)=}")
     logger.debug(f"{prefix} {len(filtered_dataset_images)=}")
     # (1E) Extract the list of URLs for filtered images
@@ -276,52 +300,84 @@ async def get_history_images(
     )
 
     # (2) Get `(zarr_url, status)` pairs for all images that have already
-    # been processed
-    …
+    # been processed, and
+    # (3) When relevant, find images that have not been processed
+    base_stmt = (
         select(HistoryImageCache.zarr_url, HistoryUnit.status)
         .join(HistoryUnit)
         .where(HistoryImageCache.dataset_id == dataset_id)
         .where(HistoryImageCache.workflowtask_id == workflowtask_id)
         .where(HistoryImageCache.latest_history_unit_id == HistoryUnit.id)
         .where(HistoryImageCache.zarr_url.in_(filtered_dataset_images_url))
-        .order_by(HistoryImageCache.zarr_url)
     )
-    …
+
+    if unit_status in [HistoryUnitStatusQuery.UNSET, None]:
+        stmt = base_stmt.order_by(HistoryImageCache.zarr_url)
+        res = await db.execute(stmt)
+        list_processed_url_status = res.all()
+        list_processed_url = list(
+            item[0] for item in list_processed_url_status
+        )
+        list_non_processed_url_status = list(
+            (url, None)
+            for url in filtered_dataset_images_url
+            if url not in list_processed_url
+        )
+        if unit_status == HistoryUnitStatusQuery.UNSET:
+            list_processed_url_status = []
+    else:
+        stmt = base_stmt.where(HistoryUnit.status == unit_status).order_by(
+            HistoryImageCache.zarr_url
+        )
+        res = await db.execute(stmt)
+        list_processed_url_status = res.all()
+        list_non_processed_url_status = []
+
     logger.debug(f"{prefix} {len(list_processed_url_status)=}")
+    logger.debug(f"{prefix} {len(list_non_processed_url_status)=}")
 
     # (3) Combine outputs from 1 and 2
-    …
-    …
-    …
-    list_non_processed_url_status = list(
-        (url, None)
-        for url in filtered_dataset_images_url
-        if url not in list_processed_url
+    full_list_url_status = (
+        list_processed_url_status + list_non_processed_url_status
     )
-    logger.debug(f"{prefix} {len(…
+    logger.debug(f"{prefix} {len(full_list_url_status)=}")
+
+    attributes = aggregate_attributes(pre_filtered_dataset_images)
+    types = aggregate_types(pre_filtered_dataset_images)
 
     sorted_list_url_status = sorted(
-        …
+        full_list_url_status,
        key=lambda url_status: url_status[0],
     )
     logger.debug(f"{prefix} {len(sorted_list_url_status)=}")
 
     # Final list of objects
-    sorted_list_objects = list(
-        dict(zarr_url=url_status[0], status=url_status[1])
-        for url_status in sorted_list_url_status
-    )
 
-    total_count = len(…
+    total_count = len(sorted_list_url_status)
     page_size = pagination.page_size or total_count
 
+    paginated_list_url_status = sorted_list_url_status[
+        (pagination.page - 1) * page_size : pagination.page * page_size
+    ]
+
+    # Aggregate information to create 'SingleImageWithStatus'
+    items = [
+        {
+            **filtered_dataset_images[
+                filtered_dataset_images_url.index(url_status[0])
+            ],
+            "status": url_status[1],
+        }
+        for url_status in paginated_list_url_status
+    ]
+
     return dict(
         current_page=pagination.page,
         page_size=page_size,
         total_count=total_count,
-        items=…
-        …
-        …
+        items=items,
+        attributes=attributes,
+        types=types,
     )
 
 
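
Pagination is now applied to the combined, sorted `(zarr_url, status)` list before the `SingleImageWithStatus` items are built, using the usual `(page - 1) * page_size` slice. A standalone illustration of that slice arithmetic:

```python
# Standalone illustration of the pagination slice used above.
urls = [f"/zarr/image_{i:03d}" for i in range(10)]

page, page_size = 2, 4
chunk = urls[(page - 1) * page_size : page * page_size]
print(chunk)  # ['/zarr/image_004', '/zarr/image_005', '/zarr/image_006', '/zarr/image_007']
```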
@@ -402,3 +458,32 @@ async def get_history_unit_log(
         dataset_id=dataset_id,
     )
     return JSONResponse(content=log)
+
+
+@router.get("/project/{project_id}/dataset/{dataset_id}/history/")
+async def get_dataset_history(
+    project_id: int,
+    dataset_id: int,
+    user: UserOAuth = Depends(current_active_user),
+    db: AsyncSession = Depends(get_async_db),
+) -> list[HistoryRunRead]:
+    """
+    Returns a list of all HistoryRuns associated to a given dataset, sorted by
+    timestamp.
+    """
+    # Access control
+    await _get_dataset_check_owner(
+        project_id=project_id,
+        dataset_id=dataset_id,
+        user_id=user.id,
+        db=db,
+    )
+
+    res = await db.execute(
+        select(HistoryRun)
+        .where(HistoryRun.dataset_id == dataset_id)
+        .order_by(HistoryRun.timestamp_started)
+    )
+    history_run_list = res.scalars().all()
+
+    return history_run_list
fractal_server/app/routes/api/v2/images.py
CHANGED
@@ -11,10 +11,12 @@ from pydantic import Field
 from pydantic import field_validator
 from pydantic import model_validator
 from sqlalchemy.orm.attributes import flag_modified
+from sqlmodel import delete
 
 from ._aux_functions import _get_dataset_check_owner
 from fractal_server.app.db import AsyncSession
 from fractal_server.app.db import get_async_db
+from fractal_server.app.models import HistoryImageCache
 from fractal_server.app.models import UserOAuth
 from fractal_server.app.routes.auth import current_active_user
 from fractal_server.app.routes.pagination import get_pagination_params
@@ -28,6 +30,8 @@ from fractal_server.app.schemas._validators import root_validate_dict_keys
 from fractal_server.images import SingleImage
 from fractal_server.images import SingleImageUpdate
 from fractal_server.images.models import AttributeFiltersType
+from fractal_server.images.tools import aggregate_attributes
+from fractal_server.images.tools import aggregate_types
 from fractal_server.images.tools import find_image_by_zarr_url
 from fractal_server.images.tools import match_filter
 
@@ -133,16 +137,8 @@ async def query_dataset_images(
     dataset = output["dataset"]
     images = dataset.images
 
-    attributes = …
-    …
-        for k, v in image["attributes"].items():
-            attributes.setdefault(k, []).append(v)
-    for k, v in attributes.items():
-        attributes[k] = list(set(v))
-
-    types = list(
-        set(type for image in images for type in image["types"].keys())
-    )
+    attributes = aggregate_attributes(images)
+    types = aggregate_types(images)
 
     if query is not None:
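
The inline aggregation removed here moves into two helpers in `fractal_server/images/tools.py` (the file gains 23 lines in this release). Their exact implementation is not part of this diff; the following sketch only mirrors the inline logic that was deleted, so the real helpers may differ in details:

```python
# Sketch reconstructed from the removed inline code; not the actual
# implementation in fractal_server/images/tools.py.
from typing import Any


def aggregate_attributes(images: list[dict[str, Any]]) -> dict[str, list[Any]]:
    """Map each attribute key to the deduplicated list of values it takes."""
    attributes: dict[str, list[Any]] = {}
    for image in images:
        for key, value in image["attributes"].items():
            attributes.setdefault(key, []).append(value)
    return {key: list(set(values)) for key, values in attributes.items()}


def aggregate_types(images: list[dict[str, Any]]) -> list[str]:
    """Collect the set of type keys appearing in any image."""
    return list(
        {type_key for image in images for type_key in image["types"].keys()}
    )
```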
@@ -210,10 +206,10 @@ async def delete_dataset_images(
     )
     dataset = output["dataset"]
 
-    image_to_remove = …
-    …
-        None,
+    image_to_remove = find_image_by_zarr_url(
+        images=dataset.images, zarr_url=zarr_url
     )
+
     if image_to_remove is None:
         raise HTTPException(
             status_code=status.HTTP_404_NOT_FOUND,
@@ -223,9 +219,15 @@ async def delete_dataset_images(
             ),
         )
 
-    dataset.images.remove(image_to_remove)
+    dataset.images.remove(image_to_remove["image"])
     flag_modified(dataset, "images")
 
+    await db.execute(
+        delete(HistoryImageCache)
+        .where(HistoryImageCache.dataset_id == dataset_id)
+        .where(HistoryImageCache.zarr_url == zarr_url)
+    )
+
     await db.commit()
 
     return Response(status_code=status.HTTP_204_NO_CONTENT)
fractal_server/app/routes/api/v2/project.py
CHANGED
@@ -11,11 +11,9 @@ from .....logger import reset_logger_handlers
 from .....logger import set_logger
 from ....db import AsyncSession
 from ....db import get_async_db
-from ....models.v2 import DatasetV2
 from ....models.v2 import JobV2
 from ....models.v2 import LinkUserProjectV2
 from ....models.v2 import ProjectV2
-from ....models.v2 import WorkflowV2
 from ....schemas.v2 import ProjectCreateV2
 from ....schemas.v2 import ProjectReadV2
 from ....schemas.v2 import ProjectUpdateV2
@@ -145,56 +143,6 @@ async def delete_project(
         ),
     )
 
-    # Cascade operations
-
-    # Workflows
-    stm = select(WorkflowV2).where(WorkflowV2.project_id == project_id)
-    res = await db.execute(stm)
-    workflows = res.scalars().all()
-    logger.info("Start of cascade operations on Workflows.")
-    for wf in workflows:
-        # Cascade operations: set foreign-keys to null for jobs which are in
-        # relationship with the current workflow
-        stm = select(JobV2).where(JobV2.workflow_id == wf.id)
-        res = await db.execute(stm)
-        jobs = res.scalars().all()
-        for job in jobs:
-            logger.info(f"Setting Job[{job.id}].workflow_id to None.")
-            job.workflow_id = None
-        # Delete workflow
-        logger.info(f"Adding Workflow[{wf.id}] to deletion.")
-        await db.delete(wf)
-    logger.info("End of cascade operations on Workflows.")
-
-    # Dataset
-    stm = select(DatasetV2).where(DatasetV2.project_id == project_id)
-    res = await db.execute(stm)
-    datasets = res.scalars().all()
-    logger.info("Start of cascade operations on Datasets.")
-    for ds in datasets:
-        # Cascade operations: set foreign-keys to null for jobs which are in
-        # relationship with the current dataset
-        stm = select(JobV2).where(JobV2.dataset_id == ds.id)
-        res = await db.execute(stm)
-        jobs = res.scalars().all()
-        for job in jobs:
-            logger.info(f"Setting Job[{job.id}].dataset_id to None.")
-            job.dataset_id = None
-        # Delete dataset
-        logger.info(f"Adding Dataset[{ds.id}] to deletion.")
-        await db.delete(ds)
-    logger.info("End of cascade operations on Datasets.")
-
-    # Job
-    logger.info("Start of cascade operations on Jobs.")
-    stm = select(JobV2).where(JobV2.project_id == project_id)
-    res = await db.execute(stm)
-    jobs = res.scalars().all()
-    for job in jobs:
-        logger.info(f"Setting Job[{job.id}].project_id to None.")
-        job.project_id = None
-    logger.info("End of cascade operations on Jobs.")
-
     logger.info(f"Adding Project[{project.id}] to deletion.")
     await db.delete(project)
 
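
All of this manual bookkeeping is replaced by the database-level rules declared on the models above: `ondelete="CASCADE"` on `DatasetV2.project_id` and `WorkflowV2.project_id`, and `ondelete="SET NULL"` on the `JobV2` foreign keys, so deleting the project row is enough. A self-contained toy demonstration of those two behaviors (not the fractal-server models), assuming a SQLModel version with `Field(ondelete=...)`:

```python
# Toy demonstration: deleting a project cascades to its datasets, while
# jobs survive with their foreign key reset to NULL.
from typing import Optional

from sqlalchemy import event
from sqlmodel import Field, Session, SQLModel, create_engine, select


class Project(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)


class Dataset(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    project_id: int = Field(foreign_key="project.id", ondelete="CASCADE")


class Job(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    project_id: Optional[int] = Field(
        default=None, foreign_key="project.id", ondelete="SET NULL"
    )


engine = create_engine("sqlite://")


@event.listens_for(engine, "connect")
def _enable_sqlite_fks(dbapi_connection, connection_record):
    # SQLite only enforces foreign keys when asked explicitly.
    dbapi_connection.execute("PRAGMA foreign_keys=ON")


SQLModel.metadata.create_all(engine)

with Session(engine) as session:
    project = Project()
    session.add(project)
    session.commit()
    session.add(Dataset(project_id=project.id))
    session.add(Job(project_id=project.id))
    session.commit()

    session.delete(project)
    session.commit()

    print(session.exec(select(Dataset)).all())          # [] (cascaded away)
    print(session.exec(select(Job)).one().project_id)   # None (set to NULL)
```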
fractal_server/app/routes/api/v2/task_group.py
CHANGED
@@ -181,23 +181,6 @@ async def delete_task_group(
             detail=f"TaskV2 {workflow_tasks[0].task_id} is still in use",
         )
 
-    # Cascade operations: set foreign-keys to null for TaskGroupActivityV2
-    # which are in relationship with the current TaskGroupV2
-    logger.debug("Start of cascade operations on TaskGroupActivityV2.")
-    stm = select(TaskGroupActivityV2).where(
-        TaskGroupActivityV2.taskgroupv2_id == task_group_id
-    )
-    res = await db.execute(stm)
-    task_group_activity_list = res.scalars().all()
-    for task_group_activity in task_group_activity_list:
-        logger.debug(
-            f"Setting TaskGroupActivityV2[{task_group_activity.id}]"
-            ".taskgroupv2_id to None."
-        )
-        task_group_activity.taskgroupv2_id = None
-        db.add(task_group_activity)
-    logger.debug("End of cascade operations on TaskGroupActivityV2.")
-
     await db.delete(task_group)
     await db.commit()
 
fractal_server/app/routes/api/v2/workflow.py
CHANGED
@@ -221,14 +221,6 @@ async def delete_workflow(
             ),
         )
 
-    # Cascade operation: set foreign-keys to null for jobs which are in
-    # relationship with the current workflow.
-    stm = select(JobV2).where(JobV2.workflow_id == workflow_id)
-    res = await db.execute(stm)
-    jobs = res.scalars().all()
-    for job in jobs:
-        job.workflow_id = None
-
     # Delete workflow
     await db.delete(workflow)
     await db.commit()
fractal_server/app/routes/auth/group.py
CHANGED
@@ -19,7 +19,6 @@ from fractal_server.app.models import LinkUserGroup
 from fractal_server.app.models import UserGroup
 from fractal_server.app.models import UserOAuth
 from fractal_server.app.models import UserSettings
-from fractal_server.app.models.v2 import TaskGroupV2
 from fractal_server.app.schemas.user_group import UserGroupCreate
 from fractal_server.app.schemas.user_group import UserGroupRead
 from fractal_server.app.schemas.user_group import UserGroupUpdate
@@ -156,21 +155,6 @@ async def delete_single_group(
         ),
     )
 
-    # Cascade operations
-
-    res = await db.execute(
-        select(LinkUserGroup).where(LinkUserGroup.group_id == group_id)
-    )
-    for link in res.scalars().all():
-        await db.delete(link)
-
-    res = await db.execute(
-        select(TaskGroupV2).where(TaskGroupV2.user_group_id == group_id)
-    )
-    for task_group in res.scalars().all():
-        task_group.user_group_id = None
-        db.add(task_group)
-
     # Delete
 
     await db.delete(group)
fractal_server/app/runner/executors/base_runner.py
CHANGED
@@ -2,6 +2,7 @@ from typing import Any
 
 from fractal_server.app.runner.task_files import TaskFiles
 from fractal_server.app.schemas.v2.task import TaskTypeType
+from fractal_server.logger import set_logger
 
 
 TASK_TYPES_SUBMIT: list[TaskTypeType] = [
@@ -16,6 +17,8 @@ TASK_TYPES_MULTISUBMIT: list[TaskTypeType] = [
     "parallel",
 ]
 
+logger = set_logger(__name__)
+
 
 class BaseRunner(object):
     """
@@ -88,6 +91,7 @@ class BaseRunner(object):
             parameters: Parameters dictionary.
             task_type: Task type.s
         """
+        logger.info("[validate_submit_parameters] START")
         if task_type not in TASK_TYPES_SUBMIT:
             raise ValueError(f"Invalid {task_type=} for `submit`.")
         if not isinstance(parameters, dict):
@@ -102,6 +106,7 @@ class BaseRunner(object):
             raise ValueError(
                 f"Forbidden 'zarr_urls' key in {list(parameters.keys())}"
             )
+        logger.info("[validate_submit_parameters] END")
 
     def validate_multisubmit_parameters(
         self,
fractal_server/app/runner/executors/local/runner.py
CHANGED
@@ -6,12 +6,16 @@ from typing import Literal
 
 from .get_local_config import LocalBackendConfig
 from fractal_server.app.db import get_sync_db
+from fractal_server.app.runner.exceptions import TaskExecutionError
 from fractal_server.app.runner.executors.base_runner import BaseRunner
+from fractal_server.app.runner.task_files import MULTISUBMIT_PREFIX
+from fractal_server.app.runner.task_files import SUBMIT_PREFIX
 from fractal_server.app.runner.task_files import TaskFiles
 from fractal_server.app.runner.v2.db_tools import update_status_of_history_unit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
 from fractal_server.logger import set_logger
 
+
 logger = set_logger(__name__)
 
 
@@ -65,6 +69,8 @@ class LocalRunner(BaseRunner):
         workdir_local = task_files.wftask_subfolder_local
         workdir_local.mkdir()
 
+        task_files.prefix = SUBMIT_PREFIX
+
         # SUBMISSION PHASE
         future = self.executor.submit(
             func,
@@ -85,15 +91,13 @@ class LocalRunner(BaseRunner):
             )
             return result, None
         except Exception as e:
-            exception = e
             logger.debug("[submit] END with exception")
             update_status_of_history_unit(
                 history_unit_id=history_unit_id,
                 status=HistoryUnitStatus.FAILED,
                 db_sync=db,
             )
-
-            return None, exception
+            return None, TaskExecutionError(str(e))
 
     def multisubmit(
         self,
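
Errors raised by the task function are now normalized: whatever the callable raises, the runner hands back a `TaskExecutionError` in the second slot of its `(result, exception)` return pair. A self-contained sketch of that convention (the `TaskExecutionError` below is a stand-in class, not the one imported from `fractal_server.app.runner.exceptions`):

```python
# Self-contained sketch of the (result, exception) convention used by the
# runner's submit path after this change.
from typing import Any, Callable, Optional


class TaskExecutionError(Exception):
    """Stand-in for fractal_server.app.runner.exceptions.TaskExecutionError."""


def run_task(
    func: Callable[..., Any], parameters: dict
) -> tuple[Optional[Any], Optional[BaseException]]:
    try:
        return func(**parameters), None
    except Exception as e:
        # Mirror the change above: wrap the original error message.
        return None, TaskExecutionError(str(e))


result, exception = run_task(lambda x: 1 / x, {"x": 0})
assert result is None and isinstance(exception, TaskExecutionError)
```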
@@ -151,12 +155,14 @@ class LocalRunner(BaseRunner):
             active_futures: dict[int, Future] = {}
             for ind_within_chunk, kwargs in enumerate(list_parameters_chunk):
                 positional_index = ind_chunk + ind_within_chunk
+                current_task_files = list_task_files[positional_index]
+                current_task_files.prefix = (
+                    f"{MULTISUBMIT_PREFIX}-{positional_index:06d}"
+                )
                 future = self.executor.submit(
                     func,
                     parameters=kwargs,
-                    remote_files=…
-                        positional_index
-                    ].remote_files_dict,
+                    remote_files=current_task_files.remote_files_dict,
                 )
                 active_futures[positional_index] = future
 
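
Each element of a multi-submit batch now gets its own zero-padded file prefix built from its positional index; `SUBMIT_PREFIX` and `MULTISUBMIT_PREFIX` come from the reworked `task_files.py`, whose values are not shown in this diff. The formatting itself is plain Python string formatting:

```python
# Illustration of the zero-padded prefix; the real MULTISUBMIT_PREFIX is
# defined in fractal_server/app/runner/task_files.py and assumed here.
MULTISUBMIT_PREFIX = "par"  # placeholder value

for positional_index in (0, 7, 123):
    print(f"{MULTISUBMIT_PREFIX}-{positional_index:06d}")
# par-000000
# par-000007
# par-000123
```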
@@ -189,7 +195,9 @@ class LocalRunner(BaseRunner):
                        )
 
                    except Exception as e:
-                        exceptions[positional_index] = …
+                        exceptions[positional_index] = TaskExecutionError(
+                            str(e)
+                        )
                         if task_type == "parallel":
                             update_status_of_history_unit(
                                 history_unit_id=current_history_unit_id,