fractal-server 2.6.4__py3-none-any.whl → 2.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/__main__.py +1 -1
- fractal_server/app/models/linkusergroup.py +11 -0
- fractal_server/app/models/v2/__init__.py +2 -0
- fractal_server/app/models/v2/collection_state.py +1 -0
- fractal_server/app/models/v2/task.py +67 -2
- fractal_server/app/routes/admin/v2/__init__.py +16 -0
- fractal_server/app/routes/admin/{v2.py → v2/job.py} +20 -191
- fractal_server/app/routes/admin/v2/project.py +43 -0
- fractal_server/app/routes/admin/v2/task.py +133 -0
- fractal_server/app/routes/admin/v2/task_group.py +162 -0
- fractal_server/app/routes/api/v1/task_collection.py +4 -4
- fractal_server/app/routes/api/v2/__init__.py +8 -0
- fractal_server/app/routes/api/v2/_aux_functions.py +1 -68
- fractal_server/app/routes/api/v2/_aux_functions_tasks.py +343 -0
- fractal_server/app/routes/api/v2/submit.py +16 -35
- fractal_server/app/routes/api/v2/task.py +85 -110
- fractal_server/app/routes/api/v2/task_collection.py +184 -196
- fractal_server/app/routes/api/v2/task_collection_custom.py +70 -64
- fractal_server/app/routes/api/v2/task_group.py +173 -0
- fractal_server/app/routes/api/v2/workflow.py +39 -102
- fractal_server/app/routes/api/v2/workflow_import.py +360 -0
- fractal_server/app/routes/api/v2/workflowtask.py +4 -8
- fractal_server/app/routes/auth/_aux_auth.py +86 -40
- fractal_server/app/routes/auth/current_user.py +5 -5
- fractal_server/app/routes/auth/group.py +73 -23
- fractal_server/app/routes/auth/router.py +0 -2
- fractal_server/app/routes/auth/users.py +8 -7
- fractal_server/app/runner/executors/slurm/ssh/executor.py +82 -63
- fractal_server/app/runner/v2/__init__.py +13 -7
- fractal_server/app/runner/v2/task_interface.py +4 -9
- fractal_server/app/schemas/user.py +1 -2
- fractal_server/app/schemas/v2/__init__.py +7 -0
- fractal_server/app/schemas/v2/dataset.py +2 -7
- fractal_server/app/schemas/v2/dumps.py +1 -2
- fractal_server/app/schemas/v2/job.py +1 -1
- fractal_server/app/schemas/v2/manifest.py +25 -1
- fractal_server/app/schemas/v2/project.py +1 -1
- fractal_server/app/schemas/v2/task.py +95 -36
- fractal_server/app/schemas/v2/task_collection.py +8 -6
- fractal_server/app/schemas/v2/task_group.py +85 -0
- fractal_server/app/schemas/v2/workflow.py +7 -2
- fractal_server/app/schemas/v2/workflowtask.py +9 -6
- fractal_server/app/security/__init__.py +8 -1
- fractal_server/config.py +8 -28
- fractal_server/data_migrations/2_7_0.py +323 -0
- fractal_server/images/models.py +2 -4
- fractal_server/main.py +1 -1
- fractal_server/migrations/versions/034a469ec2eb_task_groups.py +184 -0
- fractal_server/ssh/_fabric.py +186 -73
- fractal_server/string_tools.py +6 -2
- fractal_server/tasks/utils.py +19 -5
- fractal_server/tasks/v1/_TaskCollectPip.py +1 -1
- fractal_server/tasks/v1/background_operations.py +5 -5
- fractal_server/tasks/v1/get_collection_data.py +2 -2
- fractal_server/tasks/v2/_venv_pip.py +67 -70
- fractal_server/tasks/v2/background_operations.py +180 -69
- fractal_server/tasks/v2/background_operations_ssh.py +57 -70
- fractal_server/tasks/v2/database_operations.py +44 -0
- fractal_server/tasks/v2/endpoint_operations.py +104 -116
- fractal_server/tasks/v2/templates/_1_create_venv.sh +9 -5
- fractal_server/tasks/v2/templates/{_2_upgrade_pip.sh → _2_preliminary_pip_operations.sh} +1 -0
- fractal_server/tasks/v2/utils.py +5 -0
- fractal_server/utils.py +3 -2
- {fractal_server-2.6.4.dist-info → fractal_server-2.7.0.dist-info}/METADATA +3 -7
- {fractal_server-2.6.4.dist-info → fractal_server-2.7.0.dist-info}/RECORD +69 -60
- fractal_server/app/routes/auth/group_names.py +0 -34
- fractal_server/tasks/v2/_TaskCollectPip.py +0 -132
- {fractal_server-2.6.4.dist-info → fractal_server-2.7.0.dist-info}/LICENSE +0 -0
- {fractal_server-2.6.4.dist-info → fractal_server-2.7.0.dist-info}/WHEEL +0 -0
- {fractal_server-2.6.4.dist-info → fractal_server-2.7.0.dist-info}/entry_points.txt +0 -0
fractal_server/app/routes/auth/group.py:

```diff
@@ -4,6 +4,7 @@ Definition of `/auth/group/` routes
 from fastapi import APIRouter
 from fastapi import Depends
 from fastapi import HTTPException
+from fastapi import Response
 from fastapi import status
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy.ext.asyncio import AsyncSession
@@ -12,14 +13,19 @@ from sqlmodel import func
 from sqlmodel import select
 
 from . import current_active_superuser
-from
-from
-from
-from ...schemas.user_group import UserGroupUpdate
-from ._aux_auth import _get_single_group_with_user_ids
+from ._aux_auth import _get_single_usergroup_with_user_ids
+from ._aux_auth import _usergroup_or_404
+from fractal_server.app.db import get_async_db
 from fractal_server.app.models import LinkUserGroup
 from fractal_server.app.models import UserGroup
 from fractal_server.app.models import UserOAuth
+from fractal_server.app.models import UserSettings
+from fractal_server.app.models.v2 import TaskGroupV2
+from fractal_server.app.schemas.user_group import UserGroupCreate
+from fractal_server.app.schemas.user_group import UserGroupRead
+from fractal_server.app.schemas.user_group import UserGroupUpdate
+from fractal_server.app.schemas.user_settings import UserSettingsUpdate
+from fractal_server.app.security import FRACTAL_DEFAULT_GROUP_NAME
 from fractal_server.logger import set_logger
 
 logger = set_logger(__name__)
@@ -70,7 +76,7 @@ async def get_single_user_group(
     user: UserOAuth = Depends(current_active_superuser),
     db: AsyncSession = Depends(get_async_db),
 ) -> UserGroupRead:
-    group = await
+    group = await _get_single_usergroup_with_user_ids(group_id=group_id, db=db)
     return group
 
 
@@ -118,12 +124,7 @@ async def update_single_group(
     db: AsyncSession = Depends(get_async_db),
 ) -> UserGroupRead:
 
-    group = await
-    if group is None:
-        raise HTTPException(
-            status_code=status.HTTP_404_NOT_FOUND,
-            detail=f"UserGroup {group_id} not found.",
-        )
+    group = await _usergroup_or_404(group_id, db)
 
     # Check that all required users exist
     # Note: The reason for introducing `col` is as in
@@ -167,25 +168,74 @@ async def update_single_group(
     db.add(group)
     await db.commit()
 
-    updated_group = await
+    updated_group = await _get_single_usergroup_with_user_ids(
         group_id=group_id, db=db
     )
 
     return updated_group
 
 
-@router_group.delete(
-    "/group/{group_id}/", status_code=status.HTTP_405_METHOD_NOT_ALLOWED
-)
+@router_group.delete("/group/{group_id}/", status_code=204)
 async def delete_single_group(
     group_id: int,
     user: UserOAuth = Depends(current_active_superuser),
     db: AsyncSession = Depends(get_async_db),
-) ->
-
-
-
-
-
-
+) -> Response:
+
+    group = await _usergroup_or_404(group_id, db)
+
+    if group.name == FRACTAL_DEFAULT_GROUP_NAME:
+        raise HTTPException(
+            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+            detail=(
+                "Cannot delete default UserGroup "
+                f"'{FRACTAL_DEFAULT_GROUP_NAME}'."
+            ),
+        )
+
+    # Cascade operations
+
+    res = await db.execute(
+        select(LinkUserGroup).where(LinkUserGroup.group_id == group_id)
     )
+    for link in res.scalars().all():
+        await db.delete(link)
+
+    res = await db.execute(
+        select(TaskGroupV2).where(TaskGroupV2.user_group_id == group_id)
+    )
+    for task_group in res.scalars().all():
+        task_group.user_group_id = None
+        db.add(task_group)
+
+    # Delete
+
+    await db.delete(group)
+    await db.commit()
+
+    return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router_group.patch("/group/{group_id}/user-settings/", status_code=200)
+async def patch_user_settings_bulk(
+    group_id: int,
+    settings_update: UserSettingsUpdate,
+    superuser: UserOAuth = Depends(current_active_superuser),
+    db: AsyncSession = Depends(get_async_db),
+):
+    await _usergroup_or_404(group_id, db)
+    res = await db.execute(
+        select(UserSettings)
+        .join(UserOAuth)
+        .where(LinkUserGroup.user_id == UserOAuth.id)
+        .where(LinkUserGroup.group_id == group_id)
+    )
+    settings_list = res.scalars().all()
+    update = settings_update.dict(exclude_unset=True)
+    for settings in settings_list:
+        for k, v in update.items():
+            setattr(settings, k, v)
+        db.add(settings)
+    await db.commit()
+
+    return Response(status_code=status.HTTP_200_OK)
```
fractal_server/app/routes/auth/router.py:

```diff
@@ -2,7 +2,6 @@ from fastapi import APIRouter
 
 from .current_user import router_current_user
 from .group import router_group
-from .group_names import router_group_names
 from .login import router_login
 from .oauth import router_oauth
 from .register import router_register
@@ -13,7 +12,6 @@ router_auth = APIRouter()
 
 router_auth.include_router(router_register)
 router_auth.include_router(router_current_user)
 router_auth.include_router(router_login)
-router_auth.include_router(router_group_names)
 router_auth.include_router(router_users)
 router_auth.include_router(router_group)
 router_auth.include_router(router_oauth)
```
fractal_server/app/routes/auth/users.py:

```diff
@@ -20,7 +20,7 @@ from ...schemas.user import UserRead
 from ...schemas.user import UserUpdate
 from ...schemas.user import UserUpdateWithNewGroupIds
 from ..aux.validate_user_settings import verify_user_has_settings
-from ._aux_auth import
+from ._aux_auth import _get_single_user_with_groups
 from fractal_server.app.models import LinkUserGroup
 from fractal_server.app.models import UserGroup
 from fractal_server.app.models import UserOAuth
@@ -41,13 +41,14 @@ logger = set_logger(__name__)
 @router_users.get("/users/{user_id}/", response_model=UserRead)
 async def get_user(
     user_id: int,
-
+    group_ids_names: bool = True,
     superuser: UserOAuth = Depends(current_active_superuser),
     db: AsyncSession = Depends(get_async_db),
 ) -> UserRead:
     user = await _user_or_404(user_id, db)
-    if
-
+    if group_ids_names:
+        user_with_groups = await _get_single_user_with_groups(user, db)
+        return user_with_groups
     return user
 
 
@@ -163,12 +164,12 @@ async def patch_user(
     # Nothing to do, just continue
     patched_user = user_to_patch
 
-    # Enrich user object with `
-
+    # Enrich user object with `group_ids_names` attribute
+    patched_user_with_groups = await _get_single_user_with_groups(
         patched_user, db
     )
 
-    return
+    return patched_user_with_groups
 
 
 @router_users.get("/users/", response_model=list[UserRead])
```
fractal_server/app/runner/executors/slurm/ssh/executor.py:

```diff
@@ -861,7 +861,7 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
 
         # Transfer archive
         t_0_put = time.perf_counter()
-        self.fractal_ssh.
+        self.fractal_ssh.send_file(
             local=tarfile_path_local,
             remote=tarfile_path_remote,
         )
@@ -1055,55 +1055,59 @@
         Arguments:
             jobid: ID of the SLURM job
         """
-
-        # Loop over all job_ids, and fetch future and job objects
-        futures: list[Future] = []
-        jobs: list[SlurmJob] = []
-        with self.jobs_lock:
-            for job_id in job_ids:
-                future, job = self.jobs.pop(job_id)
-                futures.append(future)
-                jobs.append(job)
-            if not self.jobs:
-                self.jobs_empty_cond.notify_all()
-
-        # Fetch subfolder from remote host
+        # Handle all uncaught exceptions in this broad try/except block
         try:
-            self._get_subfolder_sftp(jobs=jobs)
-        except NoValidConnectionsError as e:
-            logger.error("NoValidConnectionError")
-            logger.error(f"{str(e)=}")
-            logger.error(f"{e.errors=}")
-            for err in e.errors:
-                logger.error(f"{str(err)}")
-
-            raise e
-
-        # First round of checking whether all output files exist
-        missing_out_paths = []
-        for job in jobs:
-            for ind_out_path, out_path in enumerate(
-                job.output_pickle_files_local
-            ):
-                if not out_path.exists():
-                    missing_out_paths.append(out_path)
-        num_missing = len(missing_out_paths)
-        if num_missing > 0:
-            # Output pickle files may be missing e.g. because of some slow
-            # filesystem operation; wait some time before re-trying
-            settings = Inject(get_settings)
-            sleep_time = settings.FRACTAL_SLURM_ERROR_HANDLING_INTERVAL
             logger.info(
-                f"{num_missing} output pickle files are missing; "
-                f"sleep {sleep_time} seconds."
+                f"[FractalSlurmSSHExecutor._completion] START, for {job_ids=}."
             )
-            for missing_file in missing_out_paths:
-                logger.debug(f"Missing output pickle file: {missing_file}")
-            time.sleep(sleep_time)
 
-
-
+            # Loop over all job_ids, and fetch future and job objects
+            futures: list[Future] = []
+            jobs: list[SlurmJob] = []
+            with self.jobs_lock:
+                for job_id in job_ids:
+                    future, job = self.jobs.pop(job_id)
+                    futures.append(future)
+                    jobs.append(job)
+                if not self.jobs:
+                    self.jobs_empty_cond.notify_all()
+
+            # Fetch subfolder from remote host
             try:
+                self._get_subfolder_sftp(jobs=jobs)
+            except NoValidConnectionsError as e:
+                logger.error("NoValidConnectionError")
+                logger.error(f"{str(e)=}")
+                logger.error(f"{e.errors=}")
+                for err in e.errors:
+                    logger.error(f"{str(err)}")
+
+                raise e
+
+            # First round of checking whether all output files exist
+            missing_out_paths = []
+            for job in jobs:
+                for ind_out_path, out_path in enumerate(
+                    job.output_pickle_files_local
+                ):
+                    if not out_path.exists():
+                        missing_out_paths.append(out_path)
+            num_missing = len(missing_out_paths)
+            if num_missing > 0:
+                # Output pickle files may be missing e.g. because of some slow
+                # filesystem operation; wait some time before re-trying
+                settings = Inject(get_settings)
+                sleep_time = settings.FRACTAL_SLURM_ERROR_HANDLING_INTERVAL
+                logger.info(
+                    f"{num_missing} output pickle files are missing; "
+                    f"sleep {sleep_time} seconds."
+                )
+                for missing_file in missing_out_paths:
+                    logger.debug(f"Missing output pickle file: {missing_file}")
+                time.sleep(sleep_time)
+
+            # Handle all jobs
+            for ind_job, job_id in enumerate(job_ids):
                 # Retrieve job and future objects
                 job = jobs[ind_job]
                 future = futures[ind_job]
@@ -1128,6 +1132,11 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
                         remaining_futures=remaining_futures,
                         remaining_job_ids=remaining_job_ids,
                     )
+                    logger.info(
+                        "[FractalSlurmSSHExecutor._completion] END, "
+                        f"for {job_ids=}, with JobExecutionError due "
+                        f"to missing {out_path.as_posix()}."
+                    )
                     return
                 except InvalidStateError:
                     logger.warning(
@@ -1141,6 +1150,12 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
                         remaining_futures=remaining_futures,
                         remaining_job_ids=remaining_job_ids,
                     )
+                    logger.info(
+                        "[FractalSlurmSSHExecutor._completion] END, "
+                        f"for {job_ids=}, with JobExecutionError/"
+                        "InvalidStateError due to "
+                        f"missing {out_path.as_posix()}."
+                    )
                     return
 
             # Read the task output
@@ -1217,16 +1232,22 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
                 else:
                     future.set_result(outputs)
 
-
+        except Exception as e:
+            logger.warning(
+                "[FractalSlurmSSHExecutor._completion] "
+                f"An exception took place: {str(e)}."
+            )
+            for future in futures:
                 try:
+                    logger.info(f"Set exception for {future=}")
                     future.set_exception(e)
-                    return
                 except InvalidStateError:
-                    logger.
-
-
-
+                    logger.info(f"Future {future} was already cancelled.")
+            logger.info(
+                f"[FractalSlurmSSHExecutor._completion] END, for {job_ids=}, "
+                "from within exception handling."
+            )
+            return
 
     def _get_subfolder_sftp(self, jobs: list[SlurmJob]) -> None:
         """
```
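One detail worth calling out in the rewritten handler above: `Future.set_exception` raises `InvalidStateError` when the future has already been cancelled, which is why the new loop logs and keeps going instead of returning on the first failure. A standalone reproduction, standard library only:

```python
# Demonstrates the InvalidStateError branch that the new code handles.
from concurrent.futures import Future, InvalidStateError

fut: Future = Future()
fut.cancel()  # a pending Future can be cancelled

try:
    fut.set_exception(RuntimeError("boom"))  # refused: already cancelled
except InvalidStateError:
    print("future was already cancelled; skip it and continue")
```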
fractal_server/app/runner/executors/slurm/ssh/executor.py (continued):

```diff
@@ -1255,16 +1276,9 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
             self.workflow_dir_remote / f"{subfolder_name}.tar.gz"
         ).as_posix()
 
-        # Remove
-
-
-
-        # Remove remote tarfile - FIXME SSH: is this needed?
-        # rm_command = f"rm {tarfile_path_remote}"
-        # _run_command_over_ssh(cmd=rm_command, fractal_ssh=self.fractal_ssh)
-        logger.warning(f"Unlink {tarfile_path_remote=} - START")
-        self.fractal_ssh.sftp().unlink(tarfile_path_remote)
-        logger.warning(f"Unlink {tarfile_path_remote=} - STOP")
+        # Remove remote tarfile
+        rm_command = f"rm {tarfile_path_remote}"
+        self.fractal_ssh.run_command(cmd=rm_command)
 
         # Create remote tarfile
         tar_command = (
@@ -1278,7 +1292,7 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
 
         # Fetch tarfile
         t_0_get = time.perf_counter()
-        self.fractal_ssh.
+        self.fractal_ssh.fetch_file(
            remote=tarfile_path_remote,
            local=tarfile_path_local,
        )
@@ -1291,6 +1305,11 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
         # Extract tarfile locally
         extract_archive(Path(tarfile_path_local))
 
+        # Remove local tarfile
+        if Path(tarfile_path_local).exists():
+            logger.warning(f"Remove existing file {tarfile_path_local}.")
+            Path(tarfile_path_local).unlink()
+
         t_1 = time.perf_counter()
         logger.info("[_get_subfolder_sftp] End - " f"elapsed: {t_1-t_0:.3f} s")
 
```
fractal_server/app/runner/v2/__init__.py:

```diff
@@ -177,11 +177,13 @@ async def submit_workflow(
         return
 
     try:
-
         # Create WORKFLOW_DIR_LOCAL
-
-
-
+        if FRACTAL_RUNNER_BACKEND == "slurm":
+            original_umask = os.umask(0)
+            WORKFLOW_DIR_LOCAL.mkdir(parents=True, mode=0o755)
+            os.umask(original_umask)
+        else:
+            WORKFLOW_DIR_LOCAL.mkdir(parents=True)
 
         # Define and create WORKFLOW_DIR_REMOTE
         if FRACTAL_RUNNER_BACKEND == "local":
@@ -214,15 +216,19 @@ async def submit_workflow(
             order=order,
             task_name=task_name,
         )
-        original_umask = os.umask(0)
-        (WORKFLOW_DIR_LOCAL / subfolder_name).mkdir(mode=0o755)
-        os.umask(original_umask)
         if FRACTAL_RUNNER_BACKEND == "slurm":
+            # Create local subfolder (with 755) and remote one
+            # (via `sudo -u`)
+            original_umask = os.umask(0)
+            (WORKFLOW_DIR_LOCAL / subfolder_name).mkdir(mode=0o755)
+            os.umask(original_umask)
             _mkdir_as_user(
                 folder=str(WORKFLOW_DIR_REMOTE / subfolder_name),
                 user=slurm_user,
             )
         else:
+            # Create local subfolder (with standard permission set)
+            (WORKFLOW_DIR_LOCAL / subfolder_name).mkdir()
             logger.info("Skip remote-subfolder creation")
     except Exception as e:
         error_type = type(e).__name__
```
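Both `submit_workflow` hunks lean on the same umask detail: `Path.mkdir(mode=0o755)` is filtered through the process umask, so the group/other bits only survive if the umask is cleared first (and restored afterwards). A self-contained illustration, not fractal-server code:

```python
# Shows why mkdir(mode=0o755) needs os.umask(0): the requested mode is
# applied as `mode & ~umask`, so a typical umask of 0o022 or 0o077 would
# silently strip the group/other permission bits.
import os
import stat
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    target = Path(tmp) / "workflow_dir"
    original_umask = os.umask(0)  # disable masking
    try:
        target.mkdir(mode=0o755)  # 0o755 is now applied literally
    finally:
        os.umask(original_umask)  # always restore the process-wide umask
    print(oct(stat.S_IMODE(target.stat().st_mode)))  # -> 0o755
```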
fractal_server/app/runner/v2/task_interface.py:

```diff
@@ -1,6 +1,7 @@
 from typing import Any
 
 from pydantic import BaseModel
+from pydantic import Extra
 from pydantic import Field
 from pydantic import validator
 
@@ -9,9 +10,7 @@ from fractal_server.images import Filters
 from fractal_server.urls import normalize_url
 
 
-class TaskOutput(BaseModel):
-    class Config:
-        extra = "forbid"
+class TaskOutput(BaseModel, extra=Extra.forbid):
 
     image_list_updates: list[SingleImageTaskOutput] = Field(
         default_factory=list
@@ -43,9 +42,7 @@ class TaskOutput(BaseModel):
         return [normalize_url(zarr_url) for zarr_url in v]
 
 
-class InitArgsModel(BaseModel):
-    class Config:
-        extra = "forbid"
+class InitArgsModel(BaseModel, extra=Extra.forbid):
 
     zarr_url: str
     init_args: dict[str, Any] = Field(default_factory=dict)
@@ -55,8 +52,6 @@ class InitArgsModel(BaseModel):
         return normalize_url(v)
 
 
-class InitTaskOutput(BaseModel):
-    class Config:
-        extra = "forbid"
+class InitTaskOutput(BaseModel, extra=Extra.forbid):
 
     parallelization_list: list[InitArgsModel] = Field(default_factory=list)
```
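All three models switch from the nested `class Config` to pydantic v1's class-keyword form; the behavior is identical, and unknown keys are still rejected at validation time. A minimal standalone check (the model name here is invented):

```python
# Equivalent to the old `class Config: extra = "forbid"` under pydantic v1.
from pydantic import BaseModel, Extra, ValidationError

class TaskOutputLike(BaseModel, extra=Extra.forbid):
    zarr_url: str

try:
    TaskOutputLike(zarr_url="/some/zarr", unexpected_key=1)
except ValidationError as e:
    print(e)  # "extra fields not permitted" for `unexpected_key`
```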
fractal_server/app/schemas/user.py:

```diff
@@ -41,8 +41,7 @@ class UserRead(schemas.BaseUser[int]):
     """
 
     username: Optional[str]
-
-    group_ids: Optional[list[int]] = None
+    group_ids_names: Optional[list[tuple[int, str]]] = None
     oauth_accounts: list[OAuthAccountRead]
 
 
```
fractal_server/app/schemas/v2/__init__.py:

```diff
@@ -20,20 +20,27 @@ from .project import ProjectUpdateV2  # noqa F401
 from .task import TaskCreateV2  # noqa F401
 from .task import TaskExportV2  # noqa F401
 from .task import TaskImportV2  # noqa F401
+from .task import TaskImportV2Legacy  # noqa F401
 from .task import TaskReadV2  # noqa F401
 from .task import TaskUpdateV2  # noqa F401
 from .task_collection import CollectionStateReadV2  # noqa F401
 from .task_collection import CollectionStatusV2  # noqa F401
 from .task_collection import TaskCollectCustomV2  # noqa F401
 from .task_collection import TaskCollectPipV2  # noqa F401
+from .task_group import TaskGroupCreateV2  # noqa F401
+from .task_group import TaskGroupReadV2  # noqa F401
+from .task_group import TaskGroupUpdateV2  # noqa F401
+from .task_group import TaskGroupV2OriginEnum  # noqa F401
 from .workflow import WorkflowCreateV2  # noqa F401
 from .workflow import WorkflowExportV2  # noqa F401
 from .workflow import WorkflowImportV2  # noqa F401
 from .workflow import WorkflowReadV2  # noqa F401
+from .workflow import WorkflowReadV2WithWarnings  # noqa F401
 from .workflow import WorkflowUpdateV2  # noqa F401
 from .workflowtask import WorkflowTaskCreateV2  # noqa F401
 from .workflowtask import WorkflowTaskExportV2  # noqa F401
 from .workflowtask import WorkflowTaskImportV2  # noqa F401
 from .workflowtask import WorkflowTaskReadV2  # noqa F401
+from .workflowtask import WorkflowTaskReadV2WithWarning  # noqa F401
 from .workflowtask import WorkflowTaskStatusTypeV2  # noqa F401
 from .workflowtask import WorkflowTaskUpdateV2  # noqa F401
```
fractal_server/app/schemas/v2/dataset.py:

```diff
@@ -66,9 +66,7 @@ class DatasetReadV2(BaseModel):
     )
 
 
-class DatasetUpdateV2(BaseModel):
-    class Config:
-        extra = "forbid"
+class DatasetUpdateV2(BaseModel, extra=Extra.forbid):
 
     name: Optional[str]
     zarr_dir: Optional[str]
@@ -84,7 +82,7 @@ class DatasetUpdateV2(BaseModel):
     _name = validator("name", allow_reuse=True)(valstr("name"))
 
 
-class DatasetImportV2(BaseModel):
+class DatasetImportV2(BaseModel, extra=Extra.forbid):
     """
     Class for `Dataset` import.
 
@@ -95,9 +93,6 @@ class DatasetImportV2(BaseModel):
         filters:
     """
 
-    class Config:
-        extra = "forbid"
-
     name: str
     zarr_dir: str
     images: list[SingleImage] = Field(default_factory=[])
```
fractal_server/app/schemas/v2/manifest.py:

```diff
@@ -7,6 +7,8 @@ from pydantic import HttpUrl
 from pydantic import root_validator
 from pydantic import validator
 
+from .._validators import valstr
+
 
 class TaskManifestV2(BaseModel):
     """
@@ -50,6 +52,10 @@ class TaskManifestV2(BaseModel):
     docs_info: Optional[str] = None
     docs_link: Optional[HttpUrl] = None
 
+    category: Optional[str] = None
+    modality: Optional[str] = None
+    tags: list[str] = Field(default_factory=list)
+
     @root_validator
     def validate_executable_args_meta(cls, values):
 
@@ -128,7 +134,8 @@ class ManifestV2(BaseModel):
     manifest_version: str
     task_list: list[TaskManifestV2]
     has_args_schemas: bool = False
-    args_schema_version: Optional[str]
+    args_schema_version: Optional[str] = None
+    authors: Optional[str] = None
 
     @root_validator()
     def _check_args_schemas_are_present(cls, values):
@@ -152,8 +159,25 @@
         )
         return values
 
+    @root_validator()
+    def _unique_task_names(cls, values):
+        task_list = values["task_list"]
+        task_list_names = [t.name for t in task_list]
+        if len(set(task_list_names)) != len(task_list_names):
+            raise ValueError(
+                (
+                    "Task names in manifest must be unique.\n",
+                    f"Given: {task_list_names}.",
+                )
+            )
+        return values
+
     @validator("manifest_version")
     def manifest_version_2(cls, value):
         if value != "2":
             raise ValueError(f"Wrong manifest version (given {value})")
         return value
+
+    _authors = validator("authors", allow_reuse=True)(
+        valstr("authors", accept_none=True)
+    )
```