fractal-server 2.4.1__py3-none-any.whl → 2.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/__main__.py +3 -4
- fractal_server/app/db/__init__.py +4 -1
- fractal_server/app/models/v1/task.py +0 -5
- fractal_server/app/models/v2/workflowtask.py +2 -10
- fractal_server/app/routes/admin/v2.py +0 -30
- fractal_server/app/routes/api/v2/__init__.py +0 -4
- fractal_server/app/routes/api/v2/_aux_functions.py +11 -46
- fractal_server/app/routes/api/v2/workflow.py +23 -54
- fractal_server/app/routes/api/v2/workflowtask.py +9 -33
- fractal_server/app/routes/auth/_aux_auth.py +11 -5
- fractal_server/app/routes/auth/current_user.py +5 -1
- fractal_server/app/routes/auth/users.py +9 -8
- fractal_server/app/runner/executors/slurm/sudo/_subprocess_run_as_user.py +1 -1
- fractal_server/app/runner/executors/slurm/sudo/executor.py +1 -1
- fractal_server/app/runner/v2/__init__.py +1 -4
- fractal_server/app/runner/v2/_slurm_common/get_slurm_config.py +1 -4
- fractal_server/app/runner/v2/handle_failed_job.py +2 -9
- fractal_server/app/runner/v2/runner.py +42 -70
- fractal_server/app/runner/v2/runner_functions.py +0 -58
- fractal_server/app/runner/v2/runner_functions_low_level.py +7 -21
- fractal_server/app/schemas/user.py +17 -0
- fractal_server/app/schemas/v2/__init__.py +0 -1
- fractal_server/app/schemas/v2/dumps.py +2 -23
- fractal_server/app/schemas/v2/task.py +0 -5
- fractal_server/app/schemas/v2/workflowtask.py +4 -29
- fractal_server/app/security/__init__.py +22 -15
- fractal_server/migrations/env.py +4 -7
- fractal_server/migrations/naming_convention.py +7 -0
- fractal_server/migrations/versions/091b01f51f88_add_usergroup_and_linkusergroup_table.py +1 -1
- fractal_server/migrations/versions/501961cfcd85_remove_link_between_v1_and_v2_tasks_.py +97 -0
- {fractal_server-2.4.1.dist-info → fractal_server-2.5.0.dist-info}/METADATA +1 -1
- {fractal_server-2.4.1.dist-info → fractal_server-2.5.0.dist-info}/RECORD +36 -36
- fractal_server/app/routes/api/v2/task_legacy.py +0 -59
- fractal_server/app/runner/v2/v1_compat.py +0 -31
- {fractal_server-2.4.1.dist-info → fractal_server-2.5.0.dist-info}/LICENSE +0 -0
- {fractal_server-2.4.1.dist-info → fractal_server-2.5.0.dist-info}/WHEEL +0 -0
- {fractal_server-2.4.1.dist-info → fractal_server-2.5.0.dist-info}/entry_points.txt +0 -0
fractal_server/app/runner/v2/runner.py
CHANGED
@@ -17,7 +17,6 @@ from ..filenames import FILTERS_FILENAME
 from ..filenames import HISTORY_FILENAME
 from ..filenames import IMAGES_FILENAME
 from .runner_functions import no_op_submit_setup_call
-from .runner_functions import run_v1_task_parallel
 from .runner_functions import run_v2_task_compound
 from .runner_functions import run_v2_task_non_parallel
 from .runner_functions import run_v2_task_parallel
@@ -53,16 +52,8 @@ def execute_tasks_v2(

     for wftask in wf_task_list:
         task = wftask.task
-
-
-            task_name = task_legacy.name
-            logger.debug(
-                f"SUBMIT {wftask.order}-th task "
-                f'(legacy, name="{task_name}")'
-            )
-        else:
-            task_name = task.name
-            logger.debug(f'SUBMIT {wftask.order}-th task (name="{task_name}")')
+        task_name = task.name
+        logger.debug(f'SUBMIT {wftask.order}-th task (name="{task_name}")')

         # PRE TASK EXECUTION

@@ -78,67 +69,53 @@ def execute_tasks_v2(
             filters=Filters(**pre_filters),
         )
         # Verify that filtered images comply with task input_types
-
-
-
-
-
-
-
-
-                    )
+        for image in filtered_images:
+            if not match_filter(image, Filters(types=task.input_types)):
+                raise JobExecutionError(
+                    "Invalid filtered image list\n"
+                    f"Task input types: {task.input_types=}\n"
+                    f'Image zarr_url: {image["zarr_url"]}\n'
+                    f'Image types: {image["types"]}\n'
+                )

         # TASK EXECUTION (V2)
-        if
-
-                current_task_output = run_v2_task_non_parallel(
-                    images=filtered_images,
-                    zarr_dir=zarr_dir,
-                    wftask=wftask,
-                    task=task,
-                    workflow_dir_local=workflow_dir_local,
-                    workflow_dir_remote=workflow_dir_remote,
-                    executor=executor,
-                    logger_name=logger_name,
-                    submit_setup_call=submit_setup_call,
-                )
-            elif task.type == "parallel":
-                current_task_output = run_v2_task_parallel(
-                    images=filtered_images,
-                    wftask=wftask,
-                    task=task,
-                    workflow_dir_local=workflow_dir_local,
-                    workflow_dir_remote=workflow_dir_remote,
-                    executor=executor,
-                    logger_name=logger_name,
-                    submit_setup_call=submit_setup_call,
-                )
-            elif task.type == "compound":
-                current_task_output = run_v2_task_compound(
-                    images=filtered_images,
-                    zarr_dir=zarr_dir,
-                    wftask=wftask,
-                    task=task,
-                    workflow_dir_local=workflow_dir_local,
-                    workflow_dir_remote=workflow_dir_remote,
-                    executor=executor,
-                    logger_name=logger_name,
-                    submit_setup_call=submit_setup_call,
-                )
-            else:
-                raise ValueError(f"Unexpected error: Invalid {task.type=}.")
-        # TASK EXECUTION (V1)
-        else:
-            current_task_output = run_v1_task_parallel(
+        if task.type == "non_parallel":
+            current_task_output = run_v2_task_non_parallel(
                 images=filtered_images,
+                zarr_dir=zarr_dir,
                 wftask=wftask,
-
+                task=task,
+                workflow_dir_local=workflow_dir_local,
+                workflow_dir_remote=workflow_dir_remote,
                 executor=executor,
                 logger_name=logger_name,
+                submit_setup_call=submit_setup_call,
+            )
+        elif task.type == "parallel":
+            current_task_output = run_v2_task_parallel(
+                images=filtered_images,
+                wftask=wftask,
+                task=task,
+                workflow_dir_local=workflow_dir_local,
+                workflow_dir_remote=workflow_dir_remote,
+                executor=executor,
+                logger_name=logger_name,
+                submit_setup_call=submit_setup_call,
+            )
+        elif task.type == "compound":
+            current_task_output = run_v2_task_compound(
+                images=filtered_images,
+                zarr_dir=zarr_dir,
+                wftask=wftask,
+                task=task,
                 workflow_dir_local=workflow_dir_local,
                 workflow_dir_remote=workflow_dir_remote,
+                executor=executor,
+                logger_name=logger_name,
                 submit_setup_call=submit_setup_call,
             )
+        else:
+            raise ValueError(f"Unexpected error: Invalid {task.type=}.")

         # POST TASK EXECUTION

@@ -191,8 +168,7 @@ def execute_tasks_v2(
                 # Update image attributes/types with task output and manifest
                 updated_attributes.update(image["attributes"])
                 updated_types.update(image["types"])
-
-                    updated_types.update(task.output_types)
+                updated_types.update(task.output_types)

                 # Unset attributes with None value
                 updated_attributes = {
@@ -249,8 +225,7 @@ def execute_tasks_v2(
                     if value is not None
                 }
                 updated_types.update(image["types"])
-
-                    updated_types.update(task.output_types)
+                updated_types.update(task.output_types)
                 new_image = dict(
                     zarr_url=image["zarr_url"],
                     origin=image["origin"],
@@ -282,10 +257,7 @@ def execute_tasks_v2(
         )

         # Find manifest ouptut types
-
-            types_from_manifest = {}
-        else:
-            types_from_manifest = task.output_types
+        types_from_manifest = task.output_types

         # Find task-output types
         if current_task_output.filters is not None:
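With the legacy branches gone, execute_tasks_v2 reads every task from wftask.task and picks a runner purely from task.type. A minimal, self-contained sketch of that dispatch pattern (illustrative only; dispatch_v2_task and the stub runners below are hypothetical stand-ins for the run_v2_task_* functions shown in the hunks above):

from typing import Any, Callable


def dispatch_v2_task(
    task_type: str,
    runners: dict[str, Callable[..., Any]],
    **kwargs: Any,
) -> Any:
    # Pick the runner for a v2 task from task.type alone; unknown types fail
    # loudly, mirroring the ValueError raised in the diff above.
    try:
        runner = runners[task_type]
    except KeyError:
        raise ValueError(f"Unexpected error: Invalid {task_type=}.")
    return runner(**kwargs)


# Usage with stub runners standing in for run_v2_task_non_parallel,
# run_v2_task_parallel and run_v2_task_compound:
stub_runners = {
    "non_parallel": lambda **kw: "non_parallel output",
    "parallel": lambda **kw: "parallel output",
    "compound": lambda **kw: "compound output",
}
print(dispatch_v2_task("compound", stub_runners))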
fractal_server/app/runner/v2/runner_functions.py
CHANGED
@@ -16,8 +16,6 @@ from .merge_outputs import merge_outputs
 from .runner_functions_low_level import run_single_task
 from .task_interface import InitTaskOutput
 from .task_interface import TaskOutput
-from .v1_compat import convert_v2_args_into_v1
-from fractal_server.app.models.v1 import Task as TaskV1
 from fractal_server.app.models.v2 import TaskV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
 from fractal_server.app.runner.components import _COMPONENT_KEY_
@@ -28,7 +26,6 @@ __all__ = [
     "run_v2_task_non_parallel",
     "run_v2_task_parallel",
     "run_v2_task_compound",
-    "run_v1_task_parallel",
 ]

 MAX_PARALLELIZATION_LIST_SIZE = 20_000
@@ -317,58 +314,3 @@ def run_v2_task_compound(

     merged_output = merge_outputs(outputs)
     return merged_output
-
-
-def run_v1_task_parallel(
-    *,
-    images: list[dict[str, Any]],
-    task_legacy: TaskV1,
-    wftask: WorkflowTaskV2,
-    executor: Executor,
-    workflow_dir_local: Path,
-    workflow_dir_remote: Optional[Path] = None,
-    logger_name: Optional[str] = None,
-    submit_setup_call: Callable = no_op_submit_setup_call,
-) -> TaskOutput:
-
-    _check_parallelization_list_size(images)
-
-    executor_options = _get_executor_options(
-        wftask=wftask,
-        workflow_dir_local=workflow_dir_local,
-        workflow_dir_remote=workflow_dir_remote,
-        submit_setup_call=submit_setup_call,
-        which_type="parallel",
-    )
-
-    list_function_kwargs = []
-    for ind, image in enumerate(images):
-        list_function_kwargs.append(
-            convert_v2_args_into_v1(
-                kwargs_v2=dict(
-                    zarr_url=image["zarr_url"],
-                    **(wftask.args_parallel or {}),
-                ),
-                parallelization_level=task_legacy.parallelization_level,
-            ),
-        )
-        list_function_kwargs[-1][_COMPONENT_KEY_] = _index_to_component(ind)
-
-    results_iterator = executor.map(
-        functools.partial(
-            run_single_task,
-            wftask=wftask,
-            command=task_legacy.command,
-            workflow_dir_local=workflow_dir_local,
-            workflow_dir_remote=workflow_dir_remote,
-            is_task_v1=True,
-        ),
-        list_function_kwargs,
-        **executor_options,
-    )
-    # Explicitly iterate over the whole list, so that all futures are waited
-    list(results_iterator)
-
-    # Ignore any output metadata for V1 tasks, and return an empty object
-    out = TaskOutput()
-    return out
fractal_server/app/runner/v2/runner_functions_low_level.py
CHANGED
@@ -61,7 +61,6 @@ def run_single_task(
     workflow_dir_local: Path,
     workflow_dir_remote: Optional[Path] = None,
     logger_name: Optional[str] = None,
-    is_task_v1: bool = False,
 ) -> dict[str, Any]:
     """
     Runs within an executor.
@@ -73,10 +72,7 @@ def run_single_task(
     if not workflow_dir_remote:
         workflow_dir_remote = workflow_dir_local

-
-        task_name = wftask.task_legacy.name
-    else:
-        task_name = wftask.task.name
+    task_name = wftask.task.name

     component = args.pop(_COMPONENT_KEY_, None)
     task_files = get_task_file_paths(
@@ -92,18 +88,11 @@ def run_single_task(
         json.dump(args, f, indent=2)

     # Assemble full command
-
-
-
-
-
-        )
-    else:
-        full_command = (
-            f"{command} "
-            f"--args-json {task_files.args.as_posix()} "
-            f"--out-json {task_files.metadiff.as_posix()}"
-        )
+    full_command = (
+        f"{command} "
+        f"--args-json {task_files.args.as_posix()} "
+        f"--out-json {task_files.metadiff.as_posix()}"
+    )

     try:
         _call_command_wrapper(
@@ -113,10 +102,7 @@ def run_single_task(
     except TaskExecutionError as e:
         e.workflow_task_order = wftask.order
         e.workflow_task_id = wftask.id
-
-            e.task_name = wftask.task_legacy.name
-        else:
-            e.task_name = wftask.task.name
+        e.task_name = wftask.task.name
         raise e

     try:
fractal_server/app/schemas/user.py
CHANGED
@@ -20,6 +20,22 @@ __all__ = (
 )


+class OAuthAccountRead(BaseModel):
+    """
+    Schema for storing essential `OAuthAccount` information within
+    `UserRead.oauth_accounts`.
+
+    Attributes:
+        id: ID of the row in fractal-owned `oauthaccount` table.
+        account_email: Email associated to OAuth account
+        oauth_name: Name of the OAuth provider (e.g. `github`)
+    """
+
+    id: int
+    account_email: str
+    oauth_name: str
+
+
 class UserRead(schemas.BaseUser[int]):
     """
     Schema for `User` read from database.
@@ -37,6 +53,7 @@ class UserRead(schemas.BaseUser[int]):
     slurm_accounts: list[str]
     group_names: Optional[list[str]] = None
     group_ids: Optional[list[int]] = None
+    oauth_accounts: list[OAuthAccountRead]


 class UserUpdate(schemas.BaseUserUpdate):
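The new OAuthAccountRead model is exposed through the oauth_accounts field of UserRead, so user-read endpoints now return a summary of the OAuth accounts linked to each user. A minimal sketch of how the nested model validates, using plain pydantic models that mirror the fields above (UserReadSketch is a simplified, hypothetical stand-in; the real UserRead extends fastapi-users' schemas.BaseUser[int] and carries more fields):

from typing import Optional

from pydantic import BaseModel


class OAuthAccountRead(BaseModel):
    # Fields as added in the diff above.
    id: int
    account_email: str
    oauth_name: str


class UserReadSketch(BaseModel):
    # Simplified stand-in for UserRead, keeping only a few of its fields.
    email: str
    slurm_accounts: list[str] = []
    group_names: Optional[list[str]] = None
    oauth_accounts: list[OAuthAccountRead] = []


user = UserReadSketch(
    email="user@example.org",
    oauth_accounts=[
        {"id": 1, "account_email": "user@example.org", "oauth_name": "github"},
    ],
)
print(user.oauth_accounts[0].oauth_name)  # github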
fractal_server/app/schemas/v2/__init__.py
CHANGED
@@ -20,7 +20,6 @@ from .project import ProjectUpdateV2  # noqa F401
 from .task import TaskCreateV2  # noqa F401
 from .task import TaskExportV2  # noqa F401
 from .task import TaskImportV2  # noqa F401
-from .task import TaskLegacyReadV2  # noqa F401
 from .task import TaskReadV2  # noqa F401
 from .task import TaskUpdateV2  # noqa F401
 from .task_collection import CollectionStateReadV2  # noqa F401
fractal_server/app/schemas/v2/dumps.py
CHANGED
@@ -12,9 +12,7 @@ from typing import Optional

 from pydantic import BaseModel
 from pydantic import Extra
-from pydantic import root_validator

-from fractal_server.app.schemas.v1.dumps import TaskDumpV1
 from fractal_server.images import Filters


@@ -45,29 +43,10 @@ class WorkflowTaskDumpV2(BaseModel):
     workflow_id: int
     order: Optional[int]

-    is_legacy_task: bool
-
     input_filters: Filters

-    task_id:
-    task:
-    task_legacy_id: Optional[int]
-    task_legacy: Optional[TaskDumpV1]
-
-    # Validators
-    @root_validator
-    def task_v1_or_v2(cls, values):
-        v1 = values.get("task_legacy_id")
-        v2 = values.get("task_id")
-        if ((v1 is not None) and (v2 is not None)) or (
-            (v1 is None) and (v2 is None)
-        ):
-            message = "both" if (v1 and v2) else "none"
-            raise ValueError(
-                "One and only one must be provided between "
-                f"'task_legacy_id' and 'task_id' (you provided {message})"
-            )
-        return values
+    task_id: int
+    task: TaskDumpV2


 class WorkflowDumpV2(BaseModel, extra=Extra.forbid):
fractal_server/app/schemas/v2/task.py
CHANGED
@@ -11,7 +11,6 @@ from pydantic import validator

 from .._validators import valdictkeys
 from .._validators import valstr
-from ..v1.task import TaskReadV1


 class TaskCreateV2(BaseModel, extra=Extra.forbid):
@@ -101,10 +100,6 @@ class TaskReadV2(BaseModel):
     output_types: dict[str, bool]


-class TaskLegacyReadV2(TaskReadV1):
-    is_v2_compatible: bool
-
-
 class TaskUpdateV2(BaseModel):

     name: Optional[str]
fractal_server/app/schemas/v2/workflowtask.py
CHANGED
@@ -5,16 +5,12 @@ from typing import Optional
 from pydantic import BaseModel
 from pydantic import Extra
 from pydantic import Field
-from pydantic import root_validator
 from pydantic import validator

 from .._validators import valdictkeys
 from .._validators import valint
-from ..v1.task import TaskExportV1
-from ..v1.task import TaskImportV1
 from .task import TaskExportV2
 from .task import TaskImportV2
-from .task import TaskLegacyReadV2
 from .task import TaskReadV2
 from fractal_server.images import Filters

@@ -49,8 +45,6 @@ class WorkflowTaskCreateV2(BaseModel, extra=Extra.forbid):
     order: Optional[int]
     input_filters: Filters = Field(default_factory=Filters)

-    is_legacy_task: bool = False
-
     # Validators
     _meta_non_parallel = validator("meta_non_parallel", allow_reuse=True)(
         valdictkeys("meta_non_parallel")
@@ -88,18 +82,6 @@ class WorkflowTaskCreateV2(BaseModel, extra=Extra.forbid):
             )
         return value

-    @root_validator
-    def validate_legacy_task(cls, values):
-        if values["is_legacy_task"] and (
-            values.get("meta_non_parallel") is not None
-            or values.get("args_non_parallel") is not None
-        ):
-            raise ValueError(
-                "If Task is legacy, 'args_non_parallel' and 'meta_non_parallel"
-                "must be None"
-            )
-        return values
-

 class WorkflowTaskReadV2(BaseModel):

@@ -115,12 +97,9 @@ class WorkflowTaskReadV2(BaseModel):

     input_filters: Filters

-    is_legacy_task: bool
     task_type: str
-    task_id:
-    task:
-    task_legacy_id: Optional[int]
-    task_legacy: Optional[TaskLegacyReadV2]
+    task_id: int
+    task: TaskReadV2


 class WorkflowTaskUpdateV2(BaseModel):
@@ -177,9 +156,7 @@ class WorkflowTaskImportV2(BaseModel):

     input_filters: Optional[Filters] = None

-
-    task: Optional[TaskImportV2] = None
-    task_legacy: Optional[TaskImportV1] = None
+    task: TaskImportV2

     _meta_non_parallel = validator("meta_non_parallel", allow_reuse=True)(
         valdictkeys("meta_non_parallel")
@@ -203,6 +180,4 @@ class WorkflowTaskExportV2(BaseModel):
     args_parallel: Optional[dict[str, Any]] = None
     input_filters: Filters = Field(default_factory=Filters)

-
-    task: Optional[TaskExportV2]
-    task_legacy: Optional[TaskExportV1]
+    task: TaskExportV2
fractal_server/app/security/__init__.py
CHANGED
@@ -55,9 +55,9 @@ from fractal_server.app.models import OAuthAccount
 from fractal_server.app.models import UserGroup
 from fractal_server.app.models import UserOAuth
 from fractal_server.app.schemas.user import UserCreate
-from fractal_server.logger import
+from fractal_server.logger import set_logger

-logger =
+logger = set_logger(__name__)

 FRACTAL_DEFAULT_GROUP_NAME = "All"

@@ -264,9 +264,10 @@ async def _create_first_user(
         is_verified: `True` if the new user is verifie
         username:
     """
+    function_logger = set_logger("fractal_server.create_first_user")
+    function_logger.info(f"START _create_first_user, with email '{email}'")
     try:
         async with get_async_session_context() as session:
-
             if is_superuser is True:
                 # If a superuser already exists, exit
                 stm = select(UserOAuth).where(  # noqa
@@ -275,9 +276,9 @@ async def _create_first_user(
             res = await session.execute(stm)
             existing_superuser = res.scalars().first()
             if existing_superuser is not None:
-
-                    f"{existing_superuser.email} superuser already
-                    f" skip creation of {email}"
+                function_logger.info(
+                    f"'{existing_superuser.email}' superuser already "
+                    f"exists, skip creation of '{email}'"
                 )
                 return None

@@ -292,15 +293,19 @@ async def _create_first_user(
             if username is not None:
                 kwargs["username"] = username
             user = await user_manager.create(UserCreate(**kwargs))
-
+            function_logger.info(f"User '{user.email}' created")

     except UserAlreadyExists:
-
+        function_logger.warning(f"User '{email}' already exists")
+    finally:
+        function_logger.info(f"END _create_first_user, with email '{email}'")


 def _create_first_group():
-
-
+    function_logger = set_logger("fractal_server.create_first_group")
+
+    function_logger.info(
+        f"START _create_first_group, with name '{FRACTAL_DEFAULT_GROUP_NAME}'"
     )
     with next(get_sync_db()) as db:
         group_all = db.execute(select(UserGroup))
@@ -308,11 +313,13 @@ def _create_first_group():
             first_group = UserGroup(name=FRACTAL_DEFAULT_GROUP_NAME)
             db.add(first_group)
             db.commit()
-
+            function_logger.info(
+                f"Created group '{FRACTAL_DEFAULT_GROUP_NAME}'"
+            )
         else:
-
-                f"Group {FRACTAL_DEFAULT_GROUP_NAME} already exists, skip."
+            function_logger.info(
+                f"Group '{FRACTAL_DEFAULT_GROUP_NAME}' already exists, skip."
             )
-
-        f"END _create_first_group, with name {FRACTAL_DEFAULT_GROUP_NAME}"
+    function_logger.info(
+        f"END _create_first_group, with name '{FRACTAL_DEFAULT_GROUP_NAME}'"
     )
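Instead of reusing the module-level logger, _create_first_user and _create_first_group now build their own named loggers via set_logger and emit explicit START/END messages. A rough equivalent of the pattern using only the standard library (illustrative sketch; fractal-server's set_logger helper also configures handlers and formatting, and the function name here is hypothetical):

import logging


def create_first_user_sketch(email: str) -> None:
    # Dedicated, named logger for this bootstrap step (stand-in for set_logger).
    function_logger = logging.getLogger("fractal_server.create_first_user")
    function_logger.info(f"START _create_first_user, with email '{email}'")
    try:
        ...  # create the user, or warn if it already exists
    finally:
        function_logger.info(f"END _create_first_user, with email '{email}'")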
fractal_server/migrations/env.py
CHANGED
@@ -7,6 +7,7 @@ from sqlmodel import SQLModel

 from fractal_server.app import models  # noqa
 from fractal_server.config import get_settings
+from fractal_server.migrations.naming_convention import NAMING_CONVENTION
 from fractal_server.syringe import Inject

 # this is the Alembic Config object, which provides
@@ -25,13 +26,7 @@ if config.config_file_name is not None:
 # from myapp import mymodel
 # target_metadata = mymodel.Base.metadata
 target_metadata = SQLModel.metadata
-target_metadata.naming_convention =
-    "ix": "ix_%(column_0_label)s",
-    "uq": "uq_%(table_name)s_%(column_0_name)s",
-    "ck": "ck_%(table_name)s_`%(constraint_name)s`",
-    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
-    "pk": "pk_%(table_name)s",
-}
+target_metadata.naming_convention = NAMING_CONVENTION

 # other values from the config, defined by the needs of env.py,
 # can be acquired:
@@ -58,6 +53,7 @@ def run_migrations_offline() -> None:
         target_metadata=target_metadata,
         literal_binds=True,
         dialect_opts={"paramstyle": "named"},
+        render_as_batch=True,
     )

     with context.begin_transaction():
@@ -68,6 +64,7 @@ def do_run_migrations(connection: Connection) -> None:
     context.configure(
         connection=connection,
         target_metadata=target_metadata,
+        render_as_batch=True,
     )

     with context.begin_transaction():
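The constraint-naming dictionary removed from env.py now comes from the new fractal_server/migrations/naming_convention.py module (listed above at +7 lines). Judging from the removed lines, its content is presumably the same mapping, roughly:

# Sketch of fractal_server/migrations/naming_convention.py, reconstructed from
# the dictionary removed from env.py above; the actual file is not shown here.
NAMING_CONVENTION = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_`%(constraint_name)s`",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s",
}

Both context.configure(...) calls also gain render_as_batch=True, which makes Alembic emit batch ("move and copy") operations so that constraint changes of this kind can also run on SQLite.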