fractal-server 2.18.0a3__py3-none-any.whl → 2.18.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/models/v2/job.py +13 -2
- fractal_server/app/models/v2/resource.py +13 -0
- fractal_server/app/routes/admin/v2/__init__.py +10 -12
- fractal_server/app/routes/admin/v2/job.py +15 -15
- fractal_server/app/routes/admin/v2/task.py +7 -7
- fractal_server/app/routes/admin/v2/task_group.py +11 -11
- fractal_server/app/routes/admin/v2/task_group_lifecycle.py +20 -20
- fractal_server/app/routes/api/v2/__init__.py +47 -49
- fractal_server/app/routes/api/v2/_aux_functions.py +22 -47
- fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py +4 -4
- fractal_server/app/routes/api/v2/_aux_functions_tasks.py +2 -2
- fractal_server/app/routes/api/v2/dataset.py +63 -73
- fractal_server/app/routes/api/v2/history.py +7 -5
- fractal_server/app/routes/api/v2/job.py +12 -12
- fractal_server/app/routes/api/v2/project.py +11 -11
- fractal_server/app/routes/api/v2/status_legacy.py +15 -29
- fractal_server/app/routes/api/v2/submit.py +65 -66
- fractal_server/app/routes/api/v2/task.py +15 -17
- fractal_server/app/routes/api/v2/task_collection.py +18 -18
- fractal_server/app/routes/api/v2/task_collection_custom.py +11 -13
- fractal_server/app/routes/api/v2/task_collection_pixi.py +9 -9
- fractal_server/app/routes/api/v2/task_group.py +18 -18
- fractal_server/app/routes/api/v2/task_group_lifecycle.py +26 -26
- fractal_server/app/routes/api/v2/task_version_update.py +5 -5
- fractal_server/app/routes/api/v2/workflow.py +18 -18
- fractal_server/app/routes/api/v2/workflow_import.py +11 -11
- fractal_server/app/routes/api/v2/workflowtask.py +10 -10
- fractal_server/app/routes/auth/_aux_auth.py +99 -0
- fractal_server/app/routes/auth/users.py +9 -0
- fractal_server/app/schemas/user.py +1 -1
- fractal_server/app/schemas/v2/__init__.py +48 -48
- fractal_server/app/schemas/v2/dataset.py +25 -13
- fractal_server/app/schemas/v2/dumps.py +9 -9
- fractal_server/app/schemas/v2/job.py +11 -11
- fractal_server/app/schemas/v2/project.py +3 -3
- fractal_server/app/schemas/v2/resource.py +13 -4
- fractal_server/app/schemas/v2/status_legacy.py +3 -3
- fractal_server/app/schemas/v2/task.py +6 -6
- fractal_server/app/schemas/v2/task_collection.py +4 -4
- fractal_server/app/schemas/v2/task_group.py +16 -16
- fractal_server/app/schemas/v2/workflow.py +16 -16
- fractal_server/app/schemas/v2/workflowtask.py +14 -14
- fractal_server/app/shutdown.py +6 -6
- fractal_server/config/_main.py +1 -1
- fractal_server/data_migrations/{2_18_1.py → 2_18_0.py} +2 -1
- fractal_server/main.py +8 -12
- fractal_server/migrations/versions/88270f589c9b_add_prevent_new_submissions.py +39 -0
- fractal_server/migrations/versions/f0702066b007_one_submitted_job_per_dataset.py +40 -0
- fractal_server/runner/v2/_local.py +3 -2
- fractal_server/runner/v2/_slurm_ssh.py +3 -2
- fractal_server/runner/v2/_slurm_sudo.py +3 -2
- fractal_server/runner/v2/runner.py +36 -17
- fractal_server/runner/v2/runner_functions.py +11 -14
- fractal_server/runner/v2/submit_workflow.py +22 -9
- fractal_server/tasks/v2/local/_utils.py +2 -2
- fractal_server/tasks/v2/local/collect.py +5 -6
- fractal_server/tasks/v2/local/collect_pixi.py +5 -6
- fractal_server/tasks/v2/local/deactivate.py +7 -7
- fractal_server/tasks/v2/local/deactivate_pixi.py +3 -3
- fractal_server/tasks/v2/local/delete.py +5 -5
- fractal_server/tasks/v2/local/reactivate.py +5 -5
- fractal_server/tasks/v2/local/reactivate_pixi.py +5 -5
- fractal_server/tasks/v2/ssh/collect.py +5 -5
- fractal_server/tasks/v2/ssh/collect_pixi.py +5 -5
- fractal_server/tasks/v2/ssh/deactivate.py +7 -7
- fractal_server/tasks/v2/ssh/deactivate_pixi.py +2 -2
- fractal_server/tasks/v2/ssh/delete.py +5 -5
- fractal_server/tasks/v2/ssh/reactivate.py +5 -5
- fractal_server/tasks/v2/ssh/reactivate_pixi.py +5 -5
- fractal_server/tasks/v2/utils_background.py +7 -7
- fractal_server/tasks/v2/utils_database.py +5 -5
- fractal_server/types/__init__.py +13 -4
- fractal_server/types/validators/__init__.py +3 -1
- fractal_server/types/validators/_common_validators.py +23 -1
- {fractal_server-2.18.0a3.dist-info → fractal_server-2.18.0a5.dist-info}/METADATA +1 -1
- {fractal_server-2.18.0a3.dist-info → fractal_server-2.18.0a5.dist-info}/RECORD +80 -78
- {fractal_server-2.18.0a3.dist-info → fractal_server-2.18.0a5.dist-info}/WHEEL +0 -0
- {fractal_server-2.18.0a3.dist-info → fractal_server-2.18.0a5.dist-info}/entry_points.txt +0 -0
- {fractal_server-2.18.0a3.dist-info → fractal_server-2.18.0a5.dist-info}/licenses/LICENSE +0 -0
fractal_server/app/schemas/v2/workflow.py
CHANGED

```diff
@@ -5,29 +5,29 @@ from pydantic import ConfigDict
 from pydantic import field_serializer
 from pydantic.types import AwareDatetime
 
-from fractal_server.app.schemas.v2.project import
-from fractal_server.app.schemas.v2.workflowtask import
-from fractal_server.app.schemas.v2.workflowtask import
-from fractal_server.app.schemas.v2.workflowtask import
+from fractal_server.app.schemas.v2.project import ProjectRead
+from fractal_server.app.schemas.v2.workflowtask import WorkflowTaskExport
+from fractal_server.app.schemas.v2.workflowtask import WorkflowTaskImport
+from fractal_server.app.schemas.v2.workflowtask import WorkflowTaskRead
 from fractal_server.app.schemas.v2.workflowtask import (
-
+    WorkflowTaskReadWithWarning,
 )
 from fractal_server.types import ListUniqueNonNegativeInt
 from fractal_server.types import NonEmptyStr
 
 
-class
+class WorkflowCreate(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
     name: NonEmptyStr
 
 
-class
+class WorkflowRead(BaseModel):
     id: int
     name: str
     project_id: int
-    task_list: list[
-    project:
+    task_list: list[WorkflowTaskRead]
+    project: ProjectRead
     timestamp_created: AwareDatetime
 
     @field_serializer("timestamp_created")
@@ -35,18 +35,18 @@ class WorkflowReadV2(BaseModel):
         return v.isoformat()
 
 
-class
-    task_list: list[
+class WorkflowReadWithWarnings(WorkflowRead):
+    task_list: list[WorkflowTaskReadWithWarning]
 
 
-class
+class WorkflowUpdate(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
     name: NonEmptyStr = None
     reordered_workflowtask_ids: ListUniqueNonNegativeInt | None = None
 
 
-class
+class WorkflowImport(BaseModel):
     """
     Class for `Workflow` import.
 
@@ -56,10 +56,10 @@ class WorkflowImportV2(BaseModel):
 
     model_config = ConfigDict(extra="forbid")
     name: NonEmptyStr
-    task_list: list[
+    task_list: list[WorkflowTaskImport]
 
 
-class
+class WorkflowExport(BaseModel):
     """
     Class for `Workflow` export.
 
@@ -68,4 +68,4 @@ class WorkflowExportV2(BaseModel):
     """
 
     name: str
-    task_list: list[
+    task_list: list[WorkflowTaskExport]
```
fractal_server/app/schemas/v2/workflowtask.py
CHANGED

```diff
@@ -9,14 +9,14 @@ from fractal_server.types import DictStrAny
 from fractal_server.types import TypeFilters
 from fractal_server.types import WorkflowTaskArgument
 
-from .task import
-from .task import
-from .task import
-from .task import
+from .task import TaskExport
+from .task import TaskImport
+from .task import TaskImportLegacy
+from .task import TaskRead
 from .task import TaskType
 
 
-class
+class WorkflowTaskCreate(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
     meta_non_parallel: DictStrAny | None = None
@@ -26,14 +26,14 @@ class WorkflowTaskCreateV2(BaseModel):
     type_filters: TypeFilters = Field(default_factory=dict)
 
 
-class
+class WorkflowTaskReplace(BaseModel):
     """Used by 'replace-task' endpoint"""
 
     args_non_parallel: dict[str, Any] | None = None
     args_parallel: dict[str, Any] | None = None
 
 
-class
+class WorkflowTaskRead(BaseModel):
     id: int
 
     workflow_id: int
@@ -48,14 +48,14 @@ class WorkflowTaskReadV2(BaseModel):
 
     task_type: TaskType
     task_id: int
-    task:
+    task: TaskRead
 
 
-class
+class WorkflowTaskReadWithWarning(WorkflowTaskRead):
     warning: str | None = None
 
 
-class
+class WorkflowTaskUpdate(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
     meta_non_parallel: DictStrAny | None = None
@@ -65,7 +65,7 @@ class WorkflowTaskUpdateV2(BaseModel):
     type_filters: TypeFilters = None
 
 
-class
+class WorkflowTaskImport(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
     meta_non_parallel: DictStrAny | None = None
@@ -75,7 +75,7 @@ class WorkflowTaskImportV2(BaseModel):
     type_filters: TypeFilters | None = None
     input_filters: dict[str, Any] | None = None
 
-    task:
+    task: TaskImport | TaskImportLegacy
 
     @model_validator(mode="before")
     @classmethod
@@ -106,11 +106,11 @@ class WorkflowTaskImportV2(BaseModel):
         return values
 
 
-class
+class WorkflowTaskExport(BaseModel):
     meta_non_parallel: dict[str, Any] | None = None
     meta_parallel: dict[str, Any] | None = None
     args_non_parallel: dict[str, Any] | None = None
     args_parallel: dict[str, Any] | None = None
     type_filters: dict[str, bool] = Field(default_factory=dict)
 
-    task:
+    task: TaskExport
```
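The two schema modules above drop the `V2` suffix from their public class names (`WorkflowRead`, `WorkflowTaskImport`, and so on). A minimal sketch of client code after the rename; the class name and field come from the diff above, the value is invented:

```python
# Illustrative only: `WorkflowCreate` replaces the V2-suffixed name from 2.18.0a3.
from fractal_server.app.schemas.v2.workflow import WorkflowCreate

payload = WorkflowCreate(name="my-workflow")
print(payload.model_dump())
```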
fractal_server/app/shutdown.py
CHANGED

```diff
@@ -4,26 +4,26 @@ from sqlmodel import select
 
 from fractal_server.app.db import get_async_db
 from fractal_server.app.models.v2 import JobV2
-from fractal_server.app.models.v2.job import
+from fractal_server.app.models.v2.job import JobStatusType
 from fractal_server.app.routes.aux._job import _write_shutdown_file
 from fractal_server.config import get_settings
 from fractal_server.logger import get_logger
 from fractal_server.syringe import Inject
 
 
-async def cleanup_after_shutdown(*,
+async def cleanup_after_shutdown(*, jobs: list[int], logger_name: str):
     settings = Inject(get_settings)
     logger = get_logger(logger_name)
     logger.info("Cleanup function after shutdown")
     stm_objects = (
         select(JobV2)
-        .where(JobV2.id.in_(
-        .where(JobV2.status ==
+        .where(JobV2.id.in_(jobs))
+        .where(JobV2.status == JobStatusType.SUBMITTED)
     )
     stm_ids = (
         select(JobV2.id)
-        .where(JobV2.id.in_(
-        .where(JobV2.status ==
+        .where(JobV2.id.in_(jobs))
+        .where(JobV2.status == JobStatusType.SUBMITTED)
     )
 
     async for session in get_async_db():
```
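For reference, the reworked `lifespan` teardown in `fractal_server/main.py` (see its diff further down) now calls this helper with the plain list of job ids kept on `app.state`. A sketch of that call site, mirroring the main.py hunk:

```python
# Sketch of the call site only; it mirrors the main.py hunk below.
from fastapi import FastAPI

from fractal_server.app.shutdown import cleanup_after_shutdown


async def _teardown(app: FastAPI) -> None:
    # `app.state.jobs` is the list of job ids accumulated at submission time.
    await cleanup_after_shutdown(
        jobs=app.state.jobs,
        logger_name="fractal_server.lifespan",
    )
```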
fractal_server/config/_main.py
CHANGED

```diff
@@ -28,7 +28,7 @@ class Settings(BaseSettings):
         Only logs of with this level (or higher) will appear in the console
         logs.
     FRACTAL_API_MAX_JOB_LIST_LENGTH:
-        Number of ids that can be stored in the `
+        Number of ids that can be stored in the `jobs` attribute of
         `app.state`.
     FRACTAL_GRACEFUL_SHUTDOWN_TIME:
         Waiting time for the shutdown phase of executors, in seconds.
```
fractal_server/data_migrations/{2_18_1.py → 2_18_0.py}
RENAMED

```diff
@@ -1,5 +1,6 @@
 import logging
 import sys
+from os.path import normpath
 
 from sqlalchemy.orm.attributes import flag_modified
 from sqlmodel import select
@@ -21,7 +22,7 @@ def fix_db():
         logging.info(f"Now handling user {user.email}.")
         if user.project_dirs != []:
             sys.exit(f"Non empty `project_dirs` for User[{user.id}]")
-        user.project_dirs.append(user.project_dir)
+        user.project_dirs.append(normpath(user.project_dir))
         flag_modified(user, "project_dirs")
 
     db.commit()
```
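The only functional change in the renamed data-migration script is that `project_dir` is now passed through `os.path.normpath` before being appended to `project_dirs`. A quick illustration of what that normalization does:

```python
from os.path import normpath

# normpath collapses redundant separators and trailing slashes:
print(normpath("/data/projects/user1/"))    # /data/projects/user1
print(normpath("/data//projects/./user1"))  # /data/projects/user1
```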
fractal_server/main.py
CHANGED

```diff
@@ -33,16 +33,14 @@ def collect_routers(app: FastAPI) -> None:
     app:
         The application to register the routers to.
     """
-    from .app.routes.admin.v2 import
+    from .app.routes.admin.v2 import router_admin
     from .app.routes.api import router_api
-    from .app.routes.api.v2 import router_api_v2
+    from .app.routes.api.v2 import router_api as router_api_v2
     from .app.routes.auth.router import router_auth
 
     app.include_router(router_api, prefix="/api")
     app.include_router(router_api_v2, prefix="/api/v2")
-    app.include_router(
-        router_admin_v2, prefix="/admin/v2", tags=["V2 Admin area"]
-    )
+    app.include_router(router_admin, prefix="/admin/v2", tags=["Admin area"])
     app.include_router(router_auth, prefix="/auth", tags=["Authentication"])
 
 
@@ -74,7 +72,7 @@ def check_settings() -> None:
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    app.state.
+    app.state.jobs = []
     logger = set_logger("fractal_server.lifespan")
     logger.info(f"[startup] START (fractal-server {__VERSION__})")
     check_settings()
@@ -111,12 +109,12 @@ async def lifespan(app: FastAPI):
 
     logger.info(
         f"[teardown] Current worker with pid {os.getpid()} is shutting down. "
-        f"Current jobs: {app.state.
+        f"Current jobs: {app.state.jobs=}"
     )
     if _backend_supports_shutdown(settings.FRACTAL_RUNNER_BACKEND):
         try:
             await cleanup_after_shutdown(
-
+                jobs=app.state.jobs,
                 logger_name="fractal_server.lifespan",
             )
         except Exception as e:
@@ -136,8 +134,6 @@ async def lifespan(app: FastAPI):
 
 slow_response_logger = set_logger("slow-response")
 
-MIDDLEWARE_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
-
 
 class SlowResponseMiddleware:
     def __init__(self, app: FastAPI, time_threshold: float):
@@ -173,8 +169,8 @@ class SlowResponseMiddleware:
                 f"?{scope['query_string'].decode('utf-8')}, "
                 f"{context['status_code']}, "
                 f"{request_time:.2f}, "
-                f"{start_timestamp.
-                f"{end_timestamp.
+                f"{start_timestamp.isoformat(timespec='milliseconds')}, "
+                f"{end_timestamp.isoformat(timespec='milliseconds')}"
             )
 
 
```
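The slow-response middleware drops its custom strftime format and logs ISO-8601 timestamps with millisecond precision instead. A quick illustration of the new timestamp format (standard-library behavior):

```python
from datetime import datetime, timezone

ts = datetime(2025, 12, 2, 12, 34, 11, 28259, tzinfo=timezone.utc)
print(ts.isoformat(timespec="milliseconds"))
# 2025-12-02T12:34:11.028+00:00
```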
fractal_server/migrations/versions/88270f589c9b_add_prevent_new_submissions.py
ADDED

```diff
@@ -0,0 +1,39 @@
+"""add_prevent_new_submissions
+
+Revision ID: 88270f589c9b
+Revises: f0702066b007
+Create Date: 2025-12-02 12:34:11.028259
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "88270f589c9b"
+down_revision = "f0702066b007"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("resource", schema=None) as batch_op:
+        batch_op.add_column(
+            sa.Column(
+                "prevent_new_submissions",
+                sa.BOOLEAN(),
+                server_default="false",
+                nullable=False,
+            )
+        )
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("resource", schema=None) as batch_op:
+        batch_op.drop_column("prevent_new_submissions")
+
+    # ### end Alembic commands ###
```
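This migration adds a non-nullable boolean `prevent_new_submissions` column (server default `false`) to the `resource` table; the ORM-side change lives in `fractal_server/app/models/v2/resource.py` (+13 lines in this release), which is not part of this diff. A minimal sketch of one plausible way to declare such a field on a SQLModel table model, given only the migration above:

```python
# Sketch only: the real Resource model is not shown in this diff.
import sqlalchemy as sa
from sqlmodel import Field, SQLModel


class Resource(SQLModel, table=True):
    id: int | None = Field(default=None, primary_key=True)
    # Matches the Alembic migration above: BOOLEAN, NOT NULL, server default "false".
    prevent_new_submissions: bool = Field(
        default=False,
        sa_column=sa.Column(sa.Boolean, nullable=False, server_default="false"),
    )
```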
fractal_server/migrations/versions/f0702066b007_one_submitted_job_per_dataset.py
ADDED

```diff
@@ -0,0 +1,40 @@
+"""One submitted Job per Dataset
+
+Revision ID: f0702066b007
+Revises: 7910eed4cf97
+Create Date: 2025-12-01 20:54:03.137093
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "f0702066b007"
+down_revision = "7910eed4cf97"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("jobv2", schema=None) as batch_op:
+        batch_op.create_index(
+            "ix_jobv2_one_submitted_job_per_dataset",
+            ["dataset_id"],
+            unique=True,
+            postgresql_where=sa.text("status = 'submitted'"),
+        )
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("jobv2", schema=None) as batch_op:
+        batch_op.drop_index(
+            "ix_jobv2_one_submitted_job_per_dataset",
+            postgresql_where=sa.text("status = 'submitted'"),
+        )
+
+    # ### end Alembic commands ###
```
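This migration enforces "at most one submitted job per dataset" via a partial unique index on `jobv2.dataset_id`, restricted to rows whose `status` is `'submitted'`; the matching model change is the `+13 -2` edit to `fractal_server/app/models/v2/job.py`, which is not shown here. A sketch of how the same index could be expressed in `__table_args__`; the model layout below is an assumption for illustration only:

```python
# Sketch only: a table-level partial unique index equivalent to the migration above.
import sqlalchemy as sa
from sqlmodel import Field, SQLModel


class JobV2(SQLModel, table=True):
    __tablename__ = "jobv2"
    __table_args__ = (
        sa.Index(
            "ix_jobv2_one_submitted_job_per_dataset",
            "dataset_id",
            unique=True,
            postgresql_where=sa.text("status = 'submitted'"),
        ),
    )

    id: int | None = Field(default=None, primary_key=True)
    dataset_id: int
    status: str = "submitted"
```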
fractal_server/runner/v2/_local.py
CHANGED

```diff
@@ -14,7 +14,7 @@ from fractal_server.runner.set_start_and_last_task_index import (
 from fractal_server.ssh._fabric import FractalSSH
 from fractal_server.types import AttributeFilters
 
-from .runner import
+from .runner import execute_tasks
 
 
 def process_workflow(
@@ -90,7 +90,7 @@ def process_workflow(
         resource=resource,
         profile=profile,
     ) as runner:
-
+        execute_tasks(
             wf_task_list=workflow.task_list[
                 first_task_index : (last_task_index + 1)
             ],
@@ -104,4 +104,5 @@ def process_workflow(
             job_attribute_filters=job_attribute_filters,
             job_type_filters=job_type_filters,
             user_id=user_id,
+            resource_id=resource.id,
         )
```
fractal_server/runner/v2/_slurm_ssh.py
CHANGED

```diff
@@ -33,7 +33,7 @@ from fractal_server.runner.set_start_and_last_task_index import (
 from fractal_server.ssh._fabric import FractalSSH
 from fractal_server.types import AttributeFilters
 
-from .runner import
+from .runner import execute_tasks
 
 logger = set_logger(__name__)
 
@@ -113,7 +113,7 @@ def process_workflow(
         common_script_lines=worker_init,
         user_cache_dir=user_cache_dir,
     ) as runner:
-
+        execute_tasks(
             wf_task_list=workflow.task_list[
                 first_task_index : (last_task_index + 1)
             ],
@@ -127,4 +127,5 @@ def process_workflow(
             job_attribute_filters=job_attribute_filters,
             job_type_filters=job_type_filters,
             user_id=user_id,
+            resource_id=resource.id,
         )
```
fractal_server/runner/v2/_slurm_sudo.py
CHANGED

```diff
@@ -32,7 +32,7 @@ from fractal_server.runner.set_start_and_last_task_index import (
 from fractal_server.ssh._fabric import FractalSSH
 from fractal_server.types import AttributeFilters
 
-from .runner import
+from .runner import execute_tasks
 
 
 def process_workflow(
@@ -109,7 +109,7 @@ def process_workflow(
         user_cache_dir=user_cache_dir,
         slurm_account=slurm_account,
     ) as runner:
-
+        execute_tasks(
             wf_task_list=workflow.task_list[
                 first_task_index : (last_task_index + 1)
             ],
@@ -123,4 +123,5 @@ def process_workflow(
             job_attribute_filters=job_attribute_filters,
             job_type_filters=job_type_filters,
             user_id=user_id,
+            resource_id=resource.id,
         )
```
fractal_server/runner/v2/runner.py
CHANGED

```diff
@@ -14,11 +14,12 @@ from fractal_server.app.models.v2 import HistoryImageCache
 from fractal_server.app.models.v2 import HistoryRun
 from fractal_server.app.models.v2 import HistoryUnit
 from fractal_server.app.models.v2 import JobV2
+from fractal_server.app.models.v2 import Resource
 from fractal_server.app.models.v2 import TaskGroupV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
-from fractal_server.app.schemas.v2 import
-from fractal_server.app.schemas.v2 import
+from fractal_server.app.schemas.v2 import TaskDump
+from fractal_server.app.schemas.v2 import TaskGroupDump
 from fractal_server.app.schemas.v2 import TaskType
 from fractal_server.images import SingleImage
 from fractal_server.images.status_tools import IMAGE_STATUS_KEY
@@ -35,9 +36,9 @@ from fractal_server.types import AttributeFilters
 from .merge_outputs import merge_outputs
 from .runner_functions import GetRunnerConfigType
 from .runner_functions import SubmissionOutcome
-from .runner_functions import
-from .runner_functions import
-from .runner_functions import
+from .runner_functions import run_task_compound
+from .runner_functions import run_task_non_parallel
+from .runner_functions import run_task_parallel
 from .task_interface import TaskOutput
 
 
@@ -82,7 +83,7 @@ def get_origin_attribute_and_types(
     return updated_attributes, updated_types
 
 
-def
+def execute_tasks(
     *,
     wf_task_list: list[WorkflowTaskV2],
     dataset: DatasetV2,
@@ -95,6 +96,7 @@ def execute_tasks_v2(
     get_runner_config: GetRunnerConfigType,
     job_type_filters: dict[str, bool],
     job_attribute_filters: AttributeFilters,
+    resource_id: int,
 ) -> None:
     logger = get_logger(logger_name=logger_name)
 
@@ -165,10 +167,10 @@ def execute_tasks_v2(
         # Create dumps for workflowtask and taskgroup
         workflowtask_dump = dict(
             **wftask.model_dump(exclude={"task"}),
-            task=
+            task=TaskDump(**wftask.task.model_dump()).model_dump(),
         )
         task_group = db.get(TaskGroupV2, wftask.task.taskgroupv2_id)
-        task_group_dump =
+        task_group_dump = TaskGroupDump(
             **task_group.model_dump()
         ).model_dump()
         # Create HistoryRun
@@ -211,20 +213,37 @@ def execute_tasks_v2(
                 f"attribute_filters={job_attribute_filters})."
             )
             logger.info(error_msg)
-
-
-
-
-
+            with next(get_sync_db()) as db:
+                update_status_of_history_run(
+                    history_run_id=history_run_id,
+                    status=HistoryUnitStatus.FAILED,
+                    db_sync=db,
+                )
             raise JobExecutionError(error_msg)
 
-        #
+        # Fail if the resource is not open for new submissions
+        with next(get_sync_db()) as db:
+            resource = db.get(Resource, resource_id)
+            if resource.prevent_new_submissions:
+                error_msg = (
+                    f"Cannot run '{task.name}', since the '{resource.name}' "
+                    "resource is not currently active."
+                )
+                logger.info(error_msg)
+                update_status_of_history_run(
+                    history_run_id=history_run_id,
+                    status=HistoryUnitStatus.FAILED,
+                    db_sync=db,
+                )
+                raise JobExecutionError(error_msg)
+
+        # TASK EXECUTION
         try:
             if task.type in [
                 TaskType.NON_PARALLEL,
                 TaskType.CONVERTER_NON_PARALLEL,
             ]:
-                outcomes_dict, num_tasks =
+                outcomes_dict, num_tasks = run_task_non_parallel(
                     images=filtered_images,
                     zarr_dir=zarr_dir,
                     wftask=wftask,
@@ -239,7 +258,7 @@ def execute_tasks_v2(
                     user_id=user_id,
                 )
             elif task.type == TaskType.PARALLEL:
-                outcomes_dict, num_tasks =
+                outcomes_dict, num_tasks = run_task_parallel(
                     images=filtered_images,
                     wftask=wftask,
                     task=task,
@@ -255,7 +274,7 @@ def execute_tasks_v2(
                 TaskType.COMPOUND,
                 TaskType.CONVERTER_COMPOUND,
             ]:
-                outcomes_dict, num_tasks =
+                outcomes_dict, num_tasks = run_task_compound(
                     images=filtered_images,
                     zarr_dir=zarr_dir,
                     wftask=wftask,
```
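The main behavioral addition in `runner.py` is the gate on `Resource.prevent_new_submissions`: before executing a task, the runner re-reads the resource and fails the `HistoryRun` if submissions are paused. A minimal sketch of an operator-side script that flips the flag; the `get_sync_db` import path and the resource id are assumptions not shown in this diff:

```python
# Sketch only: pause a resource so that execute_tasks() refuses to start new tasks.
from fractal_server.app.db import get_sync_db  # assumed import path
from fractal_server.app.models.v2 import Resource

with next(get_sync_db()) as db:
    resource = db.get(Resource, 1)  # hypothetical resource id
    resource.prevent_new_submissions = True
    db.add(resource)
    db.commit()
# From now on, execute_tasks() logs the "not currently active" error and raises
# JobExecutionError, marking the current HistoryRun as FAILED.
```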
fractal_server/runner/v2/runner_functions.py
CHANGED

```diff
@@ -64,9 +64,9 @@ GetRunnerConfigType = GetRunnerConfigTypeLocal | GetRunnerConfigTypeSLURM
 
 
 __all__ = [
-    "
-    "
-    "
+    "run_task_parallel",
+    "run_task_non_parallel",
+    "run_task_compound",
 ]
 
 
@@ -145,7 +145,7 @@ def _check_parallelization_list_size(my_list):
         )
 
 
-def
+def run_task_non_parallel(
     *,
     images: list[dict[str, Any]],
     zarr_dir: str,
@@ -168,9 +168,7 @@ def run_v2_task_non_parallel(
         TaskType.NON_PARALLEL,
         TaskType.CONVERTER_NON_PARALLEL,
     ]:
-        raise ValueError(
-            f"Invalid {task_type=} for `run_v2_task_non_parallel`."
-        )
+        raise ValueError(f"Invalid {task_type=} for `run_task_non_parallel`.")
 
     # Get TaskFiles object
     task_files = TaskFiles(
@@ -213,7 +211,7 @@ def run_v2_task_non_parallel(
         db.commit()
         db.refresh(history_unit)
         logger.debug(
-            "[
+            "[run_task_non_parallel] Created `HistoryUnit` with "
             f"{history_run_id=}."
         )
         history_unit_id = history_unit.id
@@ -265,7 +263,7 @@ def run_v2_task_non_parallel(
     return outcome, num_tasks
 
 
-def
+def run_task_parallel(
     *,
     images: list[dict[str, Any]],
     task: TaskV2,
@@ -326,7 +324,7 @@ def run_v2_task_parallel(
         db.add_all(history_units)
         db.commit()
         logger.debug(
-            f"[
+            f"[run_task_non_parallel] Created {len(history_units)} "
             "`HistoryUnit`s."
         )
 
@@ -388,7 +386,7 @@ def run_v2_task_parallel(
     return outcome, num_tasks
 
 
-def
+def run_task_compound(
     *,
     images: list[dict[str, Any]],
     zarr_dir: str,
@@ -445,7 +443,7 @@ def run_v2_task_compound(
         db.refresh(history_unit)
         init_history_unit_id = history_unit.id
         logger.debug(
-            "[
+            "[run_task_compound] Created `HistoryUnit` with "
             f"{init_history_unit_id=}."
         )
         # Create one `HistoryImageCache` for each input image
@@ -557,8 +555,7 @@ def run_v2_task_compound(
         for history_unit in history_units:
             db.refresh(history_unit)
         logger.debug(
-            f"[
-            "`HistoryUnit`s."
+            f"[run_task_compound] Created {len(history_units)} `HistoryUnit`s."
         )
         history_unit_ids = [history_unit.id for history_unit in history_units]
 
```