fractal-server 2.18.0__py3-none-any.whl → 2.18.0a0__py3-none-any.whl

This diff compares the content of two publicly available package versions, as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (97)
  1. fractal_server/__init__.py +1 -1
  2. fractal_server/__main__.py +1 -2
  3. fractal_server/app/models/security.py +5 -7
  4. fractal_server/app/models/v2/job.py +2 -13
  5. fractal_server/app/models/v2/resource.py +0 -13
  6. fractal_server/app/routes/admin/v2/__init__.py +12 -10
  7. fractal_server/app/routes/admin/v2/accounting.py +2 -2
  8. fractal_server/app/routes/admin/v2/job.py +17 -17
  9. fractal_server/app/routes/admin/v2/task.py +8 -8
  10. fractal_server/app/routes/admin/v2/task_group.py +16 -94
  11. fractal_server/app/routes/admin/v2/task_group_lifecycle.py +20 -20
  12. fractal_server/app/routes/api/__init__.py +9 -0
  13. fractal_server/app/routes/api/v2/__init__.py +49 -47
  14. fractal_server/app/routes/api/v2/_aux_functions.py +47 -22
  15. fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py +4 -4
  16. fractal_server/app/routes/api/v2/_aux_functions_tasks.py +2 -2
  17. fractal_server/app/routes/api/v2/dataset.py +60 -66
  18. fractal_server/app/routes/api/v2/history.py +5 -7
  19. fractal_server/app/routes/api/v2/job.py +12 -12
  20. fractal_server/app/routes/api/v2/project.py +11 -11
  21. fractal_server/app/routes/api/v2/sharing.py +2 -1
  22. fractal_server/app/routes/api/v2/status_legacy.py +29 -15
  23. fractal_server/app/routes/api/v2/submit.py +66 -65
  24. fractal_server/app/routes/api/v2/task.py +17 -15
  25. fractal_server/app/routes/api/v2/task_collection.py +18 -18
  26. fractal_server/app/routes/api/v2/task_collection_custom.py +13 -11
  27. fractal_server/app/routes/api/v2/task_collection_pixi.py +9 -9
  28. fractal_server/app/routes/api/v2/task_group.py +18 -18
  29. fractal_server/app/routes/api/v2/task_group_lifecycle.py +26 -26
  30. fractal_server/app/routes/api/v2/task_version_update.py +5 -5
  31. fractal_server/app/routes/api/v2/workflow.py +18 -18
  32. fractal_server/app/routes/api/v2/workflow_import.py +11 -11
  33. fractal_server/app/routes/api/v2/workflowtask.py +36 -10
  34. fractal_server/app/routes/auth/_aux_auth.py +0 -100
  35. fractal_server/app/routes/auth/current_user.py +63 -0
  36. fractal_server/app/routes/auth/group.py +30 -1
  37. fractal_server/app/routes/auth/router.py +0 -2
  38. fractal_server/app/routes/auth/users.py +0 -9
  39. fractal_server/app/schemas/user.py +12 -29
  40. fractal_server/app/schemas/user_group.py +15 -0
  41. fractal_server/app/schemas/v2/__init__.py +48 -48
  42. fractal_server/app/schemas/v2/dataset.py +13 -35
  43. fractal_server/app/schemas/v2/dumps.py +9 -9
  44. fractal_server/app/schemas/v2/job.py +11 -11
  45. fractal_server/app/schemas/v2/project.py +3 -3
  46. fractal_server/app/schemas/v2/resource.py +4 -13
  47. fractal_server/app/schemas/v2/status_legacy.py +3 -3
  48. fractal_server/app/schemas/v2/task.py +6 -6
  49. fractal_server/app/schemas/v2/task_collection.py +4 -4
  50. fractal_server/app/schemas/v2/task_group.py +16 -16
  51. fractal_server/app/schemas/v2/workflow.py +16 -16
  52. fractal_server/app/schemas/v2/workflowtask.py +14 -14
  53. fractal_server/app/security/__init__.py +1 -1
  54. fractal_server/app/shutdown.py +6 -6
  55. fractal_server/config/__init__.py +6 -0
  56. fractal_server/config/_data.py +79 -0
  57. fractal_server/config/_main.py +1 -6
  58. fractal_server/images/models.py +2 -1
  59. fractal_server/main.py +11 -72
  60. fractal_server/runner/config/_slurm.py +0 -2
  61. fractal_server/runner/executors/slurm_common/slurm_config.py +0 -1
  62. fractal_server/runner/v2/_local.py +3 -4
  63. fractal_server/runner/v2/_slurm_ssh.py +3 -4
  64. fractal_server/runner/v2/_slurm_sudo.py +3 -4
  65. fractal_server/runner/v2/runner.py +17 -36
  66. fractal_server/runner/v2/runner_functions.py +14 -11
  67. fractal_server/runner/v2/submit_workflow.py +9 -22
  68. fractal_server/tasks/v2/local/_utils.py +2 -2
  69. fractal_server/tasks/v2/local/collect.py +6 -5
  70. fractal_server/tasks/v2/local/collect_pixi.py +6 -5
  71. fractal_server/tasks/v2/local/deactivate.py +7 -7
  72. fractal_server/tasks/v2/local/deactivate_pixi.py +3 -3
  73. fractal_server/tasks/v2/local/delete.py +5 -5
  74. fractal_server/tasks/v2/local/reactivate.py +5 -5
  75. fractal_server/tasks/v2/local/reactivate_pixi.py +5 -5
  76. fractal_server/tasks/v2/ssh/collect.py +5 -5
  77. fractal_server/tasks/v2/ssh/collect_pixi.py +5 -5
  78. fractal_server/tasks/v2/ssh/deactivate.py +7 -7
  79. fractal_server/tasks/v2/ssh/deactivate_pixi.py +2 -2
  80. fractal_server/tasks/v2/ssh/delete.py +5 -5
  81. fractal_server/tasks/v2/ssh/reactivate.py +5 -5
  82. fractal_server/tasks/v2/ssh/reactivate_pixi.py +5 -5
  83. fractal_server/tasks/v2/utils_background.py +7 -7
  84. fractal_server/tasks/v2/utils_database.py +5 -5
  85. fractal_server/types/__init__.py +0 -22
  86. fractal_server/types/validators/__init__.py +0 -3
  87. fractal_server/types/validators/_common_validators.py +0 -32
  88. {fractal_server-2.18.0.dist-info → fractal_server-2.18.0a0.dist-info}/METADATA +1 -1
  89. {fractal_server-2.18.0.dist-info → fractal_server-2.18.0a0.dist-info}/RECORD +92 -97
  90. fractal_server/app/routes/auth/viewer_paths.py +0 -43
  91. fractal_server/data_migrations/2_18_0.py +0 -30
  92. fractal_server/migrations/versions/7910eed4cf97_user_project_dirs_and_usergroup_viewer_.py +0 -60
  93. fractal_server/migrations/versions/88270f589c9b_add_prevent_new_submissions.py +0 -39
  94. fractal_server/migrations/versions/f0702066b007_one_submitted_job_per_dataset.py +0 -40
  95. {fractal_server-2.18.0.dist-info → fractal_server-2.18.0a0.dist-info}/WHEEL +0 -0
  96. {fractal_server-2.18.0.dist-info → fractal_server-2.18.0a0.dist-info}/entry_points.txt +0 -0
  97. {fractal_server-2.18.0.dist-info → fractal_server-2.18.0a0.dist-info}/licenses/LICENSE +0 -0
fractal_server/config/_data.py ADDED
@@ -0,0 +1,79 @@
+ from enum import StrEnum
+ from typing import Self
+
+ from pydantic import model_validator
+ from pydantic_settings import BaseSettings
+ from pydantic_settings import SettingsConfigDict
+
+ from fractal_server.types import AbsolutePathStr
+
+ from ._settings_config import SETTINGS_CONFIG_DICT
+
+
+ class DataAuthScheme(StrEnum):
+     VIEWER_PATHS = "viewer-paths"
+     USERS_FOLDERS = "users-folders"
+     NONE = "none"
+
+
+ class DataSettings(BaseSettings):
+     """
+     Settings for the `fractal-data` integration.
+
+     See https://github.com/fractal-analytics-platform/fractal-data.
+
+     Attributes:
+         FRACTAL_DATA_AUTH_SCHEME:
+             Defines how the list of allowed viewer paths is built.
+
+             This variable affects the
+             `GET /auth/current-user/allowed-viewer-paths/` response, which is
+             then consumed by
+             [fractal-data](https://github.com/fractal-analytics-platform/fractal-data).
+
+             Options:
+             <ul>
+             <li> `"viewer-paths"`: The list of allowed viewer paths will
+                 include the user's `project_dir` along with any path
+                 defined in UserGroups' `viewer_paths` attributes.
+             </li>
+             <li> `"users-folders"`: The list will consist of the user's
+                 `project_dir` and a user-specific folder. The user folder
+                 is constructed by concatenating the base folder
+                 `FRACTAL_DATA_BASE_FOLDER` with the user's profile
+                 `username`.
+             </li>
+             <li> `"none"`: An empty list will be returned, indicating no
+                 access to viewer paths. Useful when the vizarr viewer is
+                 not used.
+             </li>
+             </ul>
+         FRACTAL_DATA_BASE_FOLDER:
+             Base path to Zarr files that will be served by
+             fractal-vizarr-viewer.
+             This variable is required and used only when
+             `FRACTAL_DATA_AUTH_SCHEME` is set to `"users-folders"`.
+     """
+
+     model_config = SettingsConfigDict(**SETTINGS_CONFIG_DICT)
+
+     FRACTAL_DATA_AUTH_SCHEME: DataAuthScheme = "none"
+
+     FRACTAL_DATA_BASE_FOLDER: AbsolutePathStr | None = None
+
+     @model_validator(mode="after")
+     def check(self: Self) -> Self:
+         """
+         `FRACTAL_DATA_BASE_FOLDER` is required when
+         `FRACTAL_DATA_AUTH_SCHEME` is set to `"users-folders"`.
+         """
+         if (
+             self.FRACTAL_DATA_AUTH_SCHEME == DataAuthScheme.USERS_FOLDERS
+             and self.FRACTAL_DATA_BASE_FOLDER is None
+         ):
+             raise ValueError(
+                 "FRACTAL_DATA_BASE_FOLDER is required when "
+                 "FRACTAL_DATA_AUTH_SCHEME is set to "
+                 "users-folders"
+             )
+         return self
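
To try the cross-field validation added above without a full fractal-server install, here is a minimal runnable sketch; `DataSettingsSketch` and the plain `str` base folder are stand-ins for the real `BaseSettings` subclass and its `AbsolutePathStr` type, while the validator logic mirrors the hunk above.

from enum import StrEnum
from typing import Self

from pydantic import BaseModel
from pydantic import model_validator


class DataAuthScheme(StrEnum):
    VIEWER_PATHS = "viewer-paths"
    USERS_FOLDERS = "users-folders"
    NONE = "none"


class DataSettingsSketch(BaseModel):
    # `str` stands in for fractal-server's `AbsolutePathStr` type.
    FRACTAL_DATA_AUTH_SCHEME: DataAuthScheme = DataAuthScheme.NONE
    FRACTAL_DATA_BASE_FOLDER: str | None = None

    @model_validator(mode="after")
    def check(self: Self) -> Self:
        # Same rule as above: "users-folders" requires a base folder.
        if (
            self.FRACTAL_DATA_AUTH_SCHEME == DataAuthScheme.USERS_FOLDERS
            and self.FRACTAL_DATA_BASE_FOLDER is None
        ):
            raise ValueError(
                "FRACTAL_DATA_BASE_FOLDER is required when "
                "FRACTAL_DATA_AUTH_SCHEME is set to users-folders"
            )
        return self


DataSettingsSketch()  # OK: the default scheme "none" needs no base folder
DataSettingsSketch(
    FRACTAL_DATA_AUTH_SCHEME="users-folders",
    FRACTAL_DATA_BASE_FOLDER="/data/zarr",
)  # OK
try:
    DataSettingsSketch(FRACTAL_DATA_AUTH_SCHEME="users-folders")
except ValueError as err:  # pydantic.ValidationError subclasses ValueError
    print(err)

Because pydantic's `ValidationError` subclasses `ValueError`, the `raise ValueError(...)` inside the `mode="after"` validator surfaces as an ordinary validation failure at construction time.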
fractal_server/config/_main.py CHANGED
@@ -28,7 +28,7 @@ class Settings(BaseSettings):
              Only logs with this level (or higher) will appear in the console
              logs.
          FRACTAL_API_MAX_JOB_LIST_LENGTH:
-             Number of ids that can be stored in the `jobs` attribute of
+             Number of ids that can be stored in the `jobsV2` attribute of
              `app.state`.
          FRACTAL_GRACEFUL_SHUTDOWN_TIME:
              Waiting time for the shutdown phase of executors, in seconds.
@@ -41,10 +41,6 @@ class Settings(BaseSettings):
              user group (e.g. it cannot be deleted, and new users are
              automatically added to it). If set to `None` (the default value),
              then user groups are all equivalent, independently of their name.
-         FRACTAL_LONG_REQUEST_TIME:
-             Time limit beyond which the execution of an API request is
-             considered *slow* and an appropriate warning is logged by the
-             middleware.
      """
 
      model_config = SettingsConfigDict(**SETTINGS_CONFIG_DICT)
@@ -61,4 +57,3 @@ class Settings(BaseSettings):
      FRACTAL_GRACEFUL_SHUTDOWN_TIME: float = 30.0
      FRACTAL_HELP_URL: HttpUrl | None = None
      FRACTAL_DEFAULT_GROUP_NAME: Literal["All"] | None = None
-     FRACTAL_LONG_REQUEST_TIME: float = 30.0
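
The removed `FRACTAL_LONG_REQUEST_TIME` setting was consumed by the `SlowResponseMiddleware` that is also deleted from `fractal_server/main.py` below. For readers unfamiliar with the pattern, here is a stripped-down sketch of such a pure-ASGI timing middleware; the class name, threshold default, and the `print` call are illustrative stand-ins, not fractal-server code.

import time

from fastapi import FastAPI
from starlette.types import Receive
from starlette.types import Scope
from starlette.types import Send


class TimingMiddleware:
    """Report HTTP requests that take longer than `time_threshold` seconds."""

    def __init__(self, app, time_threshold: float = 30.0) -> None:
        self.app = app
        self.time_threshold = time_threshold

    async def __call__(self, scope: Scope, receive: Receive, send: Send):
        if scope["type"] != "http":
            # Pass lifespan/websocket events straight through.
            await self.app(scope, receive, send)
            return
        start = time.perf_counter()
        await self.app(scope, receive, send)
        elapsed = time.perf_counter() - start
        if elapsed > self.time_threshold:
            print(f"SLOW {scope['method']} {scope['path']}: {elapsed:.2f}s")


app = FastAPI()
app.add_middleware(TimingMiddleware, time_threshold=0.5)

Writing it as a raw ASGI class (rather than `BaseHTTPMiddleware`) keeps the overhead to a single awaited wrapper and makes the non-HTTP scopes explicit, which the middleware must pass through untouched.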
fractal_server/images/models.py CHANGED
@@ -5,6 +5,7 @@ from fractal_server.types import DictStrAny
  from fractal_server.types import ImageAttributes
  from fractal_server.types import ImageAttributesWithNone
  from fractal_server.types import ImageTypes
+ from fractal_server.types import ZarrDirStr
  from fractal_server.types import ZarrUrlStr
 
 
@@ -20,7 +21,7 @@ class SingleImageBase(BaseModel):
      """
 
      zarr_url: ZarrUrlStr
-     origin: ZarrUrlStr | None = None
+     origin: ZarrDirStr | None = None
 
      attributes: DictStrAny = Field(default_factory=dict)
      types: ImageTypes = Field(default_factory=dict)
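
As a quick illustration of the changed model's shape, here is a stand-in with plain `str` and `dict` fields replacing the constrained `ZarrUrlStr`/`ZarrDirStr`/`DictStrAny`/`ImageTypes` types from `fractal_server.types`; only the field layout mirrors the hunk above.

from pydantic import BaseModel
from pydantic import Field


class SingleImageBaseSketch(BaseModel):
    zarr_url: str  # ZarrUrlStr in fractal-server
    origin: str | None = None  # ZarrDirStr after this change

    attributes: dict = Field(default_factory=dict)
    types: dict = Field(default_factory=dict)


img = SingleImageBaseSketch(zarr_url="/data/plate.zarr/A/01/0")
print(img.origin, img.attributes, img.types)  # None {} {}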
fractal_server/main.py CHANGED
@@ -1,20 +1,15 @@
  import os
- import time
  from contextlib import asynccontextmanager
- from datetime import datetime
  from itertools import chain
 
  from fastapi import FastAPI
- from starlette.types import Message
- from starlette.types import Receive
- from starlette.types import Scope
- from starlette.types import Send
 
  from fractal_server import __VERSION__
  from fractal_server.app.schemas.v2 import ResourceType
 
  from .app.routes.aux._runner import _backend_supports_shutdown
  from .app.shutdown import cleanup_after_shutdown
+ from .config import get_data_settings
  from .config import get_db_settings
  from .config import get_email_settings
  from .config import get_settings
@@ -33,14 +28,16 @@ def collect_routers(app: FastAPI) -> None:
      app:
          The application to register the routers to.
      """
-     from .app.routes.admin.v2 import router_admin
+     from .app.routes.admin.v2 import router_admin_v2
      from .app.routes.api import router_api
-     from .app.routes.api.v2 import router_api as router_api_v2
+     from .app.routes.api.v2 import router_api_v2
      from .app.routes.auth.router import router_auth
 
      app.include_router(router_api, prefix="/api")
      app.include_router(router_api_v2, prefix="/api/v2")
-     app.include_router(router_admin, prefix="/admin/v2", tags=["Admin area"])
+     app.include_router(
+         router_admin_v2, prefix="/admin/v2", tags=["V2 Admin area"]
+     )
      app.include_router(router_auth, prefix="/auth", tags=["Authentication"])
 
 
@@ -57,12 +54,14 @@ def check_settings() -> None:
      settings = Inject(get_settings)
      db_settings = Inject(get_db_settings)
      email_settings = Inject(get_email_settings)
+     data_settings = Inject(get_data_settings)
      logger = set_logger("fractal_server_settings")
      logger.debug("Fractal Settings:")
      for key, value in chain(
          db_settings.model_dump().items(),
          settings.model_dump().items(),
          email_settings.model_dump().items(),
+         data_settings.model_dump().items(),
      ):
          if any(s in key.upper() for s in ["PASSWORD", "SECRET", "KEY"]):
              value = "*****"
@@ -72,7 +71,7 @@ def check_settings() -> None:
 
  @asynccontextmanager
  async def lifespan(app: FastAPI):
-     app.state.jobs = []
+     app.state.jobsV2 = []
      logger = set_logger("fractal_server.lifespan")
      logger.info(f"[startup] START (fractal-server {__VERSION__})")
      check_settings()
@@ -109,12 +108,12 @@ async def lifespan(app: FastAPI):
 
      logger.info(
          f"[teardown] Current worker with pid {os.getpid()} is shutting down. "
-         f"Current jobs: {app.state.jobs=}"
+         f"Current jobs: {app.state.jobsV2=}"
      )
      if _backend_supports_shutdown(settings.FRACTAL_RUNNER_BACKEND):
          try:
              await cleanup_after_shutdown(
-                 jobs=app.state.jobs,
+                 jobsV2=app.state.jobsV2,
                  logger_name="fractal_server.lifespan",
              )
          except Exception as e:
@@ -132,59 +131,6 @@ async def lifespan(app: FastAPI):
      reset_logger_handlers(logger)
 
 
- slow_response_logger = set_logger("slow-response")
-
-
- def _endpoint_has_background_task(method: str, path: str) -> bool:
-     has_background_task = (method == "POST") and (
-         "/job/submit/" in path
-         or "/task/collect/pi" in path  # "/pip" and "/pixi"
-         or "/task-group/" in path
-     )
-     return has_background_task
-
-
- class SlowResponseMiddleware:
-     def __init__(self, app: FastAPI, time_threshold: float):
-         self.app = app
-         self.time_threshold = time_threshold
-
-     async def __call__(self, scope: Scope, receive: Receive, send: Send):
-         if (
-             scope["type"] != "http"  # e.g. `scope["type"] == "lifespan"`
-             or _endpoint_has_background_task(scope["method"], scope["path"])
-         ):
-             await self.app(scope, receive, send)
-             return
-
-         # Mutable variable which can be updated from within `send_wrapper`
-         context = {"status_code": None}
-
-         async def send_wrapper(message: Message):
-             if message["type"] == "http.response.start":
-                 context["status_code"] = message["status"]
-             await send(message)
-
-         # Measure request time
-         start_timestamp = datetime.now()
-         start_time = time.perf_counter()
-         await self.app(scope, receive, send_wrapper)
-         stop_time = time.perf_counter()
-         request_time = stop_time - start_time
-
-         # Log if process time is too high
-         if request_time > self.time_threshold:
-             end_timestamp = datetime.now()
-             slow_response_logger.warning(
-                 f"{scope['method']} {scope['route'].path}"
-                 f"?{scope['query_string'].decode('utf-8')}, "
-                 f"{context['status_code']}, "
-                 f"{request_time:.2f}, "
-                 f"{start_timestamp.isoformat(timespec='milliseconds')}, "
-                 f"{end_timestamp.isoformat(timespec='milliseconds')}"
-             )
-
-
  def start_application() -> FastAPI:
      """
      Create the application, initialise it and collect all available routers.
@@ -194,13 +140,6 @@ def start_application() -> FastAPI:
          The fully initialised application.
      """
      app = FastAPI(lifespan=lifespan)
-
-     settings = Inject(get_settings)
-     app.add_middleware(
-         SlowResponseMiddleware,
-         time_threshold=settings.FRACTAL_LONG_REQUEST_TIME,
-     )
-
      collect_routers(app)
      return app
 
fractal_server/runner/config/_slurm.py CHANGED
@@ -34,7 +34,6 @@ class SlurmConfigSet(BaseModel):
          account:
          extra_lines:
          gpus:
-         shebang_line: The shell shebang to use for SLURM jobs.
      """
 
      model_config = ConfigDict(extra="forbid")
@@ -50,7 +49,6 @@ class SlurmConfigSet(BaseModel):
      account: NonEmptyStr | None = None
      extra_lines: list[NonEmptyStr] = Field(default_factory=list)
      gpus: NonEmptyStr | None = None
-     shebang_line: str = "#!/bin/sh"
 
 
  class BatchingConfigSet(BaseModel):
fractal_server/runner/executors/slurm_common/slurm_config.py CHANGED
@@ -60,7 +60,6 @@ class SlurmConfig(BaseModel):
              Key-value pairs to be included as `export`-ed variables in SLURM
              submission script, after prepending values with the user's cache
              directory.
-
      """
 
      model_config = ConfigDict(extra="forbid")
fractal_server/runner/v2/_local.py CHANGED
@@ -14,7 +14,7 @@ from fractal_server.runner.set_start_and_last_task_index import (
  from fractal_server.ssh._fabric import FractalSSH
  from fractal_server.types import AttributeFilters
 
- from .runner import execute_tasks
+ from .runner import execute_tasks_v2
 
 
  def process_workflow(
@@ -59,7 +59,7 @@ def process_workflow(
          resource: Computational resource for running this job.
          profile: Computational profile for running this job.
          user_cache_dir:
-             User-writeable folder (typically a subfolder of `project_dirs`).
+             User-writeable folder (typically a subfolder of `project_dir`).
              Only relevant for `slurm_sudo` and `slurm_ssh` backends.
          fractal_ssh:
              `FractalSSH` object, only relevant for the `slurm_ssh` backend.
@@ -90,7 +90,7 @@ def process_workflow(
          resource=resource,
          profile=profile,
      ) as runner:
-         execute_tasks(
+         execute_tasks_v2(
              wf_task_list=workflow.task_list[
                  first_task_index : (last_task_index + 1)
              ],
@@ -104,5 +104,4 @@ def process_workflow(
              job_attribute_filters=job_attribute_filters,
              job_type_filters=job_type_filters,
              user_id=user_id,
-             resource_id=resource.id,
          )
fractal_server/runner/v2/_slurm_ssh.py CHANGED
@@ -33,7 +33,7 @@ from fractal_server.runner.set_start_and_last_task_index import (
  from fractal_server.ssh._fabric import FractalSSH
  from fractal_server.types import AttributeFilters
 
- from .runner import execute_tasks
+ from .runner import execute_tasks_v2
 
  logger = set_logger(__name__)
 
@@ -80,7 +80,7 @@ def process_workflow(
          resource: Computational resource for running this job.
          profile: Computational profile for running this job.
          user_cache_dir:
-             User-writeable folder (typically a subfolder of `project_dirs`).
+             User-writeable folder (typically a subfolder of `project_dir`).
              Only relevant for `slurm_sudo` and `slurm_ssh` backends.
          fractal_ssh:
              `FractalSSH` object, only relevant for the `slurm_ssh` backend.
@@ -113,7 +113,7 @@ def process_workflow(
          common_script_lines=worker_init,
          user_cache_dir=user_cache_dir,
      ) as runner:
-         execute_tasks(
+         execute_tasks_v2(
              wf_task_list=workflow.task_list[
                  first_task_index : (last_task_index + 1)
              ],
@@ -127,5 +127,4 @@ def process_workflow(
              job_attribute_filters=job_attribute_filters,
              job_type_filters=job_type_filters,
              user_id=user_id,
-             resource_id=resource.id,
          )
fractal_server/runner/v2/_slurm_sudo.py CHANGED
@@ -32,7 +32,7 @@ from fractal_server.runner.set_start_and_last_task_index import (
  from fractal_server.ssh._fabric import FractalSSH
  from fractal_server.types import AttributeFilters
 
- from .runner import execute_tasks
+ from .runner import execute_tasks_v2
 
 
  def process_workflow(
@@ -77,7 +77,7 @@ def process_workflow(
          resource: Computational resource for running this job.
          profile: Computational profile for running this job.
          user_cache_dir:
-             User-writeable folder (typically a subfolder of `project_dirs`).
+             User-writeable folder (typically a subfolder of `project_dir`).
              Only relevant for `slurm_sudo` and `slurm_ssh` backends.
          fractal_ssh:
              `FractalSSH` object, only relevant for the `slurm_ssh` backend.
@@ -109,7 +109,7 @@ def process_workflow(
          user_cache_dir=user_cache_dir,
          slurm_account=slurm_account,
      ) as runner:
-         execute_tasks(
+         execute_tasks_v2(
              wf_task_list=workflow.task_list[
                  first_task_index : (last_task_index + 1)
              ],
@@ -123,5 +123,4 @@ def process_workflow(
              job_attribute_filters=job_attribute_filters,
              job_type_filters=job_type_filters,
              user_id=user_id,
-             resource_id=resource.id,
          )
fractal_server/runner/v2/runner.py CHANGED
@@ -14,12 +14,11 @@ from fractal_server.app.models.v2 import HistoryImageCache
  from fractal_server.app.models.v2 import HistoryRun
  from fractal_server.app.models.v2 import HistoryUnit
  from fractal_server.app.models.v2 import JobV2
- from fractal_server.app.models.v2 import Resource
  from fractal_server.app.models.v2 import TaskGroupV2
  from fractal_server.app.models.v2 import WorkflowTaskV2
  from fractal_server.app.schemas.v2 import HistoryUnitStatus
- from fractal_server.app.schemas.v2 import TaskDump
- from fractal_server.app.schemas.v2 import TaskGroupDump
+ from fractal_server.app.schemas.v2 import TaskDumpV2
+ from fractal_server.app.schemas.v2 import TaskGroupDumpV2
  from fractal_server.app.schemas.v2 import TaskType
  from fractal_server.images import SingleImage
  from fractal_server.images.status_tools import IMAGE_STATUS_KEY
@@ -36,9 +35,9 @@ from fractal_server.types import AttributeFilters
  from .merge_outputs import merge_outputs
  from .runner_functions import GetRunnerConfigType
  from .runner_functions import SubmissionOutcome
- from .runner_functions import run_task_compound
- from .runner_functions import run_task_non_parallel
- from .runner_functions import run_task_parallel
+ from .runner_functions import run_v2_task_compound
+ from .runner_functions import run_v2_task_non_parallel
+ from .runner_functions import run_v2_task_parallel
  from .task_interface import TaskOutput
 
 
@@ -83,7 +82,7 @@ def get_origin_attribute_and_types(
      return updated_attributes, updated_types
 
 
- def execute_tasks(
+ def execute_tasks_v2(
      *,
      wf_task_list: list[WorkflowTaskV2],
      dataset: DatasetV2,
@@ -96,7 +95,6 @@
      get_runner_config: GetRunnerConfigType,
      job_type_filters: dict[str, bool],
      job_attribute_filters: AttributeFilters,
-     resource_id: int,
  ) -> None:
      logger = get_logger(logger_name=logger_name)
 
@@ -167,10 +165,10 @@
          # Create dumps for workflowtask and taskgroup
          workflowtask_dump = dict(
              **wftask.model_dump(exclude={"task"}),
-             task=TaskDump(**wftask.task.model_dump()).model_dump(),
+             task=TaskDumpV2(**wftask.task.model_dump()).model_dump(),
          )
          task_group = db.get(TaskGroupV2, wftask.task.taskgroupv2_id)
-         task_group_dump = TaskGroupDump(
+         task_group_dump = TaskGroupDumpV2(
              **task_group.model_dump()
          ).model_dump()
          # Create HistoryRun
@@ -213,37 +211,20 @@
                  f"attribute_filters={job_attribute_filters})."
              )
              logger.info(error_msg)
-             with next(get_sync_db()) as db:
-                 update_status_of_history_run(
-                     history_run_id=history_run_id,
-                     status=HistoryUnitStatus.FAILED,
-                     db_sync=db,
-                 )
+             update_status_of_history_run(
+                 history_run_id=history_run_id,
+                 status=HistoryUnitStatus.FAILED,
+                 db_sync=db,
+             )
              raise JobExecutionError(error_msg)
 
-         # Fail if the resource is not open for new submissions
-         with next(get_sync_db()) as db:
-             resource = db.get(Resource, resource_id)
-             if resource.prevent_new_submissions:
-                 error_msg = (
-                     f"Cannot run '{task.name}', since the '{resource.name}' "
-                     "resource is not currently active."
-                 )
-                 logger.info(error_msg)
-                 update_status_of_history_run(
-                     history_run_id=history_run_id,
-                     status=HistoryUnitStatus.FAILED,
-                     db_sync=db,
-                 )
-                 raise JobExecutionError(error_msg)
-
-         # TASK EXECUTION
+         # TASK EXECUTION (V2)
          try:
              if task.type in [
                  TaskType.NON_PARALLEL,
                  TaskType.CONVERTER_NON_PARALLEL,
              ]:
-                 outcomes_dict, num_tasks = run_task_non_parallel(
+                 outcomes_dict, num_tasks = run_v2_task_non_parallel(
                      images=filtered_images,
                      zarr_dir=zarr_dir,
                      wftask=wftask,
@@ -258,7 +239,7 @@
                      user_id=user_id,
                  )
              elif task.type == TaskType.PARALLEL:
-                 outcomes_dict, num_tasks = run_task_parallel(
+                 outcomes_dict, num_tasks = run_v2_task_parallel(
                      images=filtered_images,
                      wftask=wftask,
                      task=task,
@@ -274,7 +255,7 @@
                  TaskType.COMPOUND,
                  TaskType.CONVERTER_COMPOUND,
              ]:
-                 outcomes_dict, num_tasks = run_task_compound(
+                 outcomes_dict, num_tasks = run_v2_task_compound(
                      images=filtered_images,
                      zarr_dir=zarr_dir,
                      wftask=wftask,
fractal_server/runner/v2/runner_functions.py CHANGED
@@ -64,9 +64,9 @@ GetRunnerConfigType = GetRunnerConfigTypeLocal | GetRunnerConfigTypeSLURM
 
 
  __all__ = [
-     "run_task_parallel",
-     "run_task_non_parallel",
-     "run_task_compound",
+     "run_v2_task_parallel",
+     "run_v2_task_non_parallel",
+     "run_v2_task_compound",
  ]
 
 
@@ -145,7 +145,7 @@ def _check_parallelization_list_size(my_list):
      )
 
 
- def run_task_non_parallel(
+ def run_v2_task_non_parallel(
      *,
      images: list[dict[str, Any]],
      zarr_dir: str,
@@ -168,7 +168,9 @@
          TaskType.NON_PARALLEL,
          TaskType.CONVERTER_NON_PARALLEL,
      ]:
-         raise ValueError(f"Invalid {task_type=} for `run_task_non_parallel`.")
+         raise ValueError(
+             f"Invalid {task_type=} for `run_v2_task_non_parallel`."
+         )
 
      # Get TaskFiles object
      task_files = TaskFiles(
@@ -211,7 +213,7 @@
          db.commit()
          db.refresh(history_unit)
          logger.debug(
-             "[run_task_non_parallel] Created `HistoryUnit` with "
+             "[run_v2_task_non_parallel] Created `HistoryUnit` with "
              f"{history_run_id=}."
          )
          history_unit_id = history_unit.id
@@ -263,7 +265,7 @@
      return outcome, num_tasks
 
 
- def run_task_parallel(
+ def run_v2_task_parallel(
      *,
      images: list[dict[str, Any]],
      task: TaskV2,
@@ -324,7 +326,7 @@
          db.add_all(history_units)
          db.commit()
          logger.debug(
-             f"[run_task_non_parallel] Created {len(history_units)} "
+             f"[run_v2_task_non_parallel] Created {len(history_units)} "
              "`HistoryUnit`s."
          )
 
@@ -386,7 +388,7 @@
      return outcome, num_tasks
 
 
- def run_task_compound(
+ def run_v2_task_compound(
      *,
      images: list[dict[str, Any]],
      zarr_dir: str,
@@ -443,7 +445,7 @@
          db.refresh(history_unit)
          init_history_unit_id = history_unit.id
          logger.debug(
-             "[run_task_compound] Created `HistoryUnit` with "
+             "[run_v2_task_compound] Created `HistoryUnit` with "
              f"{init_history_unit_id=}."
          )
          # Create one `HistoryImageCache` for each input image
@@ -555,7 +557,8 @@
          for history_unit in history_units:
              db.refresh(history_unit)
          logger.debug(
-             f"[run_task_compound] Created {len(history_units)} `HistoryUnit`s."
+             f"[run_v2_task_compound] Created {len(history_units)} "
+             "`HistoryUnit`s."
          )
          history_unit_ids = [history_unit.id for history_unit in history_units]
 
fractal_server/runner/v2/submit_workflow.py CHANGED
@@ -20,7 +20,7 @@ from fractal_server.app.models.v2 import JobV2
  from fractal_server.app.models.v2 import Profile
  from fractal_server.app.models.v2 import Resource
  from fractal_server.app.models.v2 import WorkflowV2
- from fractal_server.app.schemas.v2 import JobStatusType
+ from fractal_server.app.schemas.v2 import JobStatusTypeV2
  from fractal_server.app.schemas.v2 import ResourceType
  from fractal_server.logger import get_logger
  from fractal_server.logger import reset_logger_handlers
@@ -71,7 +71,7 @@ def fail_job(
      logger.error(log_msg)
      reset_logger_handlers(logger)
      job = db.get(JobV2, job.id)  # refetch, in case it was updated
-     job.status = JobStatusType.FAILED
+     job.status = JobStatusTypeV2.FAILED
      job.end_timestamp = get_timestamp()
      job.log = log_msg
      db.merge(job)
@@ -145,27 +145,14 @@ def submit_workflow(
              return
          if dataset is None or workflow is None:
              log_msg = ""
-             if dataset is None:
-                 current_log_msg = (
-                     f"Cannot fetch dataset {dataset_id} from database "
-                     f"(as part of job {job_id})."
+             if not dataset:
+                 log_msg += f"Cannot fetch dataset {dataset_id} from database\n"
+             if not workflow:
+                 log_msg += (
+                     f"Cannot fetch workflow {workflow_id} from database\n"
                  )
-                 logger.error(current_log_msg)
-                 log_msg += f"{current_log_msg}\n"
-             if workflow is None:
-                 current_log_msg += (
-                     f"Cannot fetch workflow {workflow_id} from database "
-                     f"(as part of job {job_id})."
-                 )
-                 logger.error(current_log_msg)
-                 log_msg += f"{current_log_msg}\n"
-
              fail_job(
-                 db=db_sync,
-                 job=job,
-                 log_msg=log_msg,
-                 logger_name=logger_name,
-                 emit_log=False,
+                 db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name
              )
              return
 
@@ -286,7 +273,7 @@ def submit_workflow(
      # Update job DB entry
      with next(DB.get_sync_db()) as db_sync:
          job = db_sync.get(JobV2, job_id)
-         job.status = JobStatusType.DONE
+         job.status = JobStatusTypeV2.DONE
          job.end_timestamp = get_timestamp()
          with log_file_path.open("r") as f:
              logs = f.read()