fractal-server 2.14.0a10__py3-none-any.whl → 2.14.0a12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/routes/api/v2/submit.py +1 -1
- fractal_server/app/runner/components.py +0 -3
- fractal_server/app/runner/exceptions.py +4 -0
- fractal_server/app/runner/executors/base_runner.py +38 -17
- fractal_server/app/runner/executors/local/{_local_config.py → get_local_config.py} +0 -7
- fractal_server/app/runner/executors/local/runner.py +109 -59
- fractal_server/app/runner/executors/slurm_common/_check_jobs_status.py +4 -0
- fractal_server/app/runner/executors/slurm_ssh/executor.py +7 -5
- fractal_server/app/runner/executors/slurm_ssh/runner.py +6 -10
- fractal_server/app/runner/executors/slurm_sudo/runner.py +196 -99
- fractal_server/app/runner/task_files.py +8 -0
- fractal_server/app/runner/v2/__init__.py +0 -366
- fractal_server/app/runner/v2/_local.py +2 -2
- fractal_server/app/runner/v2/_slurm_ssh.py +2 -2
- fractal_server/app/runner/v2/_slurm_sudo.py +2 -2
- fractal_server/app/runner/v2/db_tools.py +87 -0
- fractal_server/app/runner/v2/runner.py +77 -81
- fractal_server/app/runner/v2/runner_functions.py +274 -436
- fractal_server/app/runner/v2/runner_functions_low_level.py +37 -39
- fractal_server/app/runner/v2/submit_workflow.py +366 -0
- fractal_server/app/runner/v2/task_interface.py +31 -0
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a12.dist-info}/METADATA +1 -1
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a12.dist-info}/RECORD +27 -28
- fractal_server/app/runner/executors/local/_submit_setup.py +0 -46
- fractal_server/app/runner/executors/slurm_common/_submit_setup.py +0 -84
- fractal_server/app/runner/v2/_db_tools.py +0 -48
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a12.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a12.dist-info}/WHEEL +0 -0
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a12.dist-info}/entry_points.txt +0 -0
fractal_server/app/runner/v2/__init__.py
@@ -1,366 +0,0 @@
-"""
-Runner backend subsystem root V2
-
-This module is the single entry point to the runner backend subsystem V2.
-Other subsystems should only import this module and not its submodules or
-the individual backends.
-"""
-import os
-import traceback
-from pathlib import Path
-from typing import Optional
-
-from sqlalchemy.orm import Session as DBSyncSession
-
-from ....config import get_settings
-from ....logger import get_logger
-from ....logger import reset_logger_handlers
-from ....logger import set_logger
-from ....ssh._fabric import FractalSSH
-from ....syringe import Inject
-from ....utils import get_timestamp
-from ....zip_tools import _zip_folder_to_file_and_remove
-from ...db import DB
-from ...models.v2 import DatasetV2
-from ...models.v2 import JobV2
-from ...models.v2 import WorkflowV2
-from ...schemas.v2 import JobStatusTypeV2
-from ..exceptions import JobExecutionError
-from ..exceptions import TaskExecutionError
-from ..executors.slurm_sudo._subprocess_run_as_user import _mkdir_as_user
-from ..filenames import WORKFLOW_LOG_FILENAME
-from ._local import process_workflow as local_process_workflow
-from ._slurm_ssh import process_workflow as slurm_ssh_process_workflow
-from ._slurm_sudo import process_workflow as slurm_sudo_process_workflow
-from fractal_server import __VERSION__
-from fractal_server.app.models import UserSettings
-
-
-_backends = {}
-_backends["local"] = local_process_workflow
-_backends["slurm"] = slurm_sudo_process_workflow
-_backends["slurm_ssh"] = slurm_ssh_process_workflow
-
-
-def fail_job(
-    *,
-    db: DBSyncSession,
-    job: JobV2,
-    log_msg: str,
-    logger_name: str,
-    emit_log: bool = False,
-) -> None:
-    logger = get_logger(logger_name=logger_name)
-    if emit_log:
-        logger.error(log_msg)
-    reset_logger_handlers(logger)
-    job.status = JobStatusTypeV2.FAILED
-    job.end_timestamp = get_timestamp()
-    job.log = log_msg
-    db.merge(job)
-    db.commit()
-    db.close()
-    return
-
-
-def submit_workflow(
-    *,
-    workflow_id: int,
-    dataset_id: int,
-    job_id: int,
-    user_id: int,
-    user_settings: UserSettings,
-    worker_init: Optional[str] = None,
-    slurm_user: Optional[str] = None,
-    user_cache_dir: Optional[str] = None,
-    fractal_ssh: Optional[FractalSSH] = None,
-) -> None:
-    """
-    Prepares a workflow and applies it to a dataset
-
-    This function wraps the process_workflow one, which is different for each
-    backend (e.g. local or slurm backend).
-
-    Args:
-        workflow_id:
-            ID of the workflow being applied
-        dataset_id:
-            Dataset ID
-        job_id:
-            Id of the job record which stores the state for the current
-            workflow application.
-        user_id:
-            User ID.
-        worker_init:
-            Custom executor parameters that get parsed before the execution of
-            each task.
-        user_cache_dir:
-            Cache directory (namely a path where the user can write); for the
-            slurm backend, this is used as a base directory for
-            `job.working_dir_user`.
-        slurm_user:
-            The username to impersonate for the workflow execution, for the
-            slurm backend.
-    """
-    # Declare runner backend and set `process_workflow` function
-    settings = Inject(get_settings)
-    FRACTAL_RUNNER_BACKEND = settings.FRACTAL_RUNNER_BACKEND
-    logger_name = f"WF{workflow_id}_job{job_id}"
-    logger = set_logger(logger_name=logger_name)
-
-    with next(DB.get_sync_db()) as db_sync:
-        try:
-            job: Optional[JobV2] = db_sync.get(JobV2, job_id)
-            dataset: Optional[DatasetV2] = db_sync.get(DatasetV2, dataset_id)
-            workflow: Optional[WorkflowV2] = db_sync.get(
-                WorkflowV2, workflow_id
-            )
-        except Exception as e:
-            logger.error(
-                f"Error connecting to the database. Original error: {str(e)}"
-            )
-            reset_logger_handlers(logger)
-            return
-
-        if job is None:
-            logger.error(f"JobV2 {job_id} does not exist")
-            reset_logger_handlers(logger)
-            return
-        if dataset is None or workflow is None:
-            log_msg = ""
-            if not dataset:
-                log_msg += f"Cannot fetch dataset {dataset_id} from database\n"
-            if not workflow:
-                log_msg += (
-                    f"Cannot fetch workflow {workflow_id} from database\n"
-                )
-            fail_job(
-                db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name
-            )
-            return
-
-        # Declare runner backend and set `process_workflow` function
-        settings = Inject(get_settings)
-        FRACTAL_RUNNER_BACKEND = settings.FRACTAL_RUNNER_BACKEND
-        try:
-            process_workflow = _backends[settings.FRACTAL_RUNNER_BACKEND]
-        except KeyError as e:
-            fail_job(
-                db=db_sync,
-                job=job,
-                log_msg=(
-                    f"Invalid {FRACTAL_RUNNER_BACKEND=}.\n"
-                    f"Original KeyError: {str(e)}"
-                ),
-                logger_name=logger_name,
-                emit_log=True,
-            )
-            return
-
-        # Define and create server-side working folder
-        WORKFLOW_DIR_LOCAL = Path(job.working_dir)
-        if WORKFLOW_DIR_LOCAL.exists():
-            fail_job(
-                db=db_sync,
-                job=job,
-                log_msg=f"Workflow dir {WORKFLOW_DIR_LOCAL} already exists.",
-                logger_name=logger_name,
-                emit_log=True,
-            )
-            return
-
-        try:
-            # Create WORKFLOW_DIR_LOCAL
-            if FRACTAL_RUNNER_BACKEND == "slurm":
-                original_umask = os.umask(0)
-                WORKFLOW_DIR_LOCAL.mkdir(parents=True, mode=0o755)
-                os.umask(original_umask)
-            else:
-                WORKFLOW_DIR_LOCAL.mkdir(parents=True)
-
-            # Define and create WORKFLOW_DIR_REMOTE
-            if FRACTAL_RUNNER_BACKEND == "local":
-                WORKFLOW_DIR_REMOTE = WORKFLOW_DIR_LOCAL
-            elif FRACTAL_RUNNER_BACKEND == "slurm":
-                WORKFLOW_DIR_REMOTE = (
-                    Path(user_cache_dir) / WORKFLOW_DIR_LOCAL.name
-                )
-                _mkdir_as_user(
-                    folder=str(WORKFLOW_DIR_REMOTE), user=slurm_user
-                )
-            elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
-                # Folder creation is deferred to _process_workflow
-                WORKFLOW_DIR_REMOTE = (
-                    Path(user_settings.ssh_jobs_dir) / WORKFLOW_DIR_LOCAL.name
-                )
-            else:
-                logger.error(
-                    "Invalid FRACTAL_RUNNER_BACKEND="
-                    f"{settings.FRACTAL_RUNNER_BACKEND}."
-                )
-
-        except Exception as e:
-            error_type = type(e).__name__
-            fail_job(
-                db=db_sync,
-                job=job,
-                log_msg=(
-                    f"{error_type} error occurred while creating job folder "
-                    f"and subfolders.\nOriginal error: {str(e)}"
-                ),
-                logger_name=logger_name,
-                emit_log=True,
-            )
-            return
-
-        # After Session.commit() is called, either explicitly or when using a
-        # context manager, all objects associated with the Session are expired.
-        # https://docs.sqlalchemy.org/en/14/orm/
-        # session_basics.html#opening-and-closing-a-session
-        # https://docs.sqlalchemy.org/en/14/orm/
-        # session_state_management.html#refreshing-expiring
-
-        # See issue #928:
-        # https://github.com/fractal-analytics-platform/
-        # fractal-server/issues/928
-
-        db_sync.refresh(dataset)
-        db_sync.refresh(workflow)
-        for wftask in workflow.task_list:
-            db_sync.refresh(wftask)
-
-        # Write logs
-        log_file_path = WORKFLOW_DIR_LOCAL / WORKFLOW_LOG_FILENAME
-        logger = set_logger(
-            logger_name=logger_name,
-            log_file_path=log_file_path,
-        )
-        logger.info(
-            f'Start execution of workflow "{workflow.name}"; '
-            f"more logs at {str(log_file_path)}"
-        )
-        logger.debug(f"fractal_server.__VERSION__: {__VERSION__}")
-        logger.debug(f"FRACTAL_RUNNER_BACKEND: {FRACTAL_RUNNER_BACKEND}")
-        if FRACTAL_RUNNER_BACKEND == "slurm":
-            logger.debug(f"slurm_user: {slurm_user}")
-            logger.debug(f"slurm_account: {job.slurm_account}")
-            logger.debug(f"worker_init: {worker_init}")
-        elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
-            logger.debug(f"ssh_user: {user_settings.ssh_username}")
-            logger.debug(f"base dir: {user_settings.ssh_tasks_dir}")
-            logger.debug(f"worker_init: {worker_init}")
-        logger.debug(f"job.id: {job.id}")
-        logger.debug(f"job.working_dir: {job.working_dir}")
-        logger.debug(f"job.working_dir_user: {job.working_dir_user}")
-        logger.debug(f"job.first_task_index: {job.first_task_index}")
-        logger.debug(f"job.last_task_index: {job.last_task_index}")
-        logger.debug(f'START workflow "{workflow.name}"')
-
-    try:
-        if FRACTAL_RUNNER_BACKEND == "local":
-            process_workflow = local_process_workflow
-            backend_specific_kwargs = {}
-        elif FRACTAL_RUNNER_BACKEND == "slurm":
-            process_workflow = slurm_sudo_process_workflow
-            backend_specific_kwargs = dict(
-                slurm_user=slurm_user,
-                slurm_account=job.slurm_account,
-                user_cache_dir=user_cache_dir,
-            )
-        elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
-            process_workflow = slurm_ssh_process_workflow
-            backend_specific_kwargs = dict(fractal_ssh=fractal_ssh)
-        else:
-            raise RuntimeError(
-                f"Invalid runner backend {FRACTAL_RUNNER_BACKEND=}"
-            )
-
-        # "The Session.close() method does not prevent the Session from being
-        # used again. The Session itself does not actually have a distinct
-        # “closed” state; it merely means the Session will release all database
-        # connections and ORM objects."
-        # (https://docs.sqlalchemy.org/en/20/orm/session_api.html#sqlalchemy.orm.Session.close).
-        #
-        # We close the session before the (possibly long) process_workflow
-        # call, to make sure all DB connections are released. The reason why we
-        # are not using a context manager within the try block is that we also
-        # need access to db_sync in the except branches.
-        db_sync = next(DB.get_sync_db())
-        db_sync.close()
-
-        process_workflow(
-            workflow=workflow,
-            dataset=dataset,
-            user_id=user_id,
-            workflow_dir_local=WORKFLOW_DIR_LOCAL,
-            workflow_dir_remote=WORKFLOW_DIR_REMOTE,
-            logger_name=logger_name,
-            worker_init=worker_init,
-            first_task_index=job.first_task_index,
-            last_task_index=job.last_task_index,
-            job_attribute_filters=job.attribute_filters,
-            job_type_filters=job.type_filters,
-            **backend_specific_kwargs,
-        )
-
-        logger.info(
-            f'End execution of workflow "{workflow.name}"; '
-            f"more logs at {str(log_file_path)}"
-        )
-        logger.debug(f'END workflow "{workflow.name}"')
-
-        # Update job DB entry
-        job.status = JobStatusTypeV2.DONE
-        job.end_timestamp = get_timestamp()
-        with log_file_path.open("r") as f:
-            logs = f.read()
-        job.log = logs
-        db_sync.merge(job)
-        db_sync.commit()
-
-    except TaskExecutionError as e:
-        logger.debug(f'FAILED workflow "{workflow.name}", TaskExecutionError.')
-        logger.info(f'Workflow "{workflow.name}" failed (TaskExecutionError).')
-
-        exception_args_string = "\n".join(e.args)
-        log_msg = (
-            f"TASK ERROR: "
-            f"Task name: {e.task_name}, "
-            f"position in Workflow: {e.workflow_task_order}\n"
-            f"TRACEBACK:\n{exception_args_string}"
-        )
-        fail_job(db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name)
-
-    except JobExecutionError as e:
-        logger.debug(f'FAILED workflow "{workflow.name}", JobExecutionError.')
-        logger.info(f'Workflow "{workflow.name}" failed (JobExecutionError).')
-
-        fail_job(
-            db=db_sync,
-            job=job,
-            log_msg=(
-                f"JOB ERROR in Fractal job {job.id}:\n"
-                f"TRACEBACK:\n{e.assemble_error()}"
-            ),
-            logger_name=logger_name,
-        )
-
-    except Exception:
-        logger.debug(f'FAILED workflow "{workflow.name}", unknown error.')
-        logger.info(f'Workflow "{workflow.name}" failed (unkwnon error).')
-
-        current_traceback = traceback.format_exc()
-        fail_job(
-            db=db_sync,
-            job=job,
-            log_msg=(
-                f"UNKNOWN ERROR in Fractal job {job.id}\n"
-                f"TRACEBACK:\n{current_traceback}"
-            ),
-            logger_name=logger_name,
-        )

-    finally:
-        reset_logger_handlers(logger)
-        db_sync.close()
-        _zip_folder_to_file_and_remove(folder=job.working_dir)
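The 366 lines removed here equal the 366 lines added in `fractal_server/app/runner/v2/submit_workflow.py` (listed above), so this hunk reads as a module move rather than a net deletion. The deleted module's central mechanism is dict-based backend dispatch with a `KeyError` fallback that fails the job. Below is a minimal, self-contained sketch of that pattern; `process_local` and `dispatch` are hypothetical stand-ins, not the real `process_workflow` backends:

```python
# Sketch of the dict-based backend dispatch used above; process_local and
# dispatch are illustrative stand-ins, not fractal-server code.
from typing import Any, Callable


def process_local(**kwargs: Any) -> None:
    print("processing workflow locally:", kwargs)


_backends: dict[str, Callable[..., None]] = {"local": process_local}


def dispatch(backend: str, **kwargs: Any) -> None:
    try:
        process_workflow = _backends[backend]
    except KeyError as e:
        # The real code calls fail_job(...) with an
        # "Invalid FRACTAL_RUNNER_BACKEND" message instead of raising.
        raise RuntimeError(f"Invalid runner backend {backend!r}") from e
    process_workflow(**kwargs)


dispatch("local", workflow_id=1, dataset_id=2, job_id=3)
```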
fractal_server/app/runner/v2/_local.py
@@ -3,7 +3,7 @@ from typing import Optional
 
 from ...models.v2 import DatasetV2
 from ...models.v2 import WorkflowV2
-from ..executors.local.
+from ..executors.local.get_local_config import get_local_backend_config
 from ..executors.local.runner import LocalRunner
 from ..set_start_and_last_task_index import set_start_and_last_task_index
 from .runner import execute_tasks_v2
@@ -79,7 +79,7 @@ def process_workflow(
         workflow_dir_local=workflow_dir_local,
         workflow_dir_remote=workflow_dir_local,
         logger_name=logger_name,
-
+        get_runner_config=get_local_backend_config,
         job_attribute_filters=job_attribute_filters,
         job_type_filters=job_type_filters,
         user_id=user_id,
fractal_server/app/runner/v2/_slurm_ssh.py
@@ -23,7 +23,7 @@ from ....ssh._fabric import FractalSSH
 from ...models.v2 import DatasetV2
 from ...models.v2 import WorkflowV2
 from ..exceptions import JobExecutionError
-from ..executors.slurm_common.
+from ..executors.slurm_common.get_slurm_config import get_slurm_config
 from ..executors.slurm_ssh.executor import FractalSlurmSSHExecutor
 from ..set_start_and_last_task_index import set_start_and_last_task_index
 from .runner import execute_tasks_v2
@@ -91,7 +91,7 @@ def process_workflow(
         workflow_dir_local=workflow_dir_local,
         workflow_dir_remote=workflow_dir_remote,
         logger_name=logger_name,
-
+        get_runner_config=get_slurm_config,
         job_attribute_filters=job_attribute_filters,
         job_type_filters=job_type_filters,
         user_id=user_id,
fractal_server/app/runner/v2/_slurm_sudo.py
@@ -21,7 +21,7 @@ from typing import Optional
 
 from ...models.v2 import DatasetV2
 from ...models.v2 import WorkflowV2
-from ..executors.slurm_common.
+from ..executors.slurm_common.get_slurm_config import get_slurm_config
 from ..executors.slurm_sudo.runner import RunnerSlurmSudo
 from ..set_start_and_last_task_index import set_start_and_last_task_index
 from .runner import execute_tasks_v2
@@ -83,7 +83,7 @@ def process_workflow(
         workflow_dir_local=workflow_dir_local,
         workflow_dir_remote=workflow_dir_remote,
         logger_name=logger_name,
-
+        get_runner_config=get_slurm_config,
         job_attribute_filters=job_attribute_filters,
         job_type_filters=job_type_filters,
         user_id=user_id,
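Across `_local.py`, `_slurm_ssh.py`, and `_slurm_sudo.py` the change is the same: the removed `_submit_setup.py` indirection (see the deleted `local/_submit_setup.py` and `slurm_common/_submit_setup.py` in the file list) gives way to passing a backend-specific `get_runner_config` callable directly into `execute_tasks_v2`. A hedged sketch of that injection pattern follows; the `RunnerConfig` shape and the `execute_tasks_v2` signature are simplified assumptions for this example, not fractal-server's actual interfaces:

```python
# Illustrative sketch of injecting a backend-specific config factory; the
# RunnerConfig shape and execute_tasks_v2 signature are assumptions made
# for this example only.
from dataclasses import dataclass, field
from typing import Any, Callable


@dataclass
class RunnerConfig:
    # Stand-in for the objects returned by get_local_backend_config or
    # get_slurm_config in the real code.
    options: dict[str, Any] = field(default_factory=dict)


def get_local_backend_config(wftask: str) -> RunnerConfig:
    return RunnerConfig()


def execute_tasks_v2(
    *,
    task_list: list[str],
    get_runner_config: Callable[[str], RunnerConfig],
) -> None:
    # The generic loop asks the injected factory for a per-task config,
    # so it never imports backend-specific modules itself.
    for wftask in task_list:
        config = get_runner_config(wftask)
        print(f"submitting {wftask!r} with {config}")


execute_tasks_v2(
    task_list=["illumination_correction"],
    get_runner_config=get_local_backend_config,
)
```

This keeps the shared runner loop backend-agnostic: each `process_workflow` wrapper decides which factory to inject.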
fractal_server/app/runner/v2/db_tools.py
@@ -0,0 +1,87 @@
+from typing import Any
+
+from sqlalchemy.dialects.postgresql import insert as pg_insert
+from sqlalchemy.orm import Session
+
+from fractal_server.app.models.v2 import HistoryImageCache
+from fractal_server.app.models.v2 import HistoryRun
+from fractal_server.app.models.v2 import HistoryUnit
+from fractal_server.app.schemas.v2 import HistoryUnitStatus
+
+
+def update_status_of_history_run(
+    *,
+    history_run_id: int,
+    status: HistoryUnitStatus,
+    db_sync: Session,
+) -> None:
+    run = db_sync.get(HistoryRun, history_run_id)
+    if run is None:
+        raise ValueError(f"HistoryRun {history_run_id} not found.")
+    run.status = status
+    db_sync.merge(run)
+    db_sync.commit()
+
+
+def update_status_of_history_unit(
+    *,
+    history_unit_id: int,
+    status: HistoryUnitStatus,
+    db_sync: Session,
+) -> None:
+    unit = db_sync.get(HistoryUnit, history_unit_id)
+    if unit is None:
+        raise ValueError(f"HistoryUnit {history_unit_id} not found.")
+    unit.status = status
+    db_sync.merge(unit)
+    db_sync.commit()
+
+
+_CHUNK_SIZE = 2_000
+
+
+def bulk_upsert_image_cache_fast(
+    *,
+    list_upsert_objects: list[dict[str, Any]],
+    db: Session,
+) -> None:
+    """
+    Insert or update many objects into `HistoryImageCache` and commit
+
+    This function is an optimized version of
+
+    ```python
+    for obj in list_upsert_objects:
+        db.merge(**obj)
+    db.commit()
+    ```
+
+    See docs at
+    https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#insert-on-conflict-upsert
+
+    FIXME: we tried to replace `index_elements` with
+    `constraint="pk_historyimagecache"`, but it did not work as expected.
+
+    Arguments:
+        list_upsert_objects:
+            List of dictionaries for objects to be upsert-ed.
+        db: A sync database session
+    """
+    if len(list_upsert_objects) == 0:
+        return None
+
+    for ind in range(0, len(list_upsert_objects), _CHUNK_SIZE):
+        stmt = pg_insert(HistoryImageCache).values(
+            list_upsert_objects[ind : ind + _CHUNK_SIZE]
+        )
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[
+                HistoryImageCache.zarr_url,
+                HistoryImageCache.dataset_id,
+                HistoryImageCache.workflowtask_id,
+            ],
+            set_=dict(
+                latest_history_unit_id=stmt.excluded.latest_history_unit_id
+            ),
+        )
+        db.execute(stmt)
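For context, here is a hedged usage sketch of the new `bulk_upsert_image_cache_fast`: callers pass plain dictionaries keyed by the `HistoryImageCache` columns referenced above (`zarr_url`, `dataset_id`, `workflowtask_id`, `latest_history_unit_id`) together with a sync session, and the function issues one PostgreSQL `INSERT ... ON CONFLICT DO UPDATE` per chunk of 2,000 rows. The engine URL, session handling, and row values below are placeholders, not fractal-server's actual database configuration:

```python
# Hypothetical caller of bulk_upsert_image_cache_fast; the connection URL
# and session setup are placeholders for this sketch only.
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

from fractal_server.app.runner.v2.db_tools import bulk_upsert_image_cache_fast

engine = create_engine("postgresql+psycopg2://user:pass@localhost/fractal")

# One dict per image; keys mirror the HistoryImageCache columns used in the
# ON CONFLICT clause above.
cache_rows = [
    dict(
        zarr_url=f"/data/my_plate.zarr/B/03/{i}",
        dataset_id=1,
        workflowtask_id=7,
        latest_history_unit_id=42,
    )
    for i in range(3)
]

with Session(engine) as db:
    bulk_upsert_image_cache_fast(list_upsert_objects=cache_rows, db=db)
```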