fractal-server 2.14.0a9__py3-none-any.whl → 2.14.0a11__py3-none-any.whl

This diff compares the contents of two package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two releases as they appear in that registry.
Files changed (43)
  1. fractal_server/__init__.py +1 -1
  2. fractal_server/app/models/v2/dataset.py +0 -10
  3. fractal_server/app/models/v2/job.py +3 -0
  4. fractal_server/app/routes/api/v2/__init__.py +2 -0
  5. fractal_server/app/routes/api/v2/history.py +14 -9
  6. fractal_server/app/routes/api/v2/images.py +5 -2
  7. fractal_server/app/routes/api/v2/submit.py +16 -14
  8. fractal_server/app/routes/api/v2/verify_image_types.py +64 -0
  9. fractal_server/app/routes/api/v2/workflow.py +11 -7
  10. fractal_server/app/runner/components.py +0 -3
  11. fractal_server/app/runner/exceptions.py +4 -0
  12. fractal_server/app/runner/executors/base_runner.py +16 -17
  13. fractal_server/app/runner/executors/local/{_local_config.py → get_local_config.py} +0 -7
  14. fractal_server/app/runner/executors/local/runner.py +117 -58
  15. fractal_server/app/runner/executors/{slurm_sudo → slurm_common}/_check_jobs_status.py +4 -0
  16. fractal_server/app/runner/executors/slurm_ssh/_check_job_status_ssh.py +67 -0
  17. fractal_server/app/runner/executors/slurm_ssh/executor.py +7 -5
  18. fractal_server/app/runner/executors/slurm_ssh/runner.py +707 -0
  19. fractal_server/app/runner/executors/slurm_sudo/runner.py +265 -114
  20. fractal_server/app/runner/task_files.py +8 -0
  21. fractal_server/app/runner/v2/__init__.py +0 -365
  22. fractal_server/app/runner/v2/_local.py +4 -2
  23. fractal_server/app/runner/v2/_slurm_ssh.py +4 -2
  24. fractal_server/app/runner/v2/_slurm_sudo.py +4 -2
  25. fractal_server/app/runner/v2/db_tools.py +87 -0
  26. fractal_server/app/runner/v2/runner.py +83 -89
  27. fractal_server/app/runner/v2/runner_functions.py +279 -436
  28. fractal_server/app/runner/v2/runner_functions_low_level.py +37 -39
  29. fractal_server/app/runner/v2/submit_workflow.py +366 -0
  30. fractal_server/app/runner/v2/task_interface.py +31 -0
  31. fractal_server/app/schemas/v2/dataset.py +4 -71
  32. fractal_server/app/schemas/v2/dumps.py +6 -5
  33. fractal_server/app/schemas/v2/job.py +6 -3
  34. fractal_server/migrations/versions/47351f8c7ebc_drop_dataset_filters.py +50 -0
  35. fractal_server/migrations/versions/e81103413827_add_job_type_filters.py +36 -0
  36. {fractal_server-2.14.0a9.dist-info → fractal_server-2.14.0a11.dist-info}/METADATA +1 -1
  37. {fractal_server-2.14.0a9.dist-info → fractal_server-2.14.0a11.dist-info}/RECORD +40 -36
  38. fractal_server/app/runner/executors/local/_submit_setup.py +0 -46
  39. fractal_server/app/runner/executors/slurm_common/_submit_setup.py +0 -84
  40. fractal_server/app/runner/v2/_db_tools.py +0 -48
  41. {fractal_server-2.14.0a9.dist-info → fractal_server-2.14.0a11.dist-info}/LICENSE +0 -0
  42. {fractal_server-2.14.0a9.dist-info → fractal_server-2.14.0a11.dist-info}/WHEEL +0 -0
  43. {fractal_server-2.14.0a9.dist-info → fractal_server-2.14.0a11.dist-info}/entry_points.txt +0 -0
fractal_server/app/runner/v2/__init__.py
@@ -1,365 +0,0 @@
- """
- Runner backend subsystem root V2
-
- This module is the single entry point to the runner backend subsystem V2.
- Other subsystems should only import this module and not its submodules or
- the individual backends.
- """
- import os
- import traceback
- from pathlib import Path
- from typing import Optional
-
- from sqlalchemy.orm import Session as DBSyncSession
-
- from ....config import get_settings
- from ....logger import get_logger
- from ....logger import reset_logger_handlers
- from ....logger import set_logger
- from ....ssh._fabric import FractalSSH
- from ....syringe import Inject
- from ....utils import get_timestamp
- from ....zip_tools import _zip_folder_to_file_and_remove
- from ...db import DB
- from ...models.v2 import DatasetV2
- from ...models.v2 import JobV2
- from ...models.v2 import WorkflowV2
- from ...schemas.v2 import JobStatusTypeV2
- from ..exceptions import JobExecutionError
- from ..exceptions import TaskExecutionError
- from ..executors.slurm_sudo._subprocess_run_as_user import _mkdir_as_user
- from ..filenames import WORKFLOW_LOG_FILENAME
- from ._local import process_workflow as local_process_workflow
- from ._slurm_ssh import process_workflow as slurm_ssh_process_workflow
- from ._slurm_sudo import process_workflow as slurm_sudo_process_workflow
- from fractal_server import __VERSION__
- from fractal_server.app.models import UserSettings
-
-
- _backends = {}
- _backends["local"] = local_process_workflow
- _backends["slurm"] = slurm_sudo_process_workflow
- _backends["slurm_ssh"] = slurm_ssh_process_workflow
-
-
- def fail_job(
- *,
- db: DBSyncSession,
- job: JobV2,
- log_msg: str,
- logger_name: str,
- emit_log: bool = False,
- ) -> None:
- logger = get_logger(logger_name=logger_name)
- if emit_log:
- logger.error(log_msg)
- reset_logger_handlers(logger)
- job.status = JobStatusTypeV2.FAILED
- job.end_timestamp = get_timestamp()
- job.log = log_msg
- db.merge(job)
- db.commit()
- db.close()
- return
-
-
- def submit_workflow(
- *,
- workflow_id: int,
- dataset_id: int,
- job_id: int,
- user_id: int,
- user_settings: UserSettings,
- worker_init: Optional[str] = None,
- slurm_user: Optional[str] = None,
- user_cache_dir: Optional[str] = None,
- fractal_ssh: Optional[FractalSSH] = None,
- ) -> None:
- """
- Prepares a workflow and applies it to a dataset
-
- This function wraps the process_workflow one, which is different for each
- backend (e.g. local or slurm backend).
-
- Args:
- workflow_id:
- ID of the workflow being applied
- dataset_id:
- Dataset ID
- job_id:
- Id of the job record which stores the state for the current
- workflow application.
- user_id:
- User ID.
- worker_init:
- Custom executor parameters that get parsed before the execution of
- each task.
- user_cache_dir:
- Cache directory (namely a path where the user can write); for the
- slurm backend, this is used as a base directory for
- `job.working_dir_user`.
- slurm_user:
- The username to impersonate for the workflow execution, for the
- slurm backend.
- """
- # Declare runner backend and set `process_workflow` function
- settings = Inject(get_settings)
- FRACTAL_RUNNER_BACKEND = settings.FRACTAL_RUNNER_BACKEND
- logger_name = f"WF{workflow_id}_job{job_id}"
- logger = set_logger(logger_name=logger_name)
-
- with next(DB.get_sync_db()) as db_sync:
- try:
- job: Optional[JobV2] = db_sync.get(JobV2, job_id)
- dataset: Optional[DatasetV2] = db_sync.get(DatasetV2, dataset_id)
- workflow: Optional[WorkflowV2] = db_sync.get(
- WorkflowV2, workflow_id
- )
- except Exception as e:
- logger.error(
- f"Error connecting to the database. Original error: {str(e)}"
- )
- reset_logger_handlers(logger)
- return
-
- if job is None:
- logger.error(f"JobV2 {job_id} does not exist")
- reset_logger_handlers(logger)
- return
- if dataset is None or workflow is None:
- log_msg = ""
- if not dataset:
- log_msg += f"Cannot fetch dataset {dataset_id} from database\n"
- if not workflow:
- log_msg += (
- f"Cannot fetch workflow {workflow_id} from database\n"
- )
- fail_job(
- db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name
- )
- return
-
- # Declare runner backend and set `process_workflow` function
- settings = Inject(get_settings)
- FRACTAL_RUNNER_BACKEND = settings.FRACTAL_RUNNER_BACKEND
- try:
- process_workflow = _backends[settings.FRACTAL_RUNNER_BACKEND]
- except KeyError as e:
- fail_job(
- db=db_sync,
- job=job,
- log_msg=(
- f"Invalid {FRACTAL_RUNNER_BACKEND=}.\n"
- f"Original KeyError: {str(e)}"
- ),
- logger_name=logger_name,
- emit_log=True,
- )
- return
-
- # Define and create server-side working folder
- WORKFLOW_DIR_LOCAL = Path(job.working_dir)
- if WORKFLOW_DIR_LOCAL.exists():
- fail_job(
- db=db_sync,
- job=job,
- log_msg=f"Workflow dir {WORKFLOW_DIR_LOCAL} already exists.",
- logger_name=logger_name,
- emit_log=True,
- )
- return
-
- try:
- # Create WORKFLOW_DIR_LOCAL
- if FRACTAL_RUNNER_BACKEND == "slurm":
- original_umask = os.umask(0)
- WORKFLOW_DIR_LOCAL.mkdir(parents=True, mode=0o755)
- os.umask(original_umask)
- else:
- WORKFLOW_DIR_LOCAL.mkdir(parents=True)
-
- # Define and create WORKFLOW_DIR_REMOTE
- if FRACTAL_RUNNER_BACKEND == "local":
- WORKFLOW_DIR_REMOTE = WORKFLOW_DIR_LOCAL
- elif FRACTAL_RUNNER_BACKEND == "slurm":
- WORKFLOW_DIR_REMOTE = (
- Path(user_cache_dir) / WORKFLOW_DIR_LOCAL.name
- )
- _mkdir_as_user(
- folder=str(WORKFLOW_DIR_REMOTE), user=slurm_user
- )
- elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
- # Folder creation is deferred to _process_workflow
- WORKFLOW_DIR_REMOTE = (
- Path(user_settings.ssh_jobs_dir) / WORKFLOW_DIR_LOCAL.name
- )
- else:
- logger.error(
- "Invalid FRACTAL_RUNNER_BACKEND="
- f"{settings.FRACTAL_RUNNER_BACKEND}."
- )
-
- except Exception as e:
- error_type = type(e).__name__
- fail_job(
- db=db_sync,
- job=job,
- log_msg=(
- f"{error_type} error occurred while creating job folder "
- f"and subfolders.\nOriginal error: {str(e)}"
- ),
- logger_name=logger_name,
- emit_log=True,
- )
- return
-
- # After Session.commit() is called, either explicitly or when using a
- # context manager, all objects associated with the Session are expired.
- # https://docs.sqlalchemy.org/en/14/orm/
- # session_basics.html#opening-and-closing-a-session
- # https://docs.sqlalchemy.org/en/14/orm/
- # session_state_management.html#refreshing-expiring
-
- # See issue #928:
- # https://github.com/fractal-analytics-platform/
- # fractal-server/issues/928
-
- db_sync.refresh(dataset)
- db_sync.refresh(workflow)
- for wftask in workflow.task_list:
- db_sync.refresh(wftask)
-
- # Write logs
- log_file_path = WORKFLOW_DIR_LOCAL / WORKFLOW_LOG_FILENAME
- logger = set_logger(
- logger_name=logger_name,
- log_file_path=log_file_path,
- )
- logger.info(
- f'Start execution of workflow "{workflow.name}"; '
- f"more logs at {str(log_file_path)}"
- )
- logger.debug(f"fractal_server.__VERSION__: {__VERSION__}")
- logger.debug(f"FRACTAL_RUNNER_BACKEND: {FRACTAL_RUNNER_BACKEND}")
- if FRACTAL_RUNNER_BACKEND == "slurm":
- logger.debug(f"slurm_user: {slurm_user}")
- logger.debug(f"slurm_account: {job.slurm_account}")
- logger.debug(f"worker_init: {worker_init}")
- elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
- logger.debug(f"ssh_user: {user_settings.ssh_username}")
- logger.debug(f"base dir: {user_settings.ssh_tasks_dir}")
- logger.debug(f"worker_init: {worker_init}")
- logger.debug(f"job.id: {job.id}")
- logger.debug(f"job.working_dir: {job.working_dir}")
- logger.debug(f"job.working_dir_user: {job.working_dir_user}")
- logger.debug(f"job.first_task_index: {job.first_task_index}")
- logger.debug(f"job.last_task_index: {job.last_task_index}")
- logger.debug(f'START workflow "{workflow.name}"')
-
- try:
- if FRACTAL_RUNNER_BACKEND == "local":
- process_workflow = local_process_workflow
- backend_specific_kwargs = {}
- elif FRACTAL_RUNNER_BACKEND == "slurm":
- process_workflow = slurm_sudo_process_workflow
- backend_specific_kwargs = dict(
- slurm_user=slurm_user,
- slurm_account=job.slurm_account,
- user_cache_dir=user_cache_dir,
- )
- elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
- process_workflow = slurm_ssh_process_workflow
- backend_specific_kwargs = dict(fractal_ssh=fractal_ssh)
- else:
- raise RuntimeError(
- f"Invalid runner backend {FRACTAL_RUNNER_BACKEND=}"
- )
-
- # "The Session.close() method does not prevent the Session from being
- # used again. The Session itself does not actually have a distinct
- # “closed” state; it merely means the Session will release all database
- # connections and ORM objects."
- # (https://docs.sqlalchemy.org/en/20/orm/session_api.html#sqlalchemy.orm.Session.close).
- #
- # We close the session before the (possibly long) process_workflow
- # call, to make sure all DB connections are released. The reason why we
- # are not using a context manager within the try block is that we also
- # need access to db_sync in the except branches.
- db_sync = next(DB.get_sync_db())
- db_sync.close()
-
- process_workflow(
- workflow=workflow,
- dataset=dataset,
- user_id=user_id,
- workflow_dir_local=WORKFLOW_DIR_LOCAL,
- workflow_dir_remote=WORKFLOW_DIR_REMOTE,
- logger_name=logger_name,
- worker_init=worker_init,
- first_task_index=job.first_task_index,
- last_task_index=job.last_task_index,
- job_attribute_filters=job.attribute_filters,
- **backend_specific_kwargs,
- )
-
- logger.info(
- f'End execution of workflow "{workflow.name}"; '
- f"more logs at {str(log_file_path)}"
- )
- logger.debug(f'END workflow "{workflow.name}"')
-
- # Update job DB entry
- job.status = JobStatusTypeV2.DONE
- job.end_timestamp = get_timestamp()
- with log_file_path.open("r") as f:
- logs = f.read()
- job.log = logs
- db_sync.merge(job)
- db_sync.commit()
-
- except TaskExecutionError as e:
- logger.debug(f'FAILED workflow "{workflow.name}", TaskExecutionError.')
- logger.info(f'Workflow "{workflow.name}" failed (TaskExecutionError).')
-
- exception_args_string = "\n".join(e.args)
- log_msg = (
- f"TASK ERROR: "
- f"Task name: {e.task_name}, "
- f"position in Workflow: {e.workflow_task_order}\n"
- f"TRACEBACK:\n{exception_args_string}"
- )
- fail_job(db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name)
-
- except JobExecutionError as e:
- logger.debug(f'FAILED workflow "{workflow.name}", JobExecutionError.')
- logger.info(f'Workflow "{workflow.name}" failed (JobExecutionError).')
-
- fail_job(
- db=db_sync,
- job=job,
- log_msg=(
- f"JOB ERROR in Fractal job {job.id}:\n"
- f"TRACEBACK:\n{e.assemble_error()}"
- ),
- logger_name=logger_name,
- )
-
- except Exception:
- logger.debug(f'FAILED workflow "{workflow.name}", unknown error.')
- logger.info(f'Workflow "{workflow.name}" failed (unkwnon error).')
-
- current_traceback = traceback.format_exc()
- fail_job(
- db=db_sync,
- job=job,
- log_msg=(
- f"UNKNOWN ERROR in Fractal job {job.id}\n"
- f"TRACEBACK:\n{current_traceback}"
- ),
- logger_name=logger_name,
- )
-
- finally:
- reset_logger_handlers(logger)
- db_sync.close()
- _zip_folder_to_file_and_remove(folder=job.working_dir)
fractal_server/app/runner/v2/_local.py
@@ -3,7 +3,7 @@ from typing import Optional

  from ...models.v2 import DatasetV2
  from ...models.v2 import WorkflowV2
- from ..executors.local._submit_setup import _local_submit_setup
+ from ..executors.local.get_local_config import get_local_backend_config
  from ..executors.local.runner import LocalRunner
  from ..set_start_and_last_task_index import set_start_and_last_task_index
  from .runner import execute_tasks_v2
@@ -20,6 +20,7 @@ def process_workflow(
  last_task_index: Optional[int] = None,
  logger_name: str,
  job_attribute_filters: AttributeFiltersType,
+ job_type_filters: dict[str, bool],
  user_id: int,
  **kwargs,
  ) -> None:
@@ -78,7 +79,8 @@ def process_workflow(
  workflow_dir_local=workflow_dir_local,
  workflow_dir_remote=workflow_dir_local,
  logger_name=logger_name,
- submit_setup_call=_local_submit_setup,
+ get_runner_config=get_local_backend_config,
  job_attribute_filters=job_attribute_filters,
+ job_type_filters=job_type_filters,
  user_id=user_id,
  )
fractal_server/app/runner/v2/_slurm_ssh.py
@@ -23,7 +23,7 @@ from ....ssh._fabric import FractalSSH
  from ...models.v2 import DatasetV2
  from ...models.v2 import WorkflowV2
  from ..exceptions import JobExecutionError
- from ..executors.slurm_common._submit_setup import _slurm_submit_setup
+ from ..executors.slurm_common.get_slurm_config import get_slurm_config
  from ..executors.slurm_ssh.executor import FractalSlurmSSHExecutor
  from ..set_start_and_last_task_index import set_start_and_last_task_index
  from .runner import execute_tasks_v2
@@ -43,6 +43,7 @@ def process_workflow(
  last_task_index: Optional[int] = None,
  logger_name: str,
  job_attribute_filters: AttributeFiltersType,
+ job_type_filters: dict[str, bool],
  fractal_ssh: FractalSSH,
  worker_init: Optional[str] = None,
  user_id: int,
@@ -90,7 +91,8 @@ def process_workflow(
  workflow_dir_local=workflow_dir_local,
  workflow_dir_remote=workflow_dir_remote,
  logger_name=logger_name,
- submit_setup_call=_slurm_submit_setup,
+ get_runner_config=get_slurm_config,
  job_attribute_filters=job_attribute_filters,
+ job_type_filters=job_type_filters,
  user_id=user_id,
  )
fractal_server/app/runner/v2/_slurm_sudo.py
@@ -21,7 +21,7 @@ from typing import Optional

  from ...models.v2 import DatasetV2
  from ...models.v2 import WorkflowV2
- from ..executors.slurm_common._submit_setup import _slurm_submit_setup
+ from ..executors.slurm_common.get_slurm_config import get_slurm_config
  from ..executors.slurm_sudo.runner import RunnerSlurmSudo
  from ..set_start_and_last_task_index import set_start_and_last_task_index
  from .runner import execute_tasks_v2
@@ -38,6 +38,7 @@ def process_workflow(
  last_task_index: Optional[int] = None,
  logger_name: str,
  job_attribute_filters: AttributeFiltersType,
+ job_type_filters: dict[str, bool],
  user_id: int,
  # Slurm-specific
  user_cache_dir: Optional[str] = None,
@@ -82,7 +83,8 @@ def process_workflow(
  workflow_dir_local=workflow_dir_local,
  workflow_dir_remote=workflow_dir_remote,
  logger_name=logger_name,
- submit_setup_call=_slurm_submit_setup,
+ get_runner_config=get_slurm_config,
  job_attribute_filters=job_attribute_filters,
+ job_type_filters=job_type_filters,
  user_id=user_id,
  )
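All three backends now forward a `job_type_filters: dict[str, bool]` argument to `execute_tasks_v2`, next to the existing `job_attribute_filters`, and pass a `get_runner_config` callable where they previously passed `submit_setup_call`. A minimal sketch of what such a type-filter mapping might look like is shown below; the key names are hypothetical image-type flags, and the True/False semantics (select images that do or do not carry the type) are an assumption based on Fractal's existing type filters, not something stated in this diff:

    # Hypothetical job-level type filters, shaped like the new `dict[str, bool]`
    # argument added in these hunks. Key names are made up for illustration;
    # True/False presumably selects images that do / do not carry that type.
    job_type_filters: dict[str, bool] = {
        "is_3D": True,                    # e.g. only process 3D images
        "illumination_corrected": False,  # e.g. skip already-corrected images
    }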
fractal_server/app/runner/v2/db_tools.py
@@ -0,0 +1,87 @@
+ from typing import Any
+
+ from sqlalchemy.dialects.postgresql import insert as pg_insert
+ from sqlalchemy.orm import Session
+
+ from fractal_server.app.models.v2 import HistoryImageCache
+ from fractal_server.app.models.v2 import HistoryRun
+ from fractal_server.app.models.v2 import HistoryUnit
+ from fractal_server.app.schemas.v2 import HistoryUnitStatus
+
+
+ def update_status_of_history_run(
+ *,
+ history_run_id: int,
+ status: HistoryUnitStatus,
+ db_sync: Session,
+ ) -> None:
+ run = db_sync.get(HistoryRun, history_run_id)
+ if run is None:
+ raise ValueError(f"HistoryRun {history_run_id} not found.")
+ run.status = status
+ db_sync.merge(run)
+ db_sync.commit()
+
+
+ def update_status_of_history_unit(
+ *,
+ history_unit_id: int,
+ status: HistoryUnitStatus,
+ db_sync: Session,
+ ) -> None:
+ unit = db_sync.get(HistoryUnit, history_unit_id)
+ if unit is None:
+ raise ValueError(f"HistoryUnit {history_unit_id} not found.")
+ unit.status = status
+ db_sync.merge(unit)
+ db_sync.commit()
+
+
+ _CHUNK_SIZE = 2_000
+
+
+ def bulk_upsert_image_cache_fast(
+ *,
+ list_upsert_objects: list[dict[str, Any]],
+ db: Session,
+ ) -> None:
+ """
+ Insert or update many objects into `HistoryImageCache` and commit
+
+ This function is an optimized version of
+
+ ```python
+ for obj in list_upsert_objects:
+ db.merge(**obj)
+ db.commit()
+ ```
+
+ See docs at
+ https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#insert-on-conflict-upsert
+
+ FIXME: we tried to replace `index_elements` with
+ `constraint="pk_historyimagecache"`, but it did not work as expected.
+
+ Arguments:
+ list_upsert_objects:
+ List of dictionaries for objects to be upsert-ed.
+ db: A sync database session
+ """
+ if len(list_upsert_objects) == 0:
+ return None
+
+ for ind in range(0, len(list_upsert_objects), _CHUNK_SIZE):
+ stmt = pg_insert(HistoryImageCache).values(
+ list_upsert_objects[ind : ind + _CHUNK_SIZE]
+ )
+ stmt = stmt.on_conflict_do_update(
+ index_elements=[
+ HistoryImageCache.zarr_url,
+ HistoryImageCache.dataset_id,
+ HistoryImageCache.workflowtask_id,
+ ],
+ set_=dict(
+ latest_history_unit_id=stmt.excluded.latest_history_unit_id
+ ),
+ )
+ db.execute(stmt)
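The new `db_tools.py` helper `bulk_upsert_image_cache_fast` batches PostgreSQL `INSERT ... ON CONFLICT DO UPDATE` statements in chunks of `_CHUNK_SIZE = 2_000` rows instead of merging rows one at a time. A minimal usage sketch follows; the values, the database URL, and the final commit are illustrative assumptions, while the import path and the column names come from this diff:

    # Hypothetical usage of bulk_upsert_image_cache_fast. Values are made up;
    # the dict keys match the HistoryImageCache columns referenced above.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session

    from fractal_server.app.runner.v2.db_tools import bulk_upsert_image_cache_fast

    cache_rows = [
        {
            "zarr_url": f"/data/plate.zarr/A/01/{i}",  # hypothetical image URL
            "dataset_id": 1,                           # hypothetical dataset
            "workflowtask_id": 7,                      # hypothetical workflow task
            "latest_history_unit_id": 1000 + i,        # unit that last touched the image
        }
        for i in range(5000)  # > _CHUNK_SIZE, so several INSERT statements are emitted
    ]

    engine = create_engine("postgresql://user:pw@localhost/fractal")  # hypothetical DSN
    with Session(engine) as db:
        # Rows whose (zarr_url, dataset_id, workflowtask_id) key already exists are
        # updated (latest_history_unit_id only); new keys are inserted.
        bulk_upsert_image_cache_fast(list_upsert_objects=cache_rows, db=db)
        # Commit explicitly: the helper body shown above executes the statements
        # but does not itself call commit.
        db.commit()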