fractal-server 2.14.11__py3-none-any.whl → 2.14.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- __VERSION__ = "2.14.11"
+ __VERSION__ = "2.14.13"
@@ -0,0 +1,163 @@
+ import itertools
+
+ from sqlmodel import select
+
+ from fractal_server.app.db import AsyncSession
+ from fractal_server.app.models import LinkUserGroup
+ from fractal_server.app.models.v2 import TaskGroupV2
+ from fractal_server.exceptions import UnreachableBranchError
+ from fractal_server.logger import set_logger
+
+
+ logger = set_logger(__name__)
+
+
+ async def _disambiguate_task_groups(
+     *,
+     matching_task_groups: list[TaskGroupV2],
+     user_id: int,
+     default_group_id: int,
+     db: AsyncSession,
+ ) -> TaskGroupV2 | None:
+     """
+     Find the ownership-based top-priority task group, if any.
+
+     Args:
+         matching_task_groups:
+         user_id:
+         default_group_id:
+         db:
+
+     Returns:
+         The task group or `None`.
+     """
+
+     # Highest priority: task groups created by user
+     list_user_ids = [tg.user_id for tg in matching_task_groups]
+     try:
+         ind_user_id = list_user_ids.index(user_id)
+         task_group = matching_task_groups[ind_user_id]
+         logger.debug(
+             "[_disambiguate_task_groups] "
+             f"Found task group {task_group.id} with {user_id=}, return."
+         )
+         return task_group
+     except ValueError:
+         logger.debug(
+             "[_disambiguate_task_groups] "
+             f"No task group with {user_id=}, continue."
+         )
+
+     # Medium priority: task groups owned by default user group
+     list_user_group_ids = [tg.user_group_id for tg in matching_task_groups]
+     try:
+         ind_user_group_id = list_user_group_ids.index(default_group_id)
+         task_group = matching_task_groups[ind_user_group_id]
+         logger.debug(
+             "[_disambiguate_task_groups] Found task group "
+             f"{task_group.id} with user_group_id={default_group_id}, return."
+         )
+         return task_group
+     except ValueError:
+         logger.debug(
+             "[_disambiguate_task_groups] "
+             "No task group with user_group_id="
+             f"{default_group_id}, continue."
+         )
+
+     # Lowest priority: task groups owned by other groups, sorted
+     # according to age of the user/usergroup link
+     logger.debug(
+         "[_disambiguate_task_groups] "
+         "Sort remaining task groups by oldest-user-link."
+     )
+     stm = (
+         select(LinkUserGroup.group_id)
+         .where(LinkUserGroup.user_id == user_id)
+         .where(LinkUserGroup.group_id.in_(list_user_group_ids))
+         .order_by(LinkUserGroup.timestamp_created.asc())
+     )
+     res = await db.execute(stm)
+     oldest_user_group_id = res.scalars().first()
+     logger.debug(
+         f"[_disambiguate_task_groups] Result: {oldest_user_group_id=}."
+     )
+     task_group = next(
+         iter(
+             task_group
+             for task_group in matching_task_groups
+             if task_group.user_group_id == oldest_user_group_id
+         ),
+         None,
+     )
+     return task_group
+
+
+ async def _disambiguate_task_groups_not_none(
+     *,
+     matching_task_groups: list[TaskGroupV2],
+     user_id: int,
+     default_group_id: int,
+     db: AsyncSession,
+ ) -> TaskGroupV2:
+     """
+     Find the ownership-based top-priority task group, or fail if none exists.
+
+     Args:
+         matching_task_groups:
+         user_id:
+         default_group_id:
+         db:
+
+     Returns:
+         The top-priority task group.
+     """
+     task_group = await _disambiguate_task_groups(
+         matching_task_groups=matching_task_groups,
+         user_id=user_id,
+         default_group_id=default_group_id,
+         db=db,
+     )
+     if task_group is None:
+         error_msg = (
+             "[_disambiguate_task_groups_not_none] Could not find a task "
+             f"group ({user_id=}, {default_group_id=})."
+         )
+         logger.error(f"UnreachableBranchError {error_msg}")
+         raise UnreachableBranchError(error_msg)
+     else:
+         return task_group
+
+
+ async def remove_duplicate_task_groups(
+     *,
+     task_groups: list[TaskGroupV2],
+     user_id: int,
+     default_group_id: int,
+     db: AsyncSession,
+ ) -> list[TaskGroupV2]:
+     """
+     Extract a single task group for each `version`.
+
+     Args:
+         task_groups: A list of task groups with identical `pkg_name`
+         user_id: User ID
+
+     Returns:
+         New list of task groups with no duplicate `(pkg_name,version)` entries
+     """
+
+     new_task_groups = [
+         (
+             await _disambiguate_task_groups_not_none(
+                 matching_task_groups=list(groups),
+                 user_id=user_id,
+                 default_group_id=default_group_id,
+                 db=db,
+             )
+         )
+         for version, groups in itertools.groupby(
+             task_groups, key=lambda tg: tg.version
+         )
+     ]
+     return new_task_groups
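Note on the helper above: `itertools.groupby` only merges adjacent items with equal keys, so `remove_duplicate_task_groups` relies on its input already being grouped by `version` (its caller passes task groups ordered by `pkg_name`). A minimal sketch of the pattern, using a hypothetical `TG` record in place of `TaskGroupV2` rows:

    import itertools
    from collections import namedtuple

    TG = namedtuple("TG", ["pkg_name", "version"])  # hypothetical stand-in

    rows = [
        TG("fractal-tasks-core", "1.0.0"),
        TG("fractal-tasks-core", "1.0.0"),  # duplicate version, different owner
        TG("fractal-tasks-core", "1.2.0"),
    ]
    # groupby yields one group per run of equal keys; unsorted input would
    # yield several groups for the same version.
    for version, groups in itertools.groupby(rows, key=lambda tg: tg.version):
        print(version, len(list(groups)))
    # 1.0.0 2
    # 1.2.0 1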
@@ -1,5 +1,4 @@
  from copy import deepcopy
- from typing import Any
 
  from fastapi import APIRouter
  from fastapi import Depends
@@ -16,6 +15,7 @@ from ._aux_functions_history import get_history_run_or_404
  from ._aux_functions_history import get_history_unit_or_404
  from ._aux_functions_history import get_wftask_check_owner
  from ._aux_functions_history import read_log_file
+ from .images import ImagePage
  from .images import ImageQuery
  from fractal_server.app.db import AsyncSession
  from fractal_server.app.db import get_async_db
@@ -34,13 +34,11 @@ from fractal_server.app.schemas.v2 import HistoryUnitRead
  from fractal_server.app.schemas.v2 import HistoryUnitStatus
  from fractal_server.app.schemas.v2 import HistoryUnitStatusWithUnset
  from fractal_server.app.schemas.v2 import ImageLogsRequest
- from fractal_server.app.schemas.v2 import SingleImageWithStatus
- from fractal_server.images.image_status import enrich_image_list
- from fractal_server.images.image_status import IMAGE_STATUS_KEY
+ from fractal_server.images.status_tools import enrich_images_async
+ from fractal_server.images.status_tools import IMAGE_STATUS_KEY
  from fractal_server.images.tools import aggregate_attributes
  from fractal_server.images.tools import aggregate_types
  from fractal_server.images.tools import filter_image_list
- from fractal_server.images.tools import merge_type_filters
  from fractal_server.logger import set_logger
 
 
@@ -62,11 +60,6 @@ def check_historyrun_related_to_dataset_and_wftask(
      )
 
 
- class ImageWithStatusPage(PaginationResponse[SingleImageWithStatus]):
-     attributes: dict[str, list[Any]]
-     types: list[str]
-
-
  router = APIRouter()
  logger = set_logger(__name__)
 
@@ -299,11 +292,11 @@ async def get_history_images(
      dataset_id: int,
      workflowtask_id: int,
      request_body: ImageQuery,
-     unit_status: HistoryUnitStatusWithUnset | None = None,
      user: UserOAuth = Depends(current_active_user),
      db: AsyncSession = Depends(get_async_db),
      pagination: PaginationRequest = Depends(get_pagination_params),
- ) -> ImageWithStatusPage:
+ ) -> ImagePage:
+
      # Access control and object retrieval
      wftask = await get_wftask_check_owner(
          project_id=project_id,
@@ -320,82 +313,54 @@ async def get_history_images(
          db=db,
      )
      dataset = res["dataset"]
-     workflow = res["workflow"]
 
      # Setup prefix for logging
-     prefix = f"[DS{dataset.id}-WFT{wftask.id}-images]"
-
-     # (1) Get the type-filtered list of dataset images
-
-     # (1A) Reconstruct dataset type filters by starting from {} and making
-     # incremental updates with `output_types` of all previous tasks
-     inferred_dataset_type_filters = {}
-     for current_wftask in workflow.task_list[0 : wftask.order]:
-         inferred_dataset_type_filters.update(current_wftask.task.output_types)
-     logger.debug(f"{prefix} {inferred_dataset_type_filters=}")
-     # (1B) Compute type filters for the current wftask
-     type_filters_patch = merge_type_filters(
-         task_input_types=wftask.task.input_types,
-         wftask_type_filters=wftask.type_filters,
-     )
-     logger.debug(f"{prefix} {type_filters_patch=}")
-     # (1C) Combine dataset type filters (lower priority) and current-wftask
-     # filters (higher priority)
-     actual_filters = inferred_dataset_type_filters
-     actual_filters.update(type_filters_patch)
-     logger.debug(f"{prefix} {actual_filters=}")
-     # (1D) Get all matching images from the dataset
-
-     pre_filtered_dataset_images = filter_image_list(
+     prefix = f"[DS{dataset.id}-WFT{workflowtask_id}-images]"
+
+     # (1) Apply type filters
+     type_filtered_images = filter_image_list(
          images=dataset.images,
-         type_filters=inferred_dataset_type_filters,
+         type_filters=request_body.type_filters,
      )
 
-     full_images_list = await enrich_image_list(
+     # (2) Extract valid values for attributes and types
+     attributes = aggregate_attributes(type_filtered_images)
+     attributes[IMAGE_STATUS_KEY] = [
+         HistoryUnitStatusWithUnset.DONE,
+         HistoryUnitStatusWithUnset.SUBMITTED,
+         HistoryUnitStatusWithUnset.FAILED,
+         HistoryUnitStatusWithUnset.UNSET,
+     ]
+     types = aggregate_types(type_filtered_images)
+
+     # (3) Enrich images with status attribute
+     type_filtered_images_with_status = await enrich_images_async(
          dataset_id=dataset_id,
         workflowtask_id=workflowtask_id,
-         images=pre_filtered_dataset_images,
+         images=type_filtered_images,
          db=db,
      )
 
-     if unit_status is not None:
-         request_body.attribute_filters[IMAGE_STATUS_KEY] = unit_status
-
-     filtered_dataset_images = filter_image_list(
-         full_images_list,
-         type_filters=request_body.type_filters,
+     # (4) Apply attribute filters
+     final_images_with_status = filter_image_list(
+         type_filtered_images_with_status,
          attribute_filters=request_body.attribute_filters,
      )
-     logger.debug(f"{prefix} {len(dataset.images)=}")
-     logger.debug(f"{prefix} {len(filtered_dataset_images)=}")
-
-     attributes = aggregate_attributes(pre_filtered_dataset_images)
-     types = aggregate_types(pre_filtered_dataset_images)
 
-     # Final list of objects
+     logger.debug(f"{prefix} {len(dataset.images)=}")
+     logger.debug(f"{prefix} {len(final_images_with_status)=}")
 
-     total_count = len(filtered_dataset_images)
+     # (5) Apply pagination logic
+     total_count = len(final_images_with_status)
      page_size = pagination.page_size or total_count
      sorted_images_list = sorted(
-         filtered_dataset_images,
+         final_images_with_status,
          key=lambda image: image["zarr_url"],
      )
      paginated_images_list = sorted_images_list[
          (pagination.page - 1) * page_size : pagination.page * page_size
      ]
 
-     # FIXME: This is only for backwards-compatibility. To remove when we
-     # update the webclient
-     paginated_images_list = [
-         {
-             **img,
-             "status": (
-                 lambda x: None if x == HistoryUnitStatusWithUnset.UNSET else x
-             )(img["attributes"].pop(IMAGE_STATUS_KEY)),
-         }
-         for img in paginated_images_list
-     ]
-
      return dict(
          current_page=pagination.page,
          page_size=page_size,
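For reference, the pagination block above defaults `page_size` to the full image count and slices with 1-based page numbers. A quick illustration of the arithmetic, with made-up values:

    images = [f"/zarr/{i}" for i in range(10)]
    page, page_size = 2, 4
    # Same slice as in get_history_images: items 4..7 for page 2.
    print(images[(page - 1) * page_size : page * page_size])
    # ['/zarr/4', '/zarr/5', '/zarr/6', '/zarr/7']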
@@ -1,5 +1,3 @@
- from typing import Any
-
  from fastapi import APIRouter
  from fastapi import Depends
  from fastapi import HTTPException
@@ -26,6 +24,7 @@ from fractal_server.images.tools import aggregate_types
  from fractal_server.images.tools import find_image_by_zarr_url
  from fractal_server.images.tools import match_filter
  from fractal_server.types import AttributeFilters
+ from fractal_server.types import ImageAttributeValue
  from fractal_server.types import TypeFilters
 
  router = APIRouter()
@@ -33,11 +32,19 @@ router = APIRouter()
 
  class ImagePage(PaginationResponse[SingleImage]):
 
-     attributes: dict[str, list[Any]]
+     attributes: dict[str, list[ImageAttributeValue]]
      types: list[str]
 
 
  class ImageQuery(BaseModel):
+     """
+     Query for a list of images.
+
+     Attributes:
+         type_filters:
+         attribute_filters:
+     """
+
      type_filters: TypeFilters = Field(default_factory=dict)
      attribute_filters: AttributeFilters = Field(default_factory=dict)
 
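A sketch of a request body matching `ImageQuery`; the attribute and type names here are invented for illustration:

    # Keep only 3D images whose "well" attribute is A01 or B03.
    payload = {
        "type_filters": {"is_3D": True},
        "attribute_filters": {"well": ["A01", "B03"]},
    }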
@@ -1,8 +1,13 @@
+ import itertools
+
  from fastapi import APIRouter
  from fastapi import Depends
  from fastapi import HTTPException
  from fastapi import Response
  from fastapi import status
+ from packaging.version import InvalidVersion
+ from packaging.version import parse
+ from packaging.version import Version
  from pydantic.types import AwareDatetime
  from sqlmodel import or_
  from sqlmodel import select
@@ -10,6 +15,7 @@ from sqlmodel import select
  from ._aux_functions_tasks import _get_task_group_full_access
  from ._aux_functions_tasks import _get_task_group_read_access
  from ._aux_functions_tasks import _verify_non_duplication_group_constraint
+ from ._aux_task_group_disambiguation import remove_duplicate_task_groups
  from fractal_server.app.db import AsyncSession
  from fractal_server.app.db import get_async_db
  from fractal_server.app.models import LinkUserGroup
@@ -18,6 +24,7 @@ from fractal_server.app.models.v2 import TaskGroupActivityV2
  from fractal_server.app.models.v2 import TaskGroupV2
  from fractal_server.app.models.v2 import WorkflowTaskV2
  from fractal_server.app.routes.auth import current_active_user
+ from fractal_server.app.routes.auth._aux_auth import _get_default_usergroup_id
  from fractal_server.app.routes.auth._aux_auth import (
      _verify_user_belongs_to_group,
  )
@@ -33,6 +40,26 @@ router = APIRouter()
  logger = set_logger(__name__)
 
 
+ def _version_sort_key(
+     task_group: TaskGroupV2,
+ ) -> tuple[int, Version | str | None]:
+     """
+     Return a tuple used as the (reverse) ordering key for task groups
+     in `get_task_group_list`.
+
+     Task groups with a parsable version come first, sorted according to
+     `packaging.version.Version`. Next come task groups with non-null,
+     non-parsable versions, sorted alphabetically. Task groups with a
+     null version come last.
+     """
+     if task_group.version is None:
+         return (0, task_group.version)
+     try:
+         return (2, parse(task_group.version))
+     except InvalidVersion:
+         return (1, task_group.version)
+
+
  @router.get("/activity/", response_model=list[TaskGroupActivityV2Read])
  async def get_task_group_activity_list(
      task_group_activity_id: int | None = None,
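A quick check of the ordering `_version_sort_key` yields under `reverse=True`, using hypothetical objects that only carry a `version` field; note that `packaging` puts "1.10.0" above "1.2.0", which plain string sorting would not:

    from types import SimpleNamespace
    from packaging.version import InvalidVersion, parse

    def _version_sort_key(task_group):
        # Same logic as the helper above.
        if task_group.version is None:
            return (0, task_group.version)
        try:
            return (2, parse(task_group.version))
        except InvalidVersion:
            return (1, task_group.version)

    groups = [
        SimpleNamespace(version=v)
        for v in ["1.10.0", None, "not-a-version", "1.2.0"]
    ]
    ordered = sorted(groups, key=_version_sort_key, reverse=True)
    print([g.version for g in ordered])
    # ['1.10.0', '1.2.0', 'not-a-version', None]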
@@ -97,14 +124,14 @@ async def get_task_group_activity(
      return activity
 
 
- @router.get("/", response_model=list[TaskGroupReadV2])
+ @router.get("/", response_model=list[tuple[str, list[TaskGroupReadV2]]])
  async def get_task_group_list(
      user: UserOAuth = Depends(current_active_user),
      db: AsyncSession = Depends(get_async_db),
      only_active: bool = False,
      only_owner: bool = False,
      args_schema: bool = True,
- ) -> list[TaskGroupReadV2]:
+ ) -> list[tuple[str, list[TaskGroupReadV2]]]:
      """
      Get all accessible TaskGroups
      """
@@ -119,7 +146,7 @@ async def get_task_group_list(
              )
          ),
      )
-     stm = select(TaskGroupV2).where(condition)
+     stm = select(TaskGroupV2).where(condition).order_by(TaskGroupV2.pkg_name)
      if only_active:
          stm = stm.where(TaskGroupV2.active)
 
@@ -132,7 +159,28 @@ async def get_task_group_list(
                  setattr(task, "args_schema_non_parallel", None)
                  setattr(task, "args_schema_parallel", None)
 
-     return task_groups
+     default_group_id = await _get_default_usergroup_id(db)
+     grouped_result = [
+         (
+             pkg_name,
+             sorted(
+                 (
+                     await remove_duplicate_task_groups(
+                         task_groups=list(groups),
+                         user_id=user.id,
+                         default_group_id=default_group_id,
+                         db=db,
+                     )
+                 ),
+                 key=_version_sort_key,
+                 reverse=True,
+             ),
+         )
+         for pkg_name, groups in itertools.groupby(
+             task_groups, key=lambda tg: tg.pkg_name
+         )
+     ]
+     return grouped_result
 
 
  @router.get("/{task_group_id}/", response_model=TaskGroupReadV2)
@@ -21,6 +21,9 @@ from ._aux_functions_tasks import _check_type_filters_compatibility
  from fractal_server.app.models import LinkUserGroup
  from fractal_server.app.models import UserOAuth
  from fractal_server.app.models.v2 import TaskGroupV2
+ from fractal_server.app.routes.api.v2._aux_task_group_disambiguation import (
+     _disambiguate_task_groups,
+ )
  from fractal_server.app.routes.auth import current_active_user
  from fractal_server.app.routes.auth._aux_auth import _get_default_usergroup_id
  from fractal_server.app.schemas.v2 import TaskImportV2
@@ -85,76 +88,6 @@ async def _get_task_by_source(
      return task_id
 
 
- async def _disambiguate_task_groups(
-     *,
-     matching_task_groups: list[TaskGroupV2],
-     user_id: int,
-     db: AsyncSession,
-     default_group_id: int,
- ) -> TaskV2 | None:
-     """
-     Disambiguate task groups based on ownership information.
-     """
-     # Highest priority: task groups created by user
-     for task_group in matching_task_groups:
-         if task_group.user_id == user_id:
-             logger.info(
-                 "[_disambiguate_task_groups] "
-                 f"Found task group {task_group.id} with {user_id=}, return."
-             )
-             return task_group
-     logger.info(
-         "[_disambiguate_task_groups] "
-         f"No task group found with {user_id=}, continue."
-     )
-
-     # Medium priority: task groups owned by default user group
-     for task_group in matching_task_groups:
-         if task_group.user_group_id == default_group_id:
-             logger.info(
-                 "[_disambiguate_task_groups] "
-                 f"Found task group {task_group.id} with user_group_id="
-                 f"{default_group_id}, return."
-             )
-             return task_group
-     logger.info(
-         "[_disambiguate_task_groups] "
-         "No task group found with user_group_id="
-         f"{default_group_id}, continue."
-     )
-
-     # Lowest priority: task groups owned by other groups, sorted
-     # according to age of the user/usergroup link
-     logger.info(
-         "[_disambiguate_task_groups] "
-         "Now sorting remaining task groups by oldest-user-link."
-     )
-     user_group_ids = [
-         task_group.user_group_id for task_group in matching_task_groups
-     ]
-     stm = (
-         select(LinkUserGroup.group_id)
-         .where(LinkUserGroup.user_id == user_id)
-         .where(LinkUserGroup.group_id.in_(user_group_ids))
-         .order_by(LinkUserGroup.timestamp_created.asc())
-     )
-     res = await db.execute(stm)
-     oldest_user_group_id = res.scalars().first()
-     logger.info(
-         "[_disambiguate_task_groups] "
-         f"Result of sorting: {oldest_user_group_id=}."
-     )
-     task_group = next(
-         iter(
-             task_group
-             for task_group in matching_task_groups
-             if task_group.user_group_id == oldest_user_group_id
-         ),
-         None,
-     )
-     return task_group
-
-
  async def _get_task_by_taskimport(
      *,
      task_import: TaskImportV2,
@@ -33,10 +33,23 @@ from fractal_server.app.runner.v2.db_tools import update_status_of_history_run
  from fractal_server.app.schemas.v2 import HistoryUnitStatus
  from fractal_server.app.schemas.v2 import TaskDumpV2
  from fractal_server.app.schemas.v2 import TaskGroupDumpV2
+ from fractal_server.images.status_tools import enrich_images_sync
+ from fractal_server.images.status_tools import IMAGE_STATUS_KEY
  from fractal_server.images.tools import merge_type_filters
  from fractal_server.types import AttributeFilters
 
 
+ def _remove_status_from_attributes(
+     images: list[dict[str, Any]],
+ ) -> list[dict[str, Any]]:
+     """
+     Drop attribute `IMAGE_STATUS_KEY` from all images.
+     """
+     images_copy = deepcopy(images)
+     [img["attributes"].pop(IMAGE_STATUS_KEY) for img in images_copy]
+     return images_copy
+
+
  def drop_none_attributes(attributes: dict[str, Any]) -> dict[str, Any]:
      # Unset attributes with `None` value
      non_none_attributes = {
@@ -106,7 +119,11 @@ def execute_tasks_v2(
      tmp_images = deepcopy(dataset.images)
      current_dataset_type_filters = copy(job_type_filters)
 
-     for wftask in wf_task_list:
+     ENRICH_IMAGES_WITH_STATUS: bool = (
+         IMAGE_STATUS_KEY in job_attribute_filters.keys()
+     )
+
+     for ind_wftask, wftask in enumerate(wf_task_list):
          task = wftask.task
          task_name = task.name
          logger.debug(f'SUBMIT {wftask.order}-th task (name="{task_name}")')
@@ -122,15 +139,22 @@ def execute_tasks_v2(
                  wftask_type_filters=wftask.type_filters,
              )
              type_filters.update(type_filters_patch)
+
+             if ind_wftask == 0 and ENRICH_IMAGES_WITH_STATUS:
+                 # FIXME: Could this be done on `type_filtered_images`?
+                 tmp_images = enrich_images_sync(
+                     images=tmp_images,
+                     dataset_id=dataset.id,
+                     workflowtask_id=wftask.id,
+                 )
              type_filtered_images = filter_image_list(
                  images=tmp_images,
                  type_filters=type_filters,
-                 attribute_filters=None,
              )
              num_available_images = len(type_filtered_images)
+
              filtered_images = filter_image_list(
                  images=type_filtered_images,
-                 type_filters=None,
                  attribute_filters=job_attribute_filters,
              )
          else:
@@ -379,7 +403,11 @@ def execute_tasks_v2(
      with next(get_sync_db()) as db:
          # Write current dataset images into the database.
          db_dataset = db.get(DatasetV2, dataset.id)
-         db_dataset.images = tmp_images
+         if ENRICH_IMAGES_WITH_STATUS:
+
+             db_dataset.images = _remove_status_from_attributes(tmp_images)
+         else:
+             db_dataset.images = tmp_images
          flag_modified(db_dataset, "images")
          db.merge(db_dataset)
 
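The status attribute is injected only when the job's attribute filters reference `IMAGE_STATUS_KEY`, and it is stripped again before images are written back to the database. A small round-trip sketch of that strip step, assuming the image-dict layout used throughout this diff:

    from copy import deepcopy

    IMAGE_STATUS_KEY = "__wftask_dataset_image_status__"

    def _remove_status_from_attributes(images):
        # Work on a copy so the enriched in-memory list stays untouched.
        images_copy = deepcopy(images)
        for img in images_copy:
            img["attributes"].pop(IMAGE_STATUS_KEY)
        return images_copy

    images = [
        {"zarr_url": "/a", "attributes": {IMAGE_STATUS_KEY: "done", "well": "A01"}}
    ]
    clean = _remove_status_from_attributes(images)
    print(clean[0]["attributes"])                       # {'well': 'A01'}
    print(IMAGE_STATUS_KEY in images[0]["attributes"])  # True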
@@ -16,7 +16,6 @@ from .history import HistoryUnitRead # noqa F401
  from .history import HistoryUnitStatus # noqa F401
  from .history import HistoryUnitStatusWithUnset # noqa F401
  from .history import ImageLogsRequest # noqa F401
- from .history import SingleImageWithStatus # noqa F401
  from .job import JobCreateV2 # noqa F401
  from .job import JobReadV2 # noqa F401
  from .job import JobStatusTypeV2 # noqa F401
@@ -6,8 +6,6 @@ from pydantic import AwareDatetime
  from pydantic import BaseModel
  from pydantic import field_serializer
 
- from ....images import SingleImage
-
 
  class HistoryUnitStatus(StrEnum):
      """
@@ -83,8 +81,3 @@ class ImageLogsRequest(BaseModel):
      workflowtask_id: int
      dataset_id: int
      zarr_url: str
-
-
- # FIXME: remove this when we update the webclient
- class SingleImageWithStatus(SingleImage):
-     status: HistoryUnitStatus | None = None
@@ -0,0 +1,2 @@
+ class UnreachableBranchError(RuntimeError):
+     pass
@@ -0,0 +1,174 @@
+ import time
+ from copy import deepcopy
+ from typing import Any
+
+ from sqlalchemy import Select
+ from sqlalchemy.ext.asyncio import AsyncSession
+ from sqlmodel import select
+
+ from fractal_server.app.db import get_sync_db
+ from fractal_server.app.models.v2 import HistoryImageCache
+ from fractal_server.app.models.v2 import HistoryUnit
+ from fractal_server.app.schemas.v2 import HistoryUnitStatusWithUnset
+ from fractal_server.logger import set_logger
+ from fractal_server.types import ImageAttributeValue
+
+ logger = set_logger(__name__)
+
+
+ IMAGE_STATUS_KEY = "__wftask_dataset_image_status__"
+
+
+ def _enriched_image(*, img: dict[str, Any], status: str) -> dict[str, Any]:
+     img["attributes"][IMAGE_STATUS_KEY] = status
+     return img
+
+
+ def _prepare_query(
+     *,
+     dataset_id: int,
+     workflowtask_id: int,
+     zarr_urls: list[str],
+ ) -> Select:
+     stm = (
+         select(HistoryImageCache.zarr_url, HistoryUnit.status)
+         .join(HistoryUnit)
+         .where(HistoryImageCache.dataset_id == dataset_id)
+         .where(HistoryImageCache.workflowtask_id == workflowtask_id)
+         .where(HistoryImageCache.latest_history_unit_id == HistoryUnit.id)
+         .where(HistoryImageCache.zarr_url.in_(zarr_urls))
+         .order_by(HistoryImageCache.zarr_url)
+     )
+     return stm
+
+
+ async def enrich_images_async(
+     *,
+     images: list[dict[str, Any]],
+     dataset_id: int,
+     workflowtask_id: int,
+     db: AsyncSession,
+ ) -> list[dict[str, ImageAttributeValue]]:
+     """
+     Enrich images with a status-related attribute.
+
+     Args:
+         images: The input image list
+         dataset_id: The dataset ID
+         workflowtask_id: The workflow-task ID
+         db: An async db session
+
+     Returns:
+         The list of enriched images
+     """
+     t_0 = time.perf_counter()
+     logger.info(
+         f"[enrich_images_async] START, {dataset_id=}, {workflowtask_id=}"
+     )
+
+     zarr_url_to_image = {img["zarr_url"]: deepcopy(img) for img in images}
+
+     res = await db.execute(
+         _prepare_query(
+             dataset_id=dataset_id,
+             workflowtask_id=workflowtask_id,
+             zarr_urls=zarr_url_to_image.keys(),
+         )
+     )
+     list_processed_url_status = res.all()
+     t_1 = time.perf_counter()
+     logger.debug(f"[enrich_images_async] db-query, elapsed={t_1 - t_0:.3f} s")
+
+     set_processed_urls = set(item[0] for item in list_processed_url_status)
+     processed_images_with_status = [
+         _enriched_image(
+             img=zarr_url_to_image[item[0]],
+             status=item[1],
+         )
+         for item in list_processed_url_status
+     ]
+     t_2 = time.perf_counter()
+     logger.debug(
+         f"[enrich_images_async] processed-images, elapsed={t_2 - t_1:.3f} s"
+     )
+
+     non_processed_urls = zarr_url_to_image.keys() - set_processed_urls
+     non_processed_images_with_status = [
+         _enriched_image(
+             img=zarr_url_to_image[zarr_url],
+             status=HistoryUnitStatusWithUnset.UNSET,
+         )
+         for zarr_url in non_processed_urls
+     ]
+     t_3 = time.perf_counter()
+     logger.debug(
+         "[enrich_images_async] non-processed-images, "
+         f"elapsed={t_3 - t_2:.3f} s"
+     )
+
+     return processed_images_with_status + non_processed_images_with_status
+
+
+ def enrich_images_sync(
+     *,
+     images: list[dict[str, Any]],
+     dataset_id: int,
+     workflowtask_id: int,
+ ) -> list[dict[str, ImageAttributeValue]]:
+     """
+     Enrich images with a status-related attribute.
+
+     Args:
+         images: The input image list
+         dataset_id: The dataset ID
+         workflowtask_id: The workflow-task ID
+
+     Returns:
+         The list of enriched images
+     """
+     t_0 = time.perf_counter()
+     logger.info(
+         f"[enrich_images_sync] START, {dataset_id=}, {workflowtask_id=}"
+     )
+
+     zarr_url_to_image = {img["zarr_url"]: deepcopy(img) for img in images}
+     with next(get_sync_db()) as db:
+         res = db.execute(
+             _prepare_query(
+                 dataset_id=dataset_id,
+                 workflowtask_id=workflowtask_id,
+                 zarr_urls=zarr_url_to_image.keys(),
+             )
+         )
+         list_processed_url_status = res.all()
+     t_1 = time.perf_counter()
+     logger.debug(f"[enrich_images_sync] db-query, elapsed={t_1 - t_0:.3f} s")
+
+     set_processed_urls = set(item[0] for item in list_processed_url_status)
+     processed_images_with_status = [
+         _enriched_image(
+             img=zarr_url_to_image[item[0]],
+             status=item[1],
+         )
+         for item in list_processed_url_status
+     ]
+     t_2 = time.perf_counter()
+     logger.debug(
+         f"[enrich_images_sync] processed-images, elapsed={t_2 - t_1:.3f} s"
+     )
+
+     non_processed_urls = zarr_url_to_image.keys() - set_processed_urls
+     non_processed_images_with_status = [
+         _enriched_image(
+             img=zarr_url_to_image[zarr_url],
+             status=HistoryUnitStatusWithUnset.UNSET,
+         )
+         for zarr_url in non_processed_urls
+     ]
+     t_3 = time.perf_counter()
+     logger.debug(
+         "[enrich_images_sync] non-processed-images, "
+         f"elapsed={t_3 - t_2:.3f} s"
+     )
+
+     return processed_images_with_status + non_processed_images_with_status
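The query built by `_prepare_query` maps each cached zarr_url to the status of its latest `HistoryUnit`; any image missing from the cache falls back to UNSET. A toy illustration of the merge step, with hypothetical rows standing in for DB results:

    IMAGE_STATUS_KEY = "__wftask_dataset_image_status__"

    images = [
        {"zarr_url": "/a", "attributes": {}},
        {"zarr_url": "/b", "attributes": {}},
    ]
    rows = [("/a", "done")]  # hypothetical (zarr_url, status) query results

    by_url = {img["zarr_url"]: img for img in images}
    for url, status in rows:
        by_url[url]["attributes"][IMAGE_STATUS_KEY] = status
    for url in by_url.keys() - {u for u, _ in rows}:
        by_url[url]["attributes"][IMAGE_STATUS_KEY] = "unset"

    print(images)
    # [{'zarr_url': '/a', 'attributes': {'__wftask_dataset_image_status__': 'done'}},
    #  {'zarr_url': '/b', 'attributes': {'__wftask_dataset_image_status__': 'unset'}}]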
@@ -125,7 +125,7 @@ def merge_type_filters(
  def aggregate_attributes(images: list[dict[str, Any]]) -> dict[str, list[Any]]:
      """
      Given a list of images, this function returns a dictionary of all image
-     attributes, each mapped to a list of present values.
+     attributes, each mapped to a sorted list of existing values.
      """
      attributes = {}
      for image in images:
@@ -133,7 +133,10 @@ def aggregate_attributes(images: list[dict[str, Any]]) -> dict[str, list[Any]]:
              attributes.setdefault(k, []).append(v)
      for k, v in attributes.items():
          attributes[k] = list(set(v))
-     return attributes
+     sorted_attributes = {
+         key: sorted(value) for key, value in attributes.items()
+     }
+     return sorted_attributes
 
 
  def aggregate_types(images: list[dict[str, Any]]) -> list[str]:
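A quick sketch of the new deduplicate-then-sort behavior of `aggregate_attributes` (the image dicts are made up); note that `sorted()` assumes the values collected for a given key are mutually comparable:

    images = [
        {"attributes": {"well": "B03", "plate": "p1"}},
        {"attributes": {"well": "A01", "plate": "p1"}},
    ]
    attributes = {}
    for image in images:
        for k, v in image["attributes"].items():
            attributes.setdefault(k, []).append(v)
    print({k: sorted(set(v)) for k, v in attributes.items()})
    # {'well': ['A01', 'B03'], 'plate': ['p1']}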
@@ -24,11 +24,21 @@ write_log "END upgrade pip and install setuptools"
  echo
 
  # Install package
- write_log "START install with INSTALL_STRING=${INSTALL_STRING} and PINNED_PACKAGE_LIST=${PINNED_PACKAGE_LIST}"
- "$VENVPYTHON" -m pip install ${FRACTAL_PIP_CACHE_DIR_ARG} "$INSTALL_STRING" $PINNED_PACKAGE_LIST
- write_log "END install with INSTALL_STRING=${INSTALL_STRING} and PINNED_PACKAGE_LIST=${PINNED_PACKAGE_LIST}"
+ write_log "START install with INSTALL_STRING=${INSTALL_STRING}"
+ "$VENVPYTHON" -m pip install ${FRACTAL_PIP_CACHE_DIR_ARG} "$INSTALL_STRING"
+ write_log "END install with INSTALL_STRING=${INSTALL_STRING}"
  echo
 
+ # Install pinned packages (note: do not quote $PINNED_PACKAGE_LIST, since it could be e.g. "numpy==1.2.3 torch==3.2.1")
+ if [ "$PINNED_PACKAGE_LIST" != "" ]; then
+     write_log "START install with PINNED_PACKAGE_LIST=${PINNED_PACKAGE_LIST}"
+     "$VENVPYTHON" -m pip install ${FRACTAL_PIP_CACHE_DIR_ARG} $PINNED_PACKAGE_LIST
+     write_log "END install with PINNED_PACKAGE_LIST=${PINNED_PACKAGE_LIST}"
+     echo
+ else
+     write_log "SKIP installing pinned versions $PINNED_PACKAGE_LIST (empty list)"
+ fi
+
  # End
  TIME_END=$(date +%s)
  write_log "All good up to here."
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: fractal-server
- Version: 2.14.11
+ Version: 2.14.13
  Summary: Backend component of the Fractal analytics platform
  License: BSD-3-Clause
  Author: Tommaso Comparin
@@ -1,4 +1,4 @@
- fractal_server/__init__.py,sha256=_nZVfV9ubhO5D3aYI-vZIXltETQy27u2BcyxMBu66Ik,24
+ fractal_server/__init__.py,sha256=6bdBE5bJoA01yAEh1heVK-Ta-FFnpeeh531ZgEO8YXY,24
  fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
  fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
  fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -35,9 +35,10 @@ fractal_server/app/routes/api/v2/_aux_functions_history.py,sha256=Z23xwvBaVEEQ5B
  fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py,sha256=GpKfw9yj01LmOAuNMTOreU1PFkCKpjK5oCt7_wp35-A,6741
  fractal_server/app/routes/api/v2/_aux_functions_task_version_update.py,sha256=WLDOYCnb6fnS5avKflyx6yN24Vo1n5kJk5ZyiKbzb8Y,1175
  fractal_server/app/routes/api/v2/_aux_functions_tasks.py,sha256=MFYnyNPBACSHXTDLXe6cSennnpmlpajN84iivOOMW7Y,11599
+ fractal_server/app/routes/api/v2/_aux_task_group_disambiguation.py,sha256=2sK7-bZzcl3-2mkx62tw0MPxeUYVDch30DSWgdhouHI,4615
  fractal_server/app/routes/api/v2/dataset.py,sha256=6u4MFqJ3YZ0Zq6Xx8CRMrTPKW55ZaR63Uno21DqFr4Q,8889
- fractal_server/app/routes/api/v2/history.py,sha256=OHy3Y4lreGyGXk9v5iud73tzoGV-YAT027gMWJH-5p4,16958
- fractal_server/app/routes/api/v2/images.py,sha256=tJn0ANv4Tz2KHyb41sPbBRmSCpt632m8HEcgC3u-rHk,7709
+ fractal_server/app/routes/api/v2/history.py,sha256=BEmf_ENF5HNMy8yXrxRdo4280rWuRUa1Jw4u8R9-LQQ,15477
+ fractal_server/app/routes/api/v2/images.py,sha256=TS1ltUhP0_SaViupdHrSh3MLDi5OVk-lOhE1VCVyZj0,7869
  fractal_server/app/routes/api/v2/job.py,sha256=8xRTwh_OCHmK9IfI_zUASa2ozewR0qu0zVBl_a4IvHw,6467
  fractal_server/app/routes/api/v2/pre_submission_checks.py,sha256=MmjvSQ0pNAWEw5BavR16zIZ4h13py302AmToaz8Vvco,4768
  fractal_server/app/routes/api/v2/project.py,sha256=ldMEyjtwGpX2teu85sCNWaubDFlw-En8U1SA7G1VaIw,4567
@@ -46,11 +47,11 @@ fractal_server/app/routes/api/v2/submit.py,sha256=_BDkWtFdo8-p7kZ0Oxaidei04MfuBe
  fractal_server/app/routes/api/v2/task.py,sha256=cUFrCxFOLGlRV7UCbUMHs4Xy4tIc3pqwG8gEqVP5GcU,6939
  fractal_server/app/routes/api/v2/task_collection.py,sha256=FGMhTnU88Umd8nMdriUYPtpTtAHcRBRrZIYyOesFhrU,12577
  fractal_server/app/routes/api/v2/task_collection_custom.py,sha256=EfGpv6W7xDyuYYp6E7XAcXLJiLNAImUHFqMDLgfh-4s,6730
- fractal_server/app/routes/api/v2/task_group.py,sha256=iShTvM9nJQhQLwR8ZpQRucVwYhJ7t00Lbesqh3M6mY4,7361
+ fractal_server/app/routes/api/v2/task_group.py,sha256=Xfsj5Wy0NOIkbeYsdqyFke4mkaeq0riJeTrGCHbt-eM,9059
  fractal_server/app/routes/api/v2/task_group_lifecycle.py,sha256=C2U2V76YbbqDWmErJ98MH9C2C26Lve2p_35FZ1dNmXg,9095
  fractal_server/app/routes/api/v2/task_version_update.py,sha256=h2c6aTLXj0_ZyBuHVsD5-ZTNMGEUpS96qZ4Ot1jlb74,7974
  fractal_server/app/routes/api/v2/workflow.py,sha256=gwMtpfUY_JiTv5_R_q1I9WNkp6nTqEVtYx8jWNJRxcU,10227
- fractal_server/app/routes/api/v2/workflow_import.py,sha256=Q4CnkSV47F11j6DkNT_U3AhwBK-LSsWWegItfdoOJ6c,11167
+ fractal_server/app/routes/api/v2/workflow_import.py,sha256=kOGDaCj0jCGK1WSYGbnUjtUg2U1YxUY9UMH-2ilqJg4,9027
  fractal_server/app/routes/api/v2/workflowtask.py,sha256=vVqEoJa3lrMl2CU94WoxFaqO3U0QImPgvrkkUNdqDOU,7462
  fractal_server/app/routes/auth/__init__.py,sha256=fao6CS0WiAjHDTvBzgBVV_bSXFpEAeDBF6Z6q7rRkPc,1658
  fractal_server/app/routes/auth/_aux_auth.py,sha256=UZgauY0V6mSqjte_sYI1cBl2h8bcbLaeWzgpl1jdJlk,4883
@@ -101,7 +102,7 @@ fractal_server/app/runner/v2/_slurm_sudo.py,sha256=Gvsh4tUlc1_3KdF3B7zEqs-YIntC_
  fractal_server/app/runner/v2/db_tools.py,sha256=du5dKhMMFMErQXbGIgu9JvO_vtMensodyPsyDeqz1yQ,3324
  fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
  fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
- fractal_server/app/runner/v2/runner.py,sha256=YkxHhWA25Mv7HzviCaYMTxkyuP-JFHj73H9aPO6Fxm0,17713
+ fractal_server/app/runner/v2/runner.py,sha256=_vmFdJCqNZVK9sQa7MoAIemkRX-hakLm5RLCBz8QxDg,18667
  fractal_server/app/runner/v2/runner_functions.py,sha256=Q9AVIR2NEBfRpfqW1wtQTTQfks_R1TnwRFBRro2fvjQ,18837
  fractal_server/app/runner/v2/submit_workflow.py,sha256=AMnXdozwIGlXD55ch0_SNAG-ntKBO-QRhkbInrvsShU,13140
  fractal_server/app/runner/v2/task_interface.py,sha256=V2TWBK6tbhycyMrJvFaoJ9IpuKlrLrvmjJbfNMsBBXo,2527
@@ -110,11 +111,11 @@ fractal_server/app/schemas/__init__.py,sha256=stURAU_t3AOBaH0HSUbV-GKhlPKngnnIMo
  fractal_server/app/schemas/user.py,sha256=t9nbyYjGCSOsxm9K97PDG3-9o27CsaFfhWb_L5nrjqA,1910
  fractal_server/app/schemas/user_group.py,sha256=x3-kqbo0q2wTP7QI0iZ7PU_9Dr957UYrFMKqS7BXLhE,1425
  fractal_server/app/schemas/user_settings.py,sha256=NpdC0Me0fgwwdfJuTSlFLCnLUjiWWzrJlPn_UPLjXnw,1862
- fractal_server/app/schemas/v2/__init__.py,sha256=M49RJ8SKcVoSfSTuiTCcbexSo8JMtLQTVFltCW4CuGQ,3103
+ fractal_server/app/schemas/v2/__init__.py,sha256=ft9gFmLLClNbWE8pwmG81fuLl1hlfdSsb8TvLoX3Sqk,3047
  fractal_server/app/schemas/v2/accounting.py,sha256=Wylt7uWTiDIFlHJOh4XEtYitk2FjFlmnodDrJDxcr0E,397
  fractal_server/app/schemas/v2/dataset.py,sha256=NKCjBwGBC7mPiSlXktZAcleJsvlLY6KfNKw7Wx4Zfqk,1728
  fractal_server/app/schemas/v2/dumps.py,sha256=o4RiWoSmQ8UPoWxgKoeORykGNIdczeNmm-ng-dBRD7k,2216
- fractal_server/app/schemas/v2/history.py,sha256=BCOii7GW7OvcDr1mLZl5-kYxtczzefQciuAxp95zrFk,1958
+ fractal_server/app/schemas/v2/history.py,sha256=pZiMKfh6nMWbTp5MUtrnGySPKbeRFf5tM1VLFaTgGcw,1784
  fractal_server/app/schemas/v2/job.py,sha256=fPay7dLSr-skKRdVRoZig8rf_sZwUdVdHZaJ4XM8vMI,3288
  fractal_server/app/schemas/v2/manifest.py,sha256=sZhj99iDgjE2MWXeTxnXSb6pFdKwRnFpCVQzcnpoTrI,6821
  fractal_server/app/schemas/v2/project.py,sha256=l96-3bCfB3knhITaLj1WSyBgbzP_k8CdtvgX_5jO_fU,657
@@ -131,11 +132,12 @@ fractal_server/config.py,sha256=ldI9VzEWmwU75Z7zVku6I-rXGKS3bJDdCifZnwad9-4,2592
  fractal_server/data_migrations/2_14_10.py,sha256=gMRR5QB0SDv0ToEiXVLg1VrHprM_Ii-9O1Kg-ZF-YhY,1599
  fractal_server/data_migrations/README.md,sha256=_3AEFvDg9YkybDqCLlFPdDmGJvr6Tw7HRI14aZ3LOIw,398
  fractal_server/data_migrations/tools.py,sha256=LeMeASwYGtEqd-3wOLle6WARdTGAimoyMmRbbJl-hAM,572
+ fractal_server/exceptions.py,sha256=7ftpWwNsTQmNonWCynhH5ErUh1haPPhIaVPrNHla7-o,53
  fractal_server/gunicorn_fractal.py,sha256=u6U01TLGlXgq1v8QmEpLih3QnsInZD7CqphgJ_GrGzc,1230
  fractal_server/images/__init__.py,sha256=-_wjoKtSX02P1KjDxDP_EXKvmbONTRmbf7iGVTsyBpM,154
- fractal_server/images/image_status.py,sha256=2EUxvuYEFKSKoICEiQntT5P3Un7FS8gv2Ef1HZFUfF4,2667
  fractal_server/images/models.py,sha256=6WchcIzLLLwdkLNRfg71Dl4Y-9UFLPyrrzh1lWgjuP0,1245
- fractal_server/images/tools.py,sha256=XKhbdjfWZpTSe1akK1bSQl4gzEQlj9ETDbELkuwayVg,4066
+ fractal_server/images/status_tools.py,sha256=tLp-Sojlhf-eQ97O1hj-2fg2zmgHfED9EXkec3Jjz_0,5141
+ fractal_server/images/tools.py,sha256=92kmt2Fnyp8ycTbyuar9_U8kJTi0wKpBk8ZagARWl9Y,4177
  fractal_server/logger.py,sha256=QIeVn3QpZsiIL2jDdrKotr-MLyDcZYgiPiTluFU46lE,5317
  fractal_server/main.py,sha256=FD9KzTTsXTQnTW0z3Hu7y0Nj_oAkBeZEInKDXFd4hjE,4561
  fractal_server/migrations/env.py,sha256=nfyBpMIOT3kny6t-b-tUjyRjZ4k906bb1_wCQ7me1BI,1353
@@ -196,7 +198,7 @@ fractal_server/tasks/v2/ssh/collect.py,sha256=bClq8hB04igrUXk1Mgc7pRWQws77mpYX8K
  fractal_server/tasks/v2/ssh/deactivate.py,sha256=YO2PJ0VV-LhVW-6O-t-d6BQciO2fYAkYbz5Y9UBiXaA,12928
  fractal_server/tasks/v2/ssh/reactivate.py,sha256=1DIQduhqZLbrIeoVyyp54vemBWZu94tFDvjpmDsZZI0,8818
  fractal_server/tasks/v2/templates/1_create_venv.sh,sha256=PK0jdHKtQpda1zULebBaVPORt4t6V17wa4N1ohcj5ac,548
- fractal_server/tasks/v2/templates/2_pip_install.sh,sha256=Enm1NxXqUglpi3R4_8_ojVq14ozjCc1XPvHXWKPLeS8,1262
+ fractal_server/tasks/v2/templates/2_pip_install.sh,sha256=jMJPQJXHKznO6fxOOXtFXKPdCmTf1VLLWj_JL_ZdKxo,1644
  fractal_server/tasks/v2/templates/3_pip_freeze.sh,sha256=JldREScEBI4cD_qjfX4UK7V4aI-FnX9ZvVNxgpSOBFc,168
  fractal_server/tasks/v2/templates/4_pip_show.sh,sha256=qm1vPy6AkKhWDjCJGXS8LqCLYO3KsAyRK325ZsFcF6U,1747
  fractal_server/tasks/v2/templates/5_get_venv_size_and_file_number.sh,sha256=q-6ZUvA6w6FDVEoSd9O63LaJ9tKZc7qAFH72SGPrd_k,284
@@ -214,8 +216,8 @@ fractal_server/types/validators/_workflow_task_arguments_validators.py,sha256=HL
  fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
  fractal_server/utils.py,sha256=FCY6HUsRnnbsWkT2kwQ2izijiHuCrCD3Kh50G0QudxE,3531
  fractal_server/zip_tools.py,sha256=tqz_8f-vQ9OBRW-4OQfO6xxY-YInHTyHmZxU7U4PqZo,4885
- fractal_server-2.14.11.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
- fractal_server-2.14.11.dist-info/METADATA,sha256=q7pnKsWb8a7g-nCtAe7PSCPQU-RkyHVSTj1Amo_LBLE,4244
- fractal_server-2.14.11.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
- fractal_server-2.14.11.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
- fractal_server-2.14.11.dist-info/RECORD,,
+ fractal_server-2.14.13.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+ fractal_server-2.14.13.dist-info/METADATA,sha256=-h8oCvhvIAs4Sp0G5zxavNQkpS6NkBsM8XHHmCT47EE,4244
+ fractal_server-2.14.13.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+ fractal_server-2.14.13.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+ fractal_server-2.14.13.dist-info/RECORD,,
@@ -1,85 +0,0 @@
- import time
- from copy import deepcopy
- from typing import Any
-
- from fastapi import Depends
- from sqlmodel import select
-
- from fractal_server.app.db import AsyncSession
- from fractal_server.app.db import get_async_db
- from fractal_server.app.models.v2 import HistoryImageCache
- from fractal_server.app.models.v2 import HistoryUnit
- from fractal_server.app.schemas.v2 import HistoryUnitStatusWithUnset
- from fractal_server.logger import set_logger
-
- IMAGE_STATUS_KEY = "__wftask_dataset_image_status__"
-
- logger = set_logger(__name__)
-
-
- def _enriched_image(*, img: dict[str, Any], status: str) -> dict[str, Any]:
-     img["attributes"][IMAGE_STATUS_KEY] = status
-     return img
-
-
- async def enrich_image_list(
-     *,
-     images: list[dict[str, Any]],
-     dataset_id: int,
-     workflowtask_id: int,
-     db: AsyncSession = Depends(get_async_db),
- ) -> list[dict[str, Any]]:
-     start_time = time.perf_counter()
-     logger.info(
-         f"START {enrich_image_list.__name__} for {dataset_id=}, "
-         f"{workflowtask_id=}"
-     )
-
-     zarr_url_to_image = {img["zarr_url"]: deepcopy(img) for img in images}
-
-     stm = (
-         select(HistoryImageCache.zarr_url, HistoryUnit.status)
-         .join(HistoryUnit)
-         .where(HistoryImageCache.dataset_id == dataset_id)
-         .where(HistoryImageCache.workflowtask_id == workflowtask_id)
-         .where(HistoryImageCache.latest_history_unit_id == HistoryUnit.id)
-         .where(HistoryImageCache.zarr_url.in_(zarr_url_to_image.keys()))
-         .order_by(HistoryImageCache.zarr_url)
-     )
-     res = await db.execute(stm)
-     list_processed_url_status = res.all()
-     logger.debug(
-         f"POST db query, "
-         f"elapsed={time.perf_counter() - start_time:.3f} "
-         "seconds"
-     )
-
-     set_processed_urls = set(item[0] for item in list_processed_url_status)
-     processed_images_with_status = [
-         _enriched_image(
-             img=zarr_url_to_image[item[0]],
-             status=item[1],
-         )
-         for item in list_processed_url_status
-     ]
-     logger.debug(
-         f"POST processed_images_with_status, "
-         f"elapsed={time.perf_counter() - start_time:.3f} "
-         "seconds"
-     )
-
-     non_processed_urls = zarr_url_to_image.keys() - set_processed_urls
-     non_processed_images_with_status = [
-         _enriched_image(
-             img=zarr_url_to_image[zarr_url],
-             status=HistoryUnitStatusWithUnset.UNSET,
-         )
-         for zarr_url in non_processed_urls
-     ]
-     logger.debug(
-         f"POST non_processed_images_with_status, "
-         f"elapsed={time.perf_counter() - start_time:.3f} "
-         "seconds"
-     )
-
-     return processed_images_with_status + non_processed_images_with_status