datamint 1.9.2__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of datamint might be problematic. Click here for more details.
- datamint/__init__.py +2 -0
- datamint/api/__init__.py +3 -0
- datamint/api/base_api.py +430 -0
- datamint/api/client.py +91 -0
- datamint/api/dto/__init__.py +10 -0
- datamint/api/endpoints/__init__.py +17 -0
- datamint/api/endpoints/annotations_api.py +984 -0
- datamint/api/endpoints/channels_api.py +28 -0
- datamint/api/endpoints/datasetsinfo_api.py +16 -0
- datamint/api/endpoints/projects_api.py +203 -0
- datamint/api/endpoints/resources_api.py +1013 -0
- datamint/api/endpoints/users_api.py +38 -0
- datamint/api/entity_base_api.py +347 -0
- datamint/apihandler/annotation_api_handler.py +5 -5
- datamint/apihandler/api_handler.py +3 -6
- datamint/apihandler/base_api_handler.py +6 -28
- datamint/apihandler/dto/__init__.py +0 -0
- datamint/apihandler/dto/annotation_dto.py +1 -1
- datamint/apihandler/root_api_handler.py +53 -28
- datamint/client_cmd_tools/datamint_config.py +6 -37
- datamint/client_cmd_tools/datamint_upload.py +84 -58
- datamint/dataset/base_dataset.py +65 -75
- datamint/dataset/dataset.py +2 -2
- datamint/entities/__init__.py +20 -0
- datamint/entities/annotation.py +178 -0
- datamint/entities/base_entity.py +51 -0
- datamint/entities/channel.py +46 -0
- datamint/entities/datasetinfo.py +22 -0
- datamint/entities/project.py +64 -0
- datamint/entities/resource.py +130 -0
- datamint/entities/user.py +21 -0
- datamint/examples/example_projects.py +41 -44
- datamint/exceptions.py +27 -1
- datamint/logging.yaml +1 -1
- datamint/utils/logging_utils.py +75 -0
- {datamint-1.9.2.dist-info → datamint-2.0.0.dist-info}/METADATA +13 -9
- datamint-2.0.0.dist-info/RECORD +50 -0
- {datamint-1.9.2.dist-info → datamint-2.0.0.dist-info}/WHEEL +1 -1
- datamint-1.9.2.dist-info/RECORD +0 -29
- {datamint-1.9.2.dist-info → datamint-2.0.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,984 @@
|
|
|
1
|
+
from typing import Any, Sequence, Literal, BinaryIO, Generator, IO
|
|
2
|
+
import httpx
|
|
3
|
+
from datetime import date
|
|
4
|
+
import logging
|
|
5
|
+
from ..entity_base_api import ApiConfig, CreatableEntityApi, DeletableEntityApi
|
|
6
|
+
from datamint.entities.annotation import Annotation
|
|
7
|
+
from datamint.entities.resource import Resource
|
|
8
|
+
from datamint.entities.project import Project
|
|
9
|
+
from datamint.apihandler.dto.annotation_dto import AnnotationType, CreateAnnotationDto, LineGeometry, BoxGeometry, CoordinateSystem, Geometry
|
|
10
|
+
import numpy as np
|
|
11
|
+
import os
|
|
12
|
+
import aiohttp
|
|
13
|
+
import json
|
|
14
|
+
from datamint.exceptions import DatamintException, ResourceNotFoundError
|
|
15
|
+
from medimgkit.nifti_utils import DEFAULT_NIFTI_MIME
|
|
16
|
+
from medimgkit.format_detection import guess_type
|
|
17
|
+
import nibabel as nib
|
|
18
|
+
from PIL import Image
|
|
19
|
+
from io import BytesIO
|
|
20
|
+
import pydicom
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
from tqdm.auto import tqdm
|
|
23
|
+
import asyncio
|
|
24
|
+
|
|
25
|
+
_LOGGER = logging.getLogger(__name__)
|
|
26
|
+
_USER_LOGGER = logging.getLogger('user_logger')
|
|
27
|
+
MAX_NUMBER_DISTINCT_COLORS = 2048 # Maximum number of distinct colors in a segmentation image
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class AnnotationsApi(CreatableEntityApi[Annotation], DeletableEntityApi[Annotation]):
|
|
31
|
+
"""API handler for annotation-related endpoints."""
|
|
32
|
+
|
|
33
|
+
def __init__(self, config: ApiConfig, client: httpx.Client | None = None) -> None:
    """Initialize the annotations API handler.

    Binds this handler to the :class:`Annotation` entity type under the
    'annotations' endpoint via the shared entity-API base class.

    Args:
        config: API configuration containing base URL, API key, etc.
        client: Optional HTTP client instance. If None, a new one will be created.
    """
    super().__init__(config, Annotation, 'annotations', client)
|
|
41
|
+
|
|
42
|
+
def get_list(self,
             resource: str | Resource | None = None,
             annotation_type: AnnotationType | str | None = None,
             annotator_email: str | None = None,
             date_from: date | None = None,
             date_to: date | None = None,
             dataset_id: str | None = None,
             worklist_id: str | None = None,
             status: Literal['new', 'published'] | None = None,
             load_ai_segmentations: bool | None = None,
             limit: int | None = None
             ) -> Sequence[Annotation]:
    """List annotations, optionally filtered by the given criteria.

    Args:
        resource: Restrict to annotations of this resource (id or instance).
        annotation_type: Restrict to a given annotation type.
        annotator_email: Restrict to annotations by this annotator.
        date_from: Lower bound (inclusive) on the annotation date.
        date_to: Upper bound (inclusive) on the annotation date.
        dataset_id: Restrict to a dataset.
        worklist_id: Restrict to an annotation worklist.
        status: Restrict to 'new' or 'published' annotations.
        load_ai_segmentations: Whether AI segmentations should be loaded.
        limit: Maximum number of annotations to return.

    Returns:
        The matching annotations.
    """
    rid = resource.id if isinstance(resource, Resource) else resource
    candidate_filters = (
        ('resource_id', rid),
        ('annotation_type', annotation_type),
        ('annotatorEmail', annotator_email),
        ('from', None if date_from is None else date_from.isoformat()),
        ('to', None if date_to is None else date_to.isoformat()),
        ('dataset_id', dataset_id),
        ('annotation_worklist_id', worklist_id),
        ('status', status),
        ('load_ai_segmentations', load_ai_segmentations),
    )
    # Only forward filters the caller actually supplied.
    params = {key: value for key, value in candidate_filters if value is not None}
    return super().get_list(limit=limit, params=params)
|
|
69
|
+
|
|
70
|
+
async def _upload_segmentations_async(self,
                                      resource: str | Resource,
                                      frame_index: int | Sequence[int] | None,
                                      file_path: str | np.ndarray,
                                      name: dict[int, str] | dict[tuple, str],
                                      imported_from: str | None = None,
                                      author_email: str | None = None,
                                      discard_empty_segmentations: bool = True,
                                      worklist_id: str | None = None,
                                      model_id: str | None = None,
                                      transpose_segmentation: bool = False,
                                      upload_volume: bool | str = 'auto'
                                      ) -> Sequence[str]:
    """
    Upload segmentations asynchronously.

    Dispatches to one of two strategies: whole-file volume upload, or
    frame-by-frame upload of per-frame PNG masks.

    Args:
        resource: The resource unique id or Resource instance.
        frame_index: The frame index or None for multiple frames.
        file_path: Path to segmentation file or numpy array.
        name: The name of the segmentation or mapping of pixel values to names.
        imported_from: The imported from value.
        author_email: The author email.
        discard_empty_segmentations: Whether to discard empty segmentations.
        worklist_id: The annotation worklist unique id.
        model_id: The model unique id.
        transpose_segmentation: Whether to transpose the segmentation.
        upload_volume: Whether to upload the volume as a single file or split
            into frames. 'auto' selects volume upload for NIfTI paths only.

    Returns:
        List of annotation IDs created.
    """
    if upload_volume == 'auto':
        # Only NIfTI paths default to whole-volume upload; everything else
        # falls through to the per-frame path below.
        if isinstance(file_path, str) and (file_path.endswith('.nii') or file_path.endswith('.nii.gz')):
            upload_volume = True
        else:
            upload_volume = False

    resource_id = self._entid(resource)
    # Handle volume upload
    if upload_volume:
        if frame_index is not None:
            _LOGGER.warning("frame_index parameter ignored when upload_volume=True")

        return await self._upload_volume_segmentation_async(
            resource_id=resource_id,
            file_path=file_path,
            name=name,
            imported_from=imported_from,
            author_email=author_email,
            worklist_id=worklist_id,
            model_id=model_id,
            transpose_segmentation=transpose_segmentation
        )

    # Handle frame-by-frame upload (existing logic)
    nframes, fios = AnnotationsApi._generate_segmentations_ios(
        file_path, transpose_segmentation=transpose_segmentation
    )
    # Normalize frame_index into an explicit list of frame indices.
    if frame_index is None:
        frames_indices = list(range(nframes))
    elif isinstance(frame_index, int):
        frames_indices = [frame_index]
    elif isinstance(frame_index, Sequence):
        if len(frame_index) != nframes:
            raise ValueError("Length of frame_index does not match number of frames in segmentation.")
        frames_indices = list(frame_index)
    else:
        raise ValueError("frame_index must be a list of integers or None.")

    annotids = []
    # Upload frames sequentially; each frame may yield several annotations
    # (one per segmentation class found in that frame).
    for fidx, f in zip(frames_indices, fios):
        frame_annotids = await self._upload_single_frame_segmentation_async(
            resource_id=resource_id,
            frame_index=fidx,
            fio=f,
            name=name,
            imported_from=imported_from,
            author_email=author_email,
            discard_empty_segmentations=discard_empty_segmentations,
            worklist_id=worklist_id,
            model_id=model_id
        )
        annotids.extend(frame_annotids)
    return annotids
|
|
155
|
+
|
|
156
|
+
async def _upload_single_frame_segmentation_async(self,
                                                  resource_id: str,
                                                  frame_index: int | None,
                                                  fio: IO,
                                                  name: dict[int, str] | dict[tuple, str],
                                                  imported_from: str | None = None,
                                                  author_email: str | None = None,
                                                  discard_empty_segmentations: bool = True,
                                                  worklist_id: str | None = None,
                                                  model_id: str | None = None
                                                  ) -> list[str]:
    """
    Upload a single frame segmentation asynchronously.

    Opens the frame image, splits it into one binary mask per distinct
    non-black color, creates one annotation per mask, then uploads each
    mask file. ``fio`` is always closed before returning.

    Args:
        resource_id: The resource unique id.
        frame_index: The frame index for the segmentation.
        fio: File-like object containing the segmentation image.
        name: The name of the segmentation, a dictionary mapping pixel values to names,
            or a dictionary mapping RGB tuples to names.
        imported_from: The imported from value.
        author_email: The author email.
        discard_empty_segmentations: Whether to discard empty segmentations.
        worklist_id: The annotation worklist unique id.
        model_id: The model unique id.

    Returns:
        List of annotation IDs created.
    """
    try:
        try:
            img_pil = Image.open(fio)
            img_array = np.array(img_pil)  # shape: (height, width, channels)
            # Returns a list of (count, color) tuples
            unique_vals = img_pil.getcolors(maxcolors=MAX_NUMBER_DISTINCT_COLORS)
            # convert to list of RGB tuples
            if unique_vals is None:
                # getcolors() returns None when the image holds more colors than maxcolors.
                raise ValueError(f'Number of unique colors exceeds {MAX_NUMBER_DISTINCT_COLORS}.')
            unique_vals = [color for count, color in unique_vals]
            # Remove black/transparent pixels
            black_pixel = (0, 0, 0)
            unique_vals = [rgb for rgb in unique_vals if rgb != black_pixel]

            if discard_empty_segmentations:
                if len(unique_vals) == 0:
                    # Nothing but background in this frame: skip it quietly.
                    msg = f"Discarding empty RGB segmentation for frame {frame_index}"
                    _LOGGER.debug(msg)
                    _USER_LOGGER.debug(msg)
                    return []
            segnames = AnnotationsApi._get_segmentation_names_rgb(unique_vals, names=name)
            segs_generator = AnnotationsApi._split_rgb_segmentations(img_array, unique_vals)

            fio.seek(0)
            # TODO: Optimize this. It is not necessary to open the image twice.

            # Create annotations
            annotations: list[CreateAnnotationDto] = []
            for segname in segnames:
                ann = CreateAnnotationDto(
                    type='segmentation',
                    identifier=segname,
                    scope='frame',
                    frame_index=frame_index,
                    imported_from=imported_from,
                    import_author=author_email,
                    model_id=model_id,
                    annotation_worklist_id=worklist_id
                )
                annotations.append(ann)

            # Validate unique identifiers
            if len(annotations) != len(set([a.identifier for a in annotations])):
                raise ValueError(
                    "Multiple annotations with the same identifier, frame_index, scope and author is not supported yet."
                )

            annotids = await self._create_async(resource_id=resource_id, annotations_dto=annotations)

            # Upload segmentation files
            if len(annotids) != len(segnames):
                _LOGGER.warning(f"Number of uploaded annotations ({len(annotids)})" +
                                f" does not match the number of annotations ({len(segnames)})")

            # zip() truncates to the shortest sequence, so a mismatch above
            # means some masks will not be uploaded.
            for annotid, segname, fio_seg in zip(annotids, segnames, segs_generator):
                await self.upload_annotation_file_async(resource_id, annotid, fio_seg,
                                                        content_type='image/png',
                                                        filename=segname)
            return annotids
        finally:
            fio.close()
    except ResourceNotFoundError:
        # Re-raise with a payload identifying which resource was missing.
        raise ResourceNotFoundError('resource', {'resource_id': resource_id})
|
|
248
|
+
|
|
249
|
+
def _prepare_upload_file(self,
|
|
250
|
+
file: str | IO,
|
|
251
|
+
filename: str | None = None,
|
|
252
|
+
content_type: str | None = None
|
|
253
|
+
) -> tuple[IO, str, bool, str | None]:
|
|
254
|
+
if isinstance(file, str):
|
|
255
|
+
if filename is None:
|
|
256
|
+
filename = os.path.basename(file)
|
|
257
|
+
f = open(file, 'rb')
|
|
258
|
+
close_file = True
|
|
259
|
+
else:
|
|
260
|
+
f = file
|
|
261
|
+
if filename is None:
|
|
262
|
+
if hasattr(f, 'name') and isinstance(f.name, str):
|
|
263
|
+
filename = f.name
|
|
264
|
+
else:
|
|
265
|
+
filename = 'unnamed_file'
|
|
266
|
+
close_file = False
|
|
267
|
+
|
|
268
|
+
if content_type is None:
|
|
269
|
+
content_type, _ = guess_type(filename, use_magic=False)
|
|
270
|
+
|
|
271
|
+
return f, filename, close_file, content_type
|
|
272
|
+
|
|
273
|
+
async def upload_annotation_file_async(self,
                                       resource: str | Resource,
                                       annotation_id: str,
                                       file: str | IO,
                                       content_type: str | None = None,
                                       filename: str | None = None
                                       ):
    """
    Asynchronously attach a file to an existing annotation.

    Args:
        resource: The resource unique id or Resource instance.
        annotation_id: The annotation unique id.
        file: Path to the file or a file-like object.
        content_type: The MIME type of the file.
        filename: Optional filename to use in the upload. If None and file is a path,
            the basename of the path will be used.

    Raises:
        DatamintException: If the upload fails.

    Example:
        .. code-block:: python

            await ann_api.upload_annotation_file_async(
                resource='your_resource_id',
                annotation_id='your_annotation_id',
                file='path/to/your/file.png',
                content_type='image/png',
                filename='custom_name.png'
            )
    """
    fobj, fname, must_close, ctype = self._prepare_upload_file(
        file, filename, content_type=content_type)

    try:
        payload = aiohttp.FormData()
        payload.add_field('file', fobj, filename=fname, content_type=ctype)
        rid = self._entid(resource)
        url = f'{self.endpoint_base}/{rid}/annotations/{annotation_id}/file'
        body = await self._make_request_async_json('POST',
                                                   endpoint=url,
                                                   data=payload)
        if isinstance(body, dict) and 'error' in body:
            raise DatamintException(body['error'])
    finally:
        # Only close streams this call opened itself.
        if must_close:
            fobj.close()
|
|
322
|
+
|
|
323
|
+
def upload_annotation_file(self,
                           resource: str | Resource,
                           annotation_id: str,
                           file: str | IO,
                           content_type: str | None = None,
                           filename: str | None = None
                           ):
    """
    Attach a file to an existing annotation (synchronous variant).

    Args:
        resource: The resource unique id or Resource instance.
        annotation_id: The annotation unique id.
        file: Path to the file or a file-like object.
        content_type: The MIME type of the file.
        filename: Optional filename to use in the upload. If None and file is a path,
            the basename of the path will be used.

    Raises:
        DatamintException: If the upload fails.
    """
    fobj, fname, must_close, ctype = self._prepare_upload_file(
        file, filename, content_type=content_type)
    try:
        rid = self._entid(resource)
        response = self._make_request(
            method='POST',
            endpoint=f'{self.endpoint_base}/{rid}/annotations/{annotation_id}/file',
            files={'file': (fname, fobj, ctype)})
        body = response.json()
        if isinstance(body, dict) and 'error' in body:
            raise DatamintException(body['error'])
    finally:
        # Only close streams this call opened itself.
        if must_close:
            fobj.close()
|
|
361
|
+
|
|
362
|
+
def create(self,
           resource: str | Resource,
           annotation_dto: CreateAnnotationDto | Sequence[CreateAnnotationDto]
           ) -> str | Sequence[str]:
    """Create one or more annotations on a resource.

    Args:
        resource: The resource unique id or Resource instance.
        annotation_dto: A CreateAnnotationDto instance or a list of such instances.

    Returns:
        The id of the created annotation or a list of ids if multiple annotations were created.
    """
    single = isinstance(annotation_dto, CreateAnnotationDto)
    dtos = [annotation_dto] if single else annotation_dto
    # Accept either DTO objects or already-serialized dicts.
    body = [d.to_dict() if isinstance(d, CreateAnnotationDto) else d for d in dtos]
    rid = resource.id if isinstance(resource, Resource) else resource
    respdata = self._make_request('POST',
                                  f'{self.endpoint_base}/{rid}/annotations',
                                  json=body).json()
    for item in respdata:
        if isinstance(item, dict) and 'error' in item:
            raise DatamintException(item['error'])
    return respdata[0] if single else respdata
|
|
388
|
+
|
|
389
|
+
def upload_segmentations(self,
                         resource: str | Resource,
                         file_path: str | np.ndarray,
                         name: str | dict[int, str] | dict[tuple, str] | None = None,
                         frame_index: int | list[int] | None = None,
                         imported_from: str | None = None,
                         author_email: str | None = None,
                         discard_empty_segmentations: bool = True,
                         worklist_id: str | None = None,
                         model_id: str | None = None,
                         transpose_segmentation: bool = False,
                         ) -> list[str]:
    """
    Upload segmentations to a resource.

    Args:
        resource: The resource unique ID or Resource instance.
        file_path: The path to the segmentation file or a numpy array.
            If a numpy array is provided, it can have the shape:
            - (height, width, #frames) or (height, width) for grayscale segmentations
            - (3, height, width, #frames) for RGB segmentations
            For NIfTI files (.nii/.nii.gz), the entire volume is uploaded as a single segmentation.
        name: The name of the segmentation.
            Can be:
            - str: Single name for all segmentations
            - dict[int, str]: Mapping pixel values to names for grayscale segmentations
            - dict[tuple[int, int, int], str]: Mapping RGB tuples to names for RGB segmentations
            Use 'default' as a key for a unnamed classes.
            Example: {(255, 0, 0): 'Red_Region', (0, 255, 0): 'Green_Region'}
        frame_index: The frame index of the segmentation.
            If a list, it must have the same length as the number of frames in the segmentation.
            If None, it is assumed that the segmentations are in sequential order starting from 0.
            This parameter is ignored for NIfTI files as they are treated as volume segmentations.
        imported_from: The imported from value.
        author_email: The author email.
        discard_empty_segmentations: Whether to discard empty segmentations or not.
        worklist_id: The annotation worklist unique id.
        model_id: The model unique id.
        transpose_segmentation: Whether to transpose the segmentation or not.

    Returns:
        List of segmentation unique ids.

    Raises:
        ResourceNotFoundError: If the resource does not exist or the segmentation is invalid.
        FileNotFoundError: If the file path does not exist.
        ValueError: If frame_index is provided for NIfTI files or invalid parameters.

    Example:
        .. code-block:: python

            # Grayscale segmentation
            api.annotations.upload_segmentations(resource_id, 'path/to/segmentation.png', 'SegmentationName')

            # RGB segmentation with numpy array
            seg_data = np.random.randint(0, 3, size=(3, 2140, 1760, 1), dtype=np.uint8)
            rgb_names = {(1, 0, 0): 'Red_Region', (0, 1, 0): 'Green_Region', (0, 0, 1): 'Blue_Region'}
            api.annotations.upload_segmentations(resource_id, seg_data, rgb_names)

            # Volume segmentation
            api.annotations.upload_segmentations(resource_id, 'path/to/segmentation.nii.gz', 'VolumeSegmentation')
    """
    import nest_asyncio

    if isinstance(file_path, str) and not os.path.exists(file_path):
        raise FileNotFoundError(f"File {file_path} not found.")

    # Handle NIfTI files specially - upload as single volume
    if isinstance(file_path, str) and (file_path.endswith('.nii') or file_path.endswith('.nii.gz')):
        _LOGGER.info(f"Uploading NIfTI segmentation file: {file_path}")
        if frame_index is not None:
            raise ValueError("Do not provide frame_index for NIfTI segmentations.")

        # NOTE(review): `name` is passed through un-standardized on this path;
        # a plain string ends up raising NotImplementedError in
        # _upload_volume_segmentation_async, so the string example in the
        # docstring above may be stale — confirm against that method.
        # Ensure nest_asyncio is applied for Jupyter compatibility
        nest_asyncio.apply()
        loop = asyncio.get_event_loop()
        task = self._upload_segmentations_async(
            resource=resource,
            frame_index=None,
            file_path=file_path,
            name=name,
            imported_from=imported_from,
            author_email=author_email,
            worklist_id=worklist_id,
            model_id=model_id,
            transpose_segmentation=transpose_segmentation,
            upload_volume=True
        )
        return loop.run_until_complete(task)

    # All other file types are converted to multiple PNGs and uploaded frame by frame
    standardized_name = self.standardize_segmentation_names(name)
    _LOGGER.debug(f"Standardized segmentation names: {standardized_name}")

    # Handle frame_index parameter
    if isinstance(frame_index, list):
        if len(set(frame_index)) != len(frame_index):
            raise ValueError("frame_index list contains duplicate values.")

    # A single-element sequence is collapsed to a scalar frame index.
    if isinstance(frame_index, Sequence) and len(frame_index) == 1:
        frame_index = frame_index[0]

    nest_asyncio.apply()
    loop = asyncio.get_event_loop()
    task = self._upload_segmentations_async(
        resource=resource,
        frame_index=frame_index,
        file_path=file_path,
        name=standardized_name,
        imported_from=imported_from,
        author_email=author_email,
        discard_empty_segmentations=discard_empty_segmentations,
        worklist_id=worklist_id,
        model_id=model_id,
        transpose_segmentation=transpose_segmentation,
        upload_volume=False
    )
    return loop.run_until_complete(task)
|
|
507
|
+
|
|
508
|
+
@staticmethod
|
|
509
|
+
def standardize_segmentation_names(name: str | dict | None
|
|
510
|
+
) -> dict:
|
|
511
|
+
"""
|
|
512
|
+
Standardize segmentation names to a consistent format.
|
|
513
|
+
|
|
514
|
+
Args:
|
|
515
|
+
name: The name input in various formats.
|
|
516
|
+
|
|
517
|
+
Returns:
|
|
518
|
+
Standardized name dictionary.
|
|
519
|
+
"""
|
|
520
|
+
if name is None:
|
|
521
|
+
return {'default': 'default'} # Return a dict with integer key for compatibility
|
|
522
|
+
elif isinstance(name, str):
|
|
523
|
+
return {'default': name} # Use integer key for single string names
|
|
524
|
+
elif isinstance(name, dict):
|
|
525
|
+
# Return the dict as-is since it's already in the correct format
|
|
526
|
+
return name
|
|
527
|
+
else:
|
|
528
|
+
raise ValueError("Invalid name format. Must be str, dict[int, str], dict[tuple, str], or None.")
|
|
529
|
+
|
|
530
|
+
async def _create_async(self,
                        resource_id: str,
                        annotations_dto: list[CreateAnnotationDto] | list[dict]) -> list[str]:
    """POST a batch of annotation DTOs for a resource and return the created ids."""
    # Accept either DTO objects or already-serialized dicts.
    payload = [dto.to_dict() if isinstance(dto, CreateAnnotationDto) else dto
               for dto in annotations_dto]
    respdata = await self._make_request_async_json(
        'POST',
        f'{self.endpoint_base}/{resource_id}/annotations',
        json=payload)
    for item in respdata:
        if isinstance(item, dict) and 'error' in item:
            raise DatamintException(item['error'])
    return respdata
|
|
541
|
+
|
|
542
|
+
@staticmethod
|
|
543
|
+
def _get_segmentation_names_rgb(uniq_rgb_vals: list[tuple[int, int, int]],
|
|
544
|
+
names: dict[tuple[int, int, int], str]
|
|
545
|
+
) -> list[str]:
|
|
546
|
+
"""
|
|
547
|
+
Generate segmentation names for RGB combinations.
|
|
548
|
+
|
|
549
|
+
Args:
|
|
550
|
+
uniq_rgb_vals: List of unique RGB combinations as (R,G,B) tuples
|
|
551
|
+
names: Name mapping for RGB combinations
|
|
552
|
+
|
|
553
|
+
Returns:
|
|
554
|
+
List of segmentation names
|
|
555
|
+
"""
|
|
556
|
+
result = []
|
|
557
|
+
for rgb_tuple in uniq_rgb_vals:
|
|
558
|
+
seg_name = names.get(rgb_tuple, names.get('default', f'seg_{"_".join(map(str, rgb_tuple))}'))
|
|
559
|
+
if seg_name is None:
|
|
560
|
+
if rgb_tuple[0] == rgb_tuple[1] and rgb_tuple[1] == rgb_tuple[2]:
|
|
561
|
+
msg = f"Provide a name for {rgb_tuple} or {rgb_tuple[0]} or use 'default' key."
|
|
562
|
+
else:
|
|
563
|
+
msg = f"Provide a name for {rgb_tuple} or use 'default' key."
|
|
564
|
+
raise ValueError(f"RGB combination {rgb_tuple} not found in names dictionary. " +
|
|
565
|
+
msg)
|
|
566
|
+
# If using default prefix, append RGB values
|
|
567
|
+
# if rgb_tuple not in names and 'default' in names:
|
|
568
|
+
# seg_name = f"{seg_name}_{'_'.join(map(str, rgb_tuple))}"
|
|
569
|
+
result.append(seg_name)
|
|
570
|
+
return result
|
|
571
|
+
|
|
572
|
+
@staticmethod
def _split_rgb_segmentations(img: np.ndarray,
                             uniq_rgb_vals: list[tuple[int, int, int]]
                             ) -> Generator[BytesIO, None, None]:
    """
    Split an RGB segmentation image into one binary PNG mask per color.

    Args:
        img: RGB image array of shape (height, width, channels)
        uniq_rgb_vals: List of unique RGB combinations as (R,G,B) tuples

    Yields:
        BytesIO objects, each holding a grayscale PNG where pixels matching
        the corresponding RGB combination are 255 and everything else is 0.
    """
    rgb_channels = img[:, :, :3]
    for rgb in uniq_rgb_vals:
        target = np.array(rgb[:3])  # Ensure only R,G,B values
        binary_mask = np.all(rgb_channels == target, axis=2)

        # Scale the boolean mask to 8-bit and serialize as PNG.
        png_buffer = BytesIO()
        Image.fromarray((binary_mask * 255).astype(np.uint8)).convert('L').save(png_buffer, format='PNG')
        png_buffer.seek(0)
        yield png_buffer
|
|
598
|
+
|
|
599
|
+
async def _upload_volume_segmentation_async(self,
                                            resource_id: str,
                                            file_path: str | np.ndarray,
                                            name: str | dict[int, str] | dict[tuple, str] | None,
                                            imported_from: str | None = None,
                                            author_email: str | None = None,
                                            worklist_id: str | None = None,
                                            model_id: str | None = None,
                                            transpose_segmentation: bool = False
                                            ) -> Sequence[str]:
    """
    Upload a volume segmentation as a single file asynchronously.

    Args:
        resource_id: The resource unique id.
        file_path: Path to segmentation file or numpy array.
        name: Mapping of pixel values to names (string names not supported yet).
        imported_from: The imported from value.
            NOTE(review): currently accepted but not sent to the server — confirm.
        author_email: The author email.
            NOTE(review): currently accepted but not sent to the server — confirm.
        worklist_id: The annotation worklist unique id.
        model_id: The model unique id.
        transpose_segmentation: Whether to transpose the segmentation.
            NOTE(review): currently unused on this path — confirm.

    Returns:
        List of annotation IDs created.

    Raises:
        ValueError: If the file format is unsupported for volume upload.
        NotImplementedError: If name is a string / tuple-keyed dict, or
            file_path is a numpy array.
    """

    if isinstance(name, str):
        raise NotImplementedError("`name=string` is not supported yet for volume segmentation.")
    if isinstance(name, dict):
        if any(isinstance(k, tuple) for k in name.keys()):
            raise NotImplementedError(
                "For volume segmentations, `name` must be a dictionary with integer keys only.")
        if 'default' in name:
            _LOGGER.warning("Ignoring 'default' key in name dictionary for volume segmentation. Not supported yet.")

    # Prepare file for upload
    if isinstance(file_path, str):
        if file_path.endswith('.nii') or file_path.endswith('.nii.gz'):
            # Upload NIfTI file directly
            with open(file_path, 'rb') as f:
                filename = os.path.basename(file_path)
                form = aiohttp.FormData()
                form.add_field('file', f, filename=filename, content_type=DEFAULT_NIFTI_MIME)
                if model_id is not None:
                    form.add_field('model_id', model_id)  # Add model_id if provided
                if worklist_id is not None:
                    form.add_field('annotation_worklist_id', worklist_id)
                if name is not None:
                    form.add_field('segmentation_map', json.dumps(name), content_type='application/json')

                respdata = await self._make_request_async_json('POST',
                                                               f'{self.endpoint_base}/{resource_id}/segmentations/file',
                                                               data=form)
                if 'error' in respdata:
                    raise DatamintException(respdata['error'])
                # BUGFIX: this success log previously sat after the whole
                # if/elif/else and was unreachable, because every branch
                # returned or raised first. Log before returning instead.
                _USER_LOGGER.info(f'Volume segmentation uploaded for resource {resource_id}')
                return respdata
        else:
            raise ValueError(f"Volume upload not supported for file format: {file_path}")
    elif isinstance(file_path, np.ndarray):
        raise NotImplementedError
    else:
        raise ValueError(f"Unsupported file_path type for volume upload: {type(file_path)}")
|
|
667
|
+
|
|
668
|
+
@staticmethod
def _generate_segmentations_ios(file_path: str | np.ndarray,
                                transpose_segmentation: bool = False
                                ) -> tuple[int, Generator[BinaryIO, None, None]]:
    """Load a segmentation source and convert it into per-frame PNG streams.

    Args:
        file_path: Either a numpy array holding the segmentation data or a
            path to a ``.nii``/``.nii.gz``/``.png`` file.
        transpose_segmentation: When True, swap the height/width axes of the
            data. NIfTI inputs are transposed by default; for them, passing
            True disables that default transpose instead.

    Returns:
        A pair ``(nframes, generator)`` where the generator yields one PNG
        ``BytesIO`` per frame.

    Raises:
        ValueError: If the input type, file extension, or array shape is
            unsupported.
    """
    if not isinstance(file_path, (str, np.ndarray)):
        raise ValueError(f"Unsupported file type: {type(file_path)}")

    if isinstance(file_path, np.ndarray):
        arr = AnnotationsApi._normalize_segmentation_array(file_path)
        swap_axes = transpose_segmentation
        frame_count = arr.shape[3]
    elif file_path.endswith(('.nii', '.nii.gz')):
        raw = nib.load(file_path).get_fdata()
        if raw.ndim not in (2, 3):
            raise ValueError(f"Invalid segmentation shape: {raw.shape}")
        arr = AnnotationsApi._normalize_segmentation_array(raw)
        # NIfTI data is (width, height)-ordered: the default behaviour applies
        # a transpose, and `transpose_segmentation=True` turns that off.
        swap_axes = not transpose_segmentation
        frame_count = arr.shape[3]
    elif file_path.endswith('.png'):
        with Image.open(file_path) as img:
            arr = AnnotationsApi._normalize_segmentation_array(np.array(img))
        swap_axes = transpose_segmentation
        frame_count = 1  # a single PNG always contributes exactly one frame
    else:
        raise ValueError(f"Unsupported file format of '{file_path}'")

    if swap_axes:
        # (channels, height, width, frames) -> (channels, width, height, frames)
        arr = arr.transpose(0, 2, 1, 3)

    streams = AnnotationsApi._numpy_to_bytesio_png(arr)
    return frame_count, streams
|
|
716
|
+
|
|
717
|
+
@staticmethod
|
|
718
|
+
def _normalize_segmentation_array(seg_imgs: np.ndarray) -> np.ndarray:
|
|
719
|
+
"""
|
|
720
|
+
Normalize segmentation array to a consistent format.
|
|
721
|
+
|
|
722
|
+
Args:
|
|
723
|
+
seg_imgs: Input segmentation array in various formats: (height, width, #frames), (height, width), (3, height, width, #frames).
|
|
724
|
+
|
|
725
|
+
Returns:
|
|
726
|
+
np.ndarray: Shape (#channels, height, width, #frames)
|
|
727
|
+
"""
|
|
728
|
+
if seg_imgs.ndim == 4:
|
|
729
|
+
return seg_imgs # .transpose(1, 2, 0, 3)
|
|
730
|
+
|
|
731
|
+
# Handle grayscale segmentations
|
|
732
|
+
if seg_imgs.ndim == 2:
|
|
733
|
+
# Add frame dimension: (height, width) -> (height, width, 1)
|
|
734
|
+
seg_imgs = seg_imgs[..., None]
|
|
735
|
+
if seg_imgs.ndim == 3:
|
|
736
|
+
# (height, width, #frames)
|
|
737
|
+
seg_imgs = seg_imgs[np.newaxis, ...] # Add channel dimension: (1, height, width, #frames)
|
|
738
|
+
|
|
739
|
+
return seg_imgs
|
|
740
|
+
|
|
741
|
+
@staticmethod
def _numpy_to_bytesio_png(seg_imgs: np.ndarray) -> Generator[BinaryIO, None, None]:
    """
    Convert normalized segmentation images to PNG BytesIO objects.

    Args:
        seg_imgs: Normalized segmentation array in shape (channels, height, width, frames).
            Supported channel counts are 1 (grayscale, converted to RGB),
            3 (RGB) and 4 (RGBA).

    Yields:
        BinaryIO: PNG image data as BytesIO objects, one per frame.

    Raises:
        ValueError: If the channel dimension is not 1, 3 or 4.
    """
    # PIL RGB format is: (height, width, channels)
    if seg_imgs.shape[0] not in (1, 3, 4):
        # Fixed: the message previously said "Expected 1 or 3" while the
        # check above also accepts 4 channels (RGBA).
        raise ValueError(f"Unsupported number of channels: {seg_imgs.shape[0]}. Expected 1, 3 or 4")
    nframes = seg_imgs.shape[3]
    for i in range(nframes):
        img = seg_imgs[:, :, :, i].astype(np.uint8)
        if img.shape[0] == 1:
            # Single-channel frame: drop the channel axis and promote to RGB.
            pil_img = Image.fromarray(img[0]).convert('RGB')
        else:
            # (channels, height, width) -> (height, width, channels) for PIL.
            pil_img = Image.fromarray(img.transpose(1, 2, 0))
        img_bytes = BytesIO()
        pil_img.save(img_bytes, format='PNG')
        img_bytes.seek(0)
        yield img_bytes
|
|
766
|
+
|
|
767
|
+
def add_line_annotation(self,
                        point1: tuple[int, int] | tuple[float, float, float],
                        point2: tuple[int, int] | tuple[float, float, float],
                        resource_id: str,
                        identifier: str,
                        frame_index: int | None = None,
                        dicom_metadata: pydicom.Dataset | str | None = None,
                        coords_system: CoordinateSystem = 'pixel',
                        project: str | None = None,
                        worklist_id: str | None = None,
                        imported_from: str | None = None,
                        author_email: str | None = None,
                        model_id: str | None = None) -> Sequence[str]:
    """
    Add a line annotation to a resource.

    Args:
        point1: The first point of the line. If `coords_system` is 'pixel', it must be
            a 2d point in image pixel coordinates. If `coords_system` is 'patient', it
            must be a 3d point in DICOM patient coordinates.
        point2: The second point of the line. Same conventions as `point1`.
        resource_id: The resource unique id.
        identifier: The annotation identifier, also known as the annotation's label.
        frame_index: The frame index of the annotation.
        dicom_metadata: The DICOM metadata of the image (dataset object or file path).
            If provided together with pixel coordinates, the points are converted to the
            correct coordinates automatically using the DICOM metadata.
        coords_system: The coordinate system of the points. Can be 'pixel' or 'patient'.
        project: The project unique id or name.
        worklist_id: The annotation worklist unique id. Optional.
        imported_from: The imported from source value.
        author_email: The email to consider as the author of the annotation. If None, use the customer of the api key.
        model_id: The model unique id. Optional.

    Example:
        .. code-block:: python

            res_id = 'aa93813c-cef0-4edd-a45c-85d4a8f1ad0d'
            api.add_line_annotation([0, 0], (10, 30),
                                    resource_id=res_id,
                                    identifier='Line1',
                                    frame_index=2,
                                    project='Example Project')
    """
    if project is not None and worklist_id is not None:
        raise ValueError('Only one of project or worklist_id can be provided.')

    if coords_system == 'patient':
        geom = LineGeometry(point1, point2)
    elif coords_system == 'pixel':
        if dicom_metadata is None:
            # Without DICOM metadata, embed the frame index as the z-coordinate.
            geom = LineGeometry((point1[0], point1[1], frame_index),
                                (point2[0], point2[1], frame_index))
        else:
            ds = pydicom.dcmread(dicom_metadata) if isinstance(dicom_metadata, str) else dicom_metadata
            geom = LineGeometry.from_dicom(ds, point1, point2, slice_index=frame_index)
    else:
        raise ValueError(f"Unknown coordinate system: {coords_system}")

    return self._create_geometry_annotation(
        geometry=geom,
        resource_id=resource_id,
        identifier=identifier,
        frame_index=frame_index,
        project=project,
        worklist_id=worklist_id,
        imported_from=imported_from,
        author_email=author_email,
        model_id=model_id
    )
|
|
842
|
+
|
|
843
|
+
def _create_geometry_annotation(self,
                                geometry: Geometry,
                                resource_id: str,
                                identifier: str,
                                frame_index: int | None = None,
                                project: str | None = None,
                                worklist_id: str | None = None,
                                imported_from: str | None = None,
                                author_email: str | None = None,
                                model_id: str | None = None) -> Sequence[str]:
    """
    Create an annotation carrying the given geometry.

    Args:
        geometry: The geometry object (e.g., LineGeometry, BoxGeometry).
        resource_id: The resource unique id.
        identifier: The annotation identifier/label.
        frame_index: The frame index of the annotation.
        project: The project unique id or name.
        worklist_id: The annotation worklist unique id.
        imported_from: The imported from source value.
        author_email: The email to consider as the author.
        model_id: The model unique id.

    Returns:
        List of created annotation IDs.

    Raises:
        ValueError: If both ``project`` and ``worklist_id`` are provided.
    """
    if project is not None and worklist_id is not None:
        raise ValueError('Only one of project or worklist_id can be provided.')

    # NOTE(review): `project` is only validated here and not forwarded to the
    # DTO — presumably resolved elsewhere; confirm against the caller chain.
    dto = CreateAnnotationDto(
        type=geometry.type,
        identifier=identifier,
        # A frame index pins the annotation to a single frame; otherwise it
        # applies to the whole image.
        scope='frame' if frame_index is not None else 'image',
        frame_index=frame_index,
        geometry=geometry,
        imported_from=imported_from,
        import_author=author_email,
        model_id=model_id,
        annotation_worklist_id=worklist_id
    )

    return self.create(resource_id, dto)
|
|
887
|
+
|
|
888
|
+
def download_file(self,
                  annotation: str | Annotation,
                  fpath_out: str | Path | None = None) -> bytes:
    """
    Download the segmentation file for a given annotation.

    Args:
        annotation: The annotation unique id or an annotation object.
        fpath_out: (Optional) The file path to save the downloaded segmentation file.

    Returns:
        bytes: The content of the downloaded segmentation file in bytes format.
    """
    if isinstance(annotation, Annotation):
        ann_id, res_id = annotation.id, annotation.resource_id
    else:
        # Only the id was given: look the annotation up to find its resource.
        ann_id = annotation
        res_id = self.get_by_id(ann_id).resource_id

    response = self._make_request('GET', f'/annotations/{res_id}/annotations/{ann_id}/file')
    content = response.content
    if fpath_out:
        with open(str(fpath_out), 'wb') as fh:
            fh.write(content)
    return content
|
|
913
|
+
|
|
914
|
+
async def _async_download_segmentation_file(self,
                                            annotation: str | Annotation,
                                            save_path: str | Path,
                                            session: aiohttp.ClientSession | None = None,
                                            progress_bar: tqdm | None = None):
    """
    Asynchronously download a segmentation file to disk.

    Args:
        annotation: The annotation unique id or an annotation object.
        save_path: The path to save the file.
        session: The aiohttp session to use for the request.
        progress_bar: Optional progress bar advanced by one after the download completes.

    Raises:
        ResourceNotFoundError: If the annotation file does not exist; the error
            is enriched with the annotation id before re-raising.
    """
    if isinstance(annotation, Annotation):
        ann_id, res_id = annotation.id, annotation.resource_id
    else:
        ann_id = annotation
        res_id = self.get_by_id(ann_id).resource_id

    url = f'/annotations/{res_id}/annotations/{ann_id}/file'
    try:
        async with self._make_request_async('GET', url, session=session) as resp:
            payload = await resp.read()
        with open(save_path, 'wb') as fh:
            fh.write(payload)
        if progress_bar:
            progress_bar.update(1)
    except ResourceNotFoundError as e:
        # Attach the annotation id so callers can tell what was missing.
        e.set_params('annotation', {'annotation_id': ann_id})
        raise e
|
|
947
|
+
|
|
948
|
+
def download_multiple_files(self,
                            annotations: Sequence[str | Annotation],
                            save_paths: Sequence[str | Path] | str
                            ) -> None:
    """
    Download multiple segmentation files and save them to the specified paths.

    Args:
        annotations: A list of annotation unique ids or annotation objects.
        save_paths: A list of paths to save the files (same length as
            ``annotations``) or a directory path.

    Raises:
        ValueError: If ``save_paths`` is a sequence whose length differs from
            ``annotations``.
    """
    import nest_asyncio
    # Allow run_until_complete even when an event loop is already running
    # (e.g. inside Jupyter).
    nest_asyncio.apply()

    if isinstance(save_paths, str):
        # Treat save_paths as a directory: derive one target path per annotation.
        save_paths = [os.path.join(save_paths, self._entid(ann))
                      for ann in annotations]
    elif len(save_paths) != len(annotations):
        # Previously zip() silently truncated mismatched inputs; fail loudly instead.
        raise ValueError(
            f'annotations ({len(annotations)}) and save_paths ({len(save_paths)}) '
            'must have the same length.')

    async def _download_all_async():
        async with aiohttp.ClientSession() as session:
            tasks = [
                self._async_download_segmentation_file(
                    annotation, save_path=path, session=session, progress_bar=progress_bar)
                for annotation, path in zip(annotations, save_paths)
            ]
            await asyncio.gather(*tasks)

    with tqdm(total=len(annotations), desc="Downloading segmentations", unit="file") as progress_bar:
        loop = asyncio.get_event_loop()
        loop.run_until_complete(_download_all_async())
|
|
978
|
+
|
|
979
|
+
def bulk_download_file(self,
                       annotations: Sequence[str | Annotation],
                       save_paths: Sequence[str | Path] | str
                       ) -> None:
    """Alias for :py:meth:`download_multiple_files`.

    Args:
        annotations: A list of annotation unique ids or annotation objects.
        save_paths: A list of paths to save the files or a directory path.
    """
    return self.download_multiple_files(annotations, save_paths)
|