datamint 1.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of datamint might be problematic.

@@ -0,0 +1,748 @@
1
+ from typing import Optional, Generator, Literal, IO, BinaryIO
2
+ from .base_api_handler import BaseAPIHandler, ResourceNotFoundError, DatamintException
3
+ from datetime import date
4
+ import logging
5
+ import numpy as np
6
+ from PIL import Image
7
+ from io import BytesIO
8
+ import nibabel as nib
9
+ import os
10
+ import asyncio
11
+ import aiohttp
12
+ from requests.exceptions import HTTPError
13
+ from deprecated.sphinx import deprecated
14
+ from .dto.annotation_dto import CreateAnnotationDto, LineGeometry, CoordinateSystem, AnnotationType
15
+ import pydicom
16
+
17
+ _LOGGER = logging.getLogger(__name__)
18
+ _USER_LOGGER = logging.getLogger('user_logger')
19
+
20
+
21
+ class AnnotationAPIHandler(BaseAPIHandler):
22
+ @staticmethod
23
+ def _numpy_to_bytesio_png(seg_imgs: np.ndarray) -> Generator[BinaryIO, None, None]:
24
+ """
25
+ Args:
26
+ seg_imgs (np.ndarray): The segmentation images with dimensions (height, width, #frames) or (height, width).
27
+ """
28
+
29
+ if seg_imgs.ndim == 2:
30
+ seg_imgs = seg_imgs[..., None]
31
+
32
+ seg_imgs = seg_imgs.astype(np.uint8)
33
+ for i in range(seg_imgs.shape[2]):
34
+ img = seg_imgs[:, :, i]
35
+ img = Image.fromarray(img).convert('L')
36
+ img_bytes = BytesIO()
37
+ img.save(img_bytes, format='PNG')
38
+ img_bytes.seek(0)
39
+ yield img_bytes
40
+
41
+ @staticmethod
42
+ def _generate_segmentations_ios(file_path: str | np.ndarray,
43
+ transpose_segmentation: bool = False) -> tuple[int, Generator[BinaryIO, None, None]]:
44
+ if not isinstance(file_path, (str, np.ndarray)):
45
+ raise ValueError(f"Unsupported file type: {type(file_path)}")
46
+
47
+ if isinstance(file_path, np.ndarray):
48
+ segs_imgs = file_path # (height, width, #frames) or (height, width)
49
+ if transpose_segmentation:
50
+ segs_imgs = segs_imgs.transpose(1, 0, 2) if segs_imgs.ndim == 3 else segs_imgs.transpose(1, 0)
51
+ nframes = segs_imgs.shape[2] if segs_imgs.ndim == 3 else 1
52
+ fios = AnnotationAPIHandler._numpy_to_bytesio_png(segs_imgs)
53
+ elif file_path.endswith('.nii') or file_path.endswith('.nii.gz'):
54
+ segs_imgs = nib.load(file_path).get_fdata()
55
+ if segs_imgs.ndim != 3 and segs_imgs.ndim != 2:
56
+ raise ValueError(f"Invalid segmentation shape: {segs_imgs.shape}")
57
+ if not transpose_segmentation:
58
+ # The inverted condition is intentional: NIfTI data is loaded transposed relative to the (height, width, #frames) layout used elsewhere, so it is transposed by default unless transpose_segmentation is requested.
59
+ segs_imgs = segs_imgs.transpose(1, 0, 2) if segs_imgs.ndim == 3 else segs_imgs.transpose(1, 0)
60
+
61
+ fios = AnnotationAPIHandler._numpy_to_bytesio_png(segs_imgs)
62
+ nframes = segs_imgs.shape[2] if segs_imgs.ndim == 3 else 1
63
+ elif file_path.endswith('.png'):
64
+ if transpose_segmentation:
65
+ with Image.open(file_path) as img:
66
+ segs_imgs = np.array(img).transpose(1, 0)
67
+ fios = AnnotationAPIHandler._numpy_to_bytesio_png(segs_imgs)
68
+ else:
69
+ fios = (open(file_path, 'rb') for _ in range(1))
70
+ nframes = 1
71
+ else:
72
+ raise ValueError(f"Unsupported file format of '{file_path}'")
73
+
74
+ return nframes, fios
75
+
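For orientation, a quick illustrative sketch of what the helper above produces (not part of the package source; `AnnotationAPIHandler` is assumed to be importable and the array values are made up): a (height, width, #frames) array is split into one 8-bit grayscale PNG per frame.

import numpy as np

# Hypothetical 4-frame mask with labels 0..2; layout is (height, width, #frames).
masks = np.random.randint(0, 3, size=(64, 64, 4), dtype=np.uint8)

nframes, fios = AnnotationAPIHandler._generate_segmentations_ios(masks)
assert nframes == 4
first_png = next(fios)  # a BytesIO holding frame 0 encoded as a grayscale PNG
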
76
+ async def _upload_annotations_async(self,
77
+ resource_id: str,
78
+ annotations: list[dict | CreateAnnotationDto]) -> list[str]:
79
+ annotations = [ann.to_dict() if isinstance(ann, CreateAnnotationDto) else ann for ann in annotations]
80
+ request_params = dict(
81
+ method='POST',
82
+ url=f'{self.root_url}/annotations/{resource_id}/annotations',
83
+ json=annotations
84
+ )
85
+ resp = await self._run_request_async(request_params)
86
+ for r in resp:
87
+ if 'error' in r:
88
+ raise DatamintException(r['error'])
89
+ return resp
90
+
91
+ async def _upload_segmentations_async(self,
92
+ resource_id: str,
93
+ frame_index: int | None,
94
+ file_path: str | np.ndarray | None = None,
95
+ fio: IO | None = None,
96
+ name: Optional[str | dict[int, str]] = None,
97
+ imported_from: Optional[str] = None,
98
+ author_email: Optional[str] = None,
99
+ discard_empty_segmentations: bool = True,
100
+ worklist_id: Optional[str] = None,
101
+ model_id: Optional[str] = None,
102
+ transpose_segmentation: bool = False
103
+ ) -> list[str]:
104
+ if file_path is not None:
105
+ nframes, fios = AnnotationAPIHandler._generate_segmentations_ios(file_path,
106
+ transpose_segmentation=transpose_segmentation)
107
+ if frame_index is None:
108
+ frame_index = list(range(nframes))
109
+ annotids = []
110
+ for fidx, f in zip(frame_index, fios):
111
+ reti = await self._upload_segmentations_async(resource_id,
112
+ fio=f,
113
+ name=name,
114
+ frame_index=fidx,
115
+ imported_from=imported_from,
116
+ author_email=author_email,
117
+ discard_empty_segmentations=discard_empty_segmentations,
118
+ worklist_id=worklist_id,
119
+ model_id=model_id)
120
+ annotids.extend(reti)
121
+ return annotids
122
+ try:
123
+ try:
124
+ img = np.array(Image.open(fio))
125
+ ### Check that frame is not empty ###
126
+ uniq_vals = np.unique(img)
127
+ if discard_empty_segmentations:
128
+ if len(uniq_vals) == 1 and uniq_vals[0] == 0:
129
+ msg = f"Discarding empty segmentation for frame {frame_index}"
130
+ _LOGGER.debug(msg)
131
+ _USER_LOGGER.debug(msg)
132
+ return []
133
+ fio.seek(0)
134
+ # TODO: Optimize this. It is not necessary to open the image twice.
135
+
136
+ segnames = AnnotationAPIHandler._get_segmentation_names(uniq_vals, names=name)
137
+ segs_generator = AnnotationAPIHandler._split_segmentations(img, uniq_vals, fio)
138
+ annotations: list[CreateAnnotationDto] = []
139
+ for segname in segnames:
140
+ ann = CreateAnnotationDto(type='segmentation',
141
+ identifier=segname,
142
+ scope='frame',
143
+ frame_index=frame_index,
144
+ imported_from=imported_from,
145
+ import_author=author_email,
146
+ model_id=model_id,
147
+ annotation_worklist_id=worklist_id)
148
+ annotations.append(ann)
149
+ # raise ValueError if there are multiple annotations with the same identifier, frame_index, scope and author
150
+ if len(annotations) != len(set([a.identifier for a in annotations])):
151
+ raise ValueError(
152
+ "Multiple annotations with the same identifier, frame_index, scope and author is not supported yet.")
153
+
154
+ annotids = await self._upload_annotations_async(resource_id, annotations)
155
+
156
+ ### Upload segmentation ###
157
+ if len(annotids) != len(segnames):
158
+ _LOGGER.warning(f"Number of uploaded annotations ({len(annotids)})" +
159
+ f" does not match the number of annotations ({len(segnames)})")
160
+ for annotid, segname, fio in zip(annotids, segnames, segs_generator):
161
+ form = aiohttp.FormData()
162
+ form.add_field('file', fio, filename=segname, content_type='image/png')
163
+ request_params = dict(
164
+ method='POST',
165
+ url=f'{self.root_url}/annotations/{resource_id}/annotations/{annotid}/file',
166
+ data=form,
167
+ )
168
+ resp = await self._run_request_async(request_params)
169
+ if 'error' in resp:
170
+ raise DatamintException(resp['error'])
171
+ #######
172
+ finally:
173
+ fio.close()
174
+ _USER_LOGGER.info(f'Segmentations uploaded for resource {resource_id}')
175
+ return annotids
176
+ except ResourceNotFoundError:
177
+ raise ResourceNotFoundError('resource', {'resource_id': resource_id})
178
+
179
+ def upload_segmentations(self,
180
+ resource_id: str,
181
+ file_path: str | np.ndarray,
182
+ name: Optional[str | dict[int, str]] = None,
183
+ frame_index: Optional[int | list[int]] = None,
184
+ imported_from: Optional[str] = None,
185
+ author_email: Optional[str] = None,
186
+ discard_empty_segmentations: bool = True,
187
+ worklist_id: Optional[str] = None,
188
+ model_id: Optional[str] = None,
189
+ transpose_segmentation: bool = False
190
+ ) -> list[str]:
191
+ """
192
+ Upload segmentations to a resource.
193
+
194
+ Args:
195
+ resource_id (str): The resource unique id.
196
+ file_path (str|np.ndarray): The path to the segmentation file or a numpy array.
197
+ If a numpy array is provided, it must have the shape (height, width, #frames) or (height, width).
198
+ name (Optional[Union[str, Dict[int, str]]]): The name of the segmentation or a dictionary mapping pixel values to names.
199
+ example: {1: 'Femur', 2: 'Tibia'}.
200
+ frame_index (int | list[int]): The frame index of the segmentation.
201
+ If a list, it must have the same length as the number of frames in the segmentation.
202
+ If None, it is assumed that the segmentations are in sequential order starting from 0.
203
+
204
+ discard_empty_segmentations (bool): Whether to discard empty segmentations or not.
205
+
206
+ Returns:
207
+ list[str]: The list of created annotation unique ids.
208
+
209
+ Raises:
210
+ ResourceNotFoundError: If the resource does not exist or the segmentation is invalid.
211
+
212
+ Example:
213
+ >>> api_handler.upload_segmentations(resource_id, 'path/to/segmentation.png', 'SegmentationName')
214
+ """
215
+ if isinstance(file_path, str) and not os.path.exists(file_path):
216
+ raise FileNotFoundError(f"File {file_path} not found.")
217
+ if isinstance(frame_index, int):
218
+ frame_index = [frame_index]
219
+
220
+ loop = asyncio.get_event_loop()
221
+ to_run = []
222
+ # Generate IOs for the segmentations.
223
+ nframes, fios = AnnotationAPIHandler._generate_segmentations_ios(file_path,
224
+ transpose_segmentation=transpose_segmentation)
225
+ if frame_index is None:
226
+ frame_index = list(range(nframes))
227
+ elif len(frame_index) != nframes:
228
+ raise ValueError("Do not provide frame_index for images of multiple frames.")
229
+ #######
230
+
231
+ # For each frame, create the annotations and upload the segmentations.
232
+ for fidx, f in zip(frame_index, fios):
233
+ task = self._upload_segmentations_async(resource_id,
234
+ fio=f,
235
+ name=name,
236
+ frame_index=fidx,
237
+ imported_from=imported_from,
238
+ author_email=author_email,
239
+ discard_empty_segmentations=discard_empty_segmentations,
240
+ worklist_id=worklist_id,
241
+ model_id=model_id)
242
+ to_run.append(task)
243
+
244
+ ret = loop.run_until_complete(asyncio.gather(*to_run))
245
+ # merge the results into a single list
246
+ ret = [item for sublist in ret for item in sublist]
247
+ return ret
248
+
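A minimal usage sketch for `upload_segmentations` (illustrative only; `api` is assumed to be an already-configured handler exposing this mixin, and `resource_id` an existing resource id):

import numpy as np

# Hypothetical two-frame mask; layout is (height, width, #frames).
mask = np.zeros((512, 512, 2), dtype=np.uint8)
mask[100:200, 150:300, 0] = 1   # pixel value 1 on frame 0
mask[50:120, 60:220, 1] = 2     # pixel value 2 on frame 1

annotation_ids = api.upload_segmentations(
    resource_id,
    mask,
    name={1: 'Femur', 2: 'Tibia'},  # maps pixel values to segmentation names
)
print(annotation_ids)  # list of created annotation ids
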
249
+ def add_image_category_annotation(self,
250
+ resource_id: str,
251
+ identifier: str,
252
+ value: str,
253
+ imported_from: Optional[str] = None,
254
+ author_email: Optional[str] = None,
255
+ worklist_id: Optional[str] = None,
256
+ project: Optional[str] = None
257
+ ):
258
+ """
259
+ Add a category annotation to an image.
260
+
261
+ Args:
262
+ resource_id (str): The resource unique id.
263
+ identifier (str): The annotation identifier. For example: 'fracture'.
264
+ value (str): The annotation value.
265
+ imported_from (Optional[str]): The imported from value.
266
+ author_email (Optional[str]): The author email. If None, use the customer of the api key.
267
+ worklist_id (Optional[str]): The annotation worklist unique id.
268
+ project (Optional[str]): The project unique id or name. Only this or worklist_id can be provided at the same time.
269
+
270
+ """
271
+ if project is not None and worklist_id is not None:
272
+ raise ValueError('Only one of project or worklist_id can be provided.')
273
+ if project is not None:
274
+ proj = self.get_project_by_name(project)
275
+ if 'error' in proj.keys():
276
+ raise DatamintException(f"Project {project} not found.")
277
+ worklist_id = proj['worklist_id']
278
+
279
+ if value is None:
280
+ raise ValueError('Value cannot be None.')
281
+
282
+ request_params = {
283
+ 'method': 'POST',
284
+ 'url': f'{self.root_url}/annotations/{resource_id}/annotations',
285
+ 'json': [{
286
+ 'identifier': identifier,
287
+ 'value': value,
288
+ 'scope': 'image',
289
+ 'type': 'category',
290
+ 'imported_from': imported_from,
291
+ 'import_author': author_email,
292
+ 'annotation_worklist_id': worklist_id
293
+ }]
294
+ }
295
+
296
+ resp = self._run_request(request_params)
297
+ self._check_errors_response_json(resp)
298
+
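A hypothetical call to `add_image_category_annotation` (the ids, identifier and value are placeholders; `api` is an assumed handler instance):

api.add_image_category_annotation(
    resource_id='aa93813c-cef0-4edd-a45c-85d4a8f1ad0d',
    identifier='image_quality',
    value='diagnostic',
    project='Example Project',  # resolved internally to the project's worklist_id
)
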
299
+ def add_frame_category_annotation(self,
300
+ resource_id: str,
301
+ frame_index: int | tuple[int, int],
302
+ identifier: str,
303
+ value: str,
304
+ worklist_id: Optional[str] = None,
305
+ imported_from: Optional[str] = None,
306
+ author_email: Optional[str] = None
307
+ ):
308
+ """
309
+ Add a category annotation to a frame.
310
+
311
+ Args:
312
+ resource_id (str): The resource unique id.
313
+ frame_index (Union[int, Tuple[int, int]]): The frame index or a tuple with the range of frame indexes.
314
+ If a tuple is provided, the annotation will be added to all frames in the range (inclusive on both ends).
315
+ identifier (str): The annotation identifier.
316
+ value (str): The annotation value.
317
+ worklist_id (Optional[str]): The annotation worklist unique id.
318
+ author_email (Optional[str]): The author email. If None, use the customer of the api key.
319
+ Requires admin permissions to set a different customer.
320
+ """
321
+
322
+ if isinstance(frame_index, tuple):
323
+ frame_index = list(range(frame_index[0], frame_index[1]+1))
324
+ elif isinstance(frame_index, int):
325
+ frame_index = [frame_index]
326
+
327
+ json_data = [{
328
+ 'identifier': identifier,
329
+ 'value': value,
330
+ 'scope': 'frame',
331
+ 'frame_index': i,
332
+ 'annotation_worklist_id': worklist_id,
333
+ 'imported_from': imported_from,
334
+ 'import_author': author_email,
335
+ 'type': 'category'} for i in frame_index]
336
+
337
+ request_params = {
338
+ 'method': 'POST',
339
+ 'url': f'{self.root_url}/annotations/{resource_id}/annotations',
340
+ 'json': json_data
341
+ }
342
+
343
+ resp = self._run_request(request_params)
344
+ self._check_errors_response_json(resp)
345
+
346
+ def add_annotations(self,
347
+ resource_id: str,
348
+ identifier: str,
349
+ frame_index: int | tuple[int, int] | None = None,
350
+ value: Optional[str] = None,
351
+ worklist_id: Optional[str] = None,
352
+ imported_from: Optional[str] = None,
353
+ author_email: Optional[str] = None,
354
+ model_id: Optional[str] = None,
355
+ project: Optional[str] = None,
356
+ ):
357
+ """
358
+ Add annotations to a resource.
359
+
360
+ Args:
361
+ resource_id: The resource unique id.
362
+ identifier: The annotation identifier.
363
+ frame_index: The frame index or a tuple with the range of frame indexes.
364
+ If a tuple is provided, the annotation will be added to all frames in the range (inclusive on both ends).
365
+ value: The annotation value.
366
+ worklist_id: The annotation worklist unique id.
367
+ imported_from: The imported from value.
368
+ author_email: The author email. If None, use the customer of the api key.
369
+ Requires admin permissions to set a different customer.
370
+ model_id: The model unique id.
371
+ project: The project unique id or name. Only this or worklist_id can be provided at the same time.
372
+ """
373
+
374
+ if project is not None and worklist_id is not None:
375
+ raise ValueError('Only one of project or worklist_id can be provided.')
376
+ if project is not None:
377
+ proj = self.get_project_by_name(project)
378
+ if 'error' in proj.keys():
379
+ raise DatamintException(f"Project {project} not found.")
380
+ worklist_id = proj['worklist_id']
381
+
382
+ if isinstance(frame_index, tuple):
383
+ begin, end = frame_index
384
+ if begin > end:
385
+ raise ValueError('The first element of the tuple must be less than the second element.')
386
+ frame_index = list(range(begin, end+1))
387
+ elif isinstance(frame_index, int):
388
+ frame_index = [frame_index]
389
+
390
+ scope = 'frame' if frame_index is not None else 'image'
391
+
392
+ params = {
393
+ 'identifier': identifier,
394
+ 'value': value,
395
+ 'scope': scope,
396
+ 'annotation_worklist_id': worklist_id,
397
+ 'imported_from': imported_from,
398
+ 'import_author': author_email,
399
+ 'type': 'label' if value is None else 'category',
400
+ }
401
+ if model_id is not None:
402
+ params['model_id'] = model_id
403
+ params['is_model'] = True
404
+
405
+ if frame_index is not None:
406
+ json_data = [dict(params, frame_index=i) for i in frame_index]
407
+ else:
408
+ json_data = [params]
409
+
410
+ request_params = {
411
+ 'method': 'POST',
412
+ 'url': f'{self.root_url}/annotations/{resource_id}/annotations',
413
+ 'json': json_data
414
+ }
415
+
416
+ resp = self._run_request(request_params)
417
+ self._check_errors_response_json(resp)
418
+
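A sketch of `add_annotations` for a frame range (placeholder values; passing a value creates a 'category' annotation, omitting it creates a plain 'label'):

api.add_annotations(
    resource_id='aa93813c-cef0-4edd-a45c-85d4a8f1ad0d',
    identifier='fracture',
    frame_index=(10, 20),       # inclusive range, expanded to frames 10..20
    value='displaced',          # makes this a 'category' annotation
    project='Example Project',  # resolved internally to the project's worklist_id
)
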
419
+ def add_line_annotation(self,
420
+ point1: tuple[int, int] | tuple[float, float, float],
421
+ point2: tuple[int, int] | tuple[float, float, float],
422
+ resource_id: str,
423
+ identifier: str,
424
+ frame_index: int | None = None,
425
+ dicom_metadata: pydicom.Dataset | str | None = None,
426
+ coords_system: CoordinateSystem = 'pixel',
427
+ project: Optional[str] = None,
428
+ worklist_id: Optional[str] = None,
429
+ imported_from: Optional[str] = None,
430
+ author_email: Optional[str] = None,
431
+ model_id: Optional[str] = None):
432
+ """
433
+ Add a line annotation to a resource.
434
+
435
+ Args:
436
+ point1: The first point of the line. Can be a 2d or 3d point.
437
+ If `coords_system` is 'pixel', it must be a 2d point and it represents the pixel coordinates of the image.
438
+ If `coords_system` is 'patient', it must be a 3d point and it represents the patient coordinates of the image, relative
439
+ to the DICOM metadata.
441
+ point2: The second point of the line. See `point1` for more details.
442
+ resource_id: The resource unique id.
443
+ identifier: The annotation identifier, also known as the annotation's label.
444
+ frame_index: The frame index of the annotation.
445
+ dicom_metadata: The DICOM metadata of the image, or a path to a DICOM file. If provided, pixel
446
+ coordinates are converted to patient coordinates automatically using this metadata.
447
+ coords_system: The coordinate system of the points. Can be 'pixel', or 'patient'.
448
+ If 'pixel', the points are in pixel coordinates. If 'patient', the points are in patient coordinates (see DICOM patient coordinates).
449
+ project: The project unique id or name.
450
+ worklist_id: The annotation worklist unique id. Optional.
451
+ imported_from: The imported from source value.
452
+ author_email: The email to consider as the author of the annotation. If None, use the customer of the api key.
453
+ model_id: The model unique id. Optional.
454
+
455
+ Example:
456
+ .. code-block:: python
457
+
458
+ res_id = 'aa93813c-cef0-4edd-a45c-85d4a8f1ad0d'
459
+ api.add_line_annotation([0, 0], (10, 30),
460
+ resource_id=res_id,
461
+ identifier='Line1',
462
+ frame_index=2,
463
+ project='Example Project')
464
+ """
465
+
466
+ if project is not None and worklist_id is not None:
467
+ raise ValueError('Only one of project or worklist_id can be provided.')
468
+
469
+ if project is not None:
470
+ proj = self.get_project_by_name(project)
471
+ if 'error' in proj.keys():
472
+ raise DatamintException(f"Project {project} not found.")
473
+ worklist_id = proj['worklist_id']
474
+
475
+ if coords_system == 'pixel':
476
+ if dicom_metadata is None:
477
+ point1 = (point1[0], point1[1], frame_index)
478
+ point2 = (point2[0], point2[1], frame_index)
479
+ geom = LineGeometry(point1, point2)
480
+ else:
481
+ if isinstance(dicom_metadata, str):
482
+ dicom_metadata = pydicom.dcmread(dicom_metadata)
483
+ geom = LineGeometry.from_dicom(dicom_metadata, point1, point2, slice_index=frame_index)
484
+ elif coords_system == 'patient':
485
+ geom = LineGeometry(point1, point2)
486
+ else:
487
+ raise ValueError(f"Unknown coordinate system: {coords_system}")
488
+
489
+ anndto = CreateAnnotationDto(
490
+ type=AnnotationType.LINE,
491
+ identifier=identifier,
492
+ scope='frame',
493
+ annotation_worklist_id=worklist_id,
494
+ value=None,
495
+ imported_from=imported_from,
496
+ import_author=author_email,
497
+ frame_index=frame_index,
498
+ geometry=geom,
499
+ model_id=model_id,
500
+ is_model=model_id is not None,
501
+ )
502
+
503
+ json_data = anndto.to_dict()
504
+
505
+ request_params = {
506
+ 'method': 'POST',
507
+ 'url': f'{self.root_url}/annotations/{resource_id}/annotations',
508
+ 'json': [json_data]
509
+ }
510
+
511
+ resp = self._run_request(request_params)
512
+ self._check_errors_response_json(resp)
513
+
514
+ @deprecated(version='0.12.1', reason='Use :meth:`~get_annotations` instead with `resource_id` parameter.')
515
+ def get_resource_annotations(self,
516
+ resource_id: str,
517
+ annotation_type: Optional[str] = None,
518
+ annotator_email: Optional[str] = None,
519
+ date_from: Optional[date] = None,
520
+ date_to: Optional[date] = None) -> Generator[dict, None, None]:
521
+
522
+ return self.get_annotations(resource_id=resource_id,
523
+ annotation_type=annotation_type,
524
+ annotator_email=annotator_email,
525
+ date_from=date_from,
526
+ date_to=date_to)
527
+
528
+ def get_annotations(self,
529
+ resource_id: Optional[str] = None,
530
+ annotation_type: AnnotationType | str | None = None,
531
+ annotator_email: Optional[str] = None,
532
+ date_from: Optional[date] = None,
533
+ date_to: Optional[date] = None,
534
+ dataset_id: Optional[str] = None,
535
+ worklist_id: Optional[str] = None,
536
+ status: Optional[Literal['new', 'published']] = None,
537
+ load_ai_segmentations: Optional[bool] = None,
538
+ ) -> Generator[dict, None, None]:
539
+ """
540
+ Get annotations for a resource.
541
+
542
+ Args:
543
+ resource_id (Optional[str]): The resource unique id.
544
+ annotation_type (Optional[str]): The annotation type. See :class:`~datamintapi.dto.annotation_dto.AnnotationType`.
545
+ annotator_email (Optional[str]): The annotator email.
546
+ date_from (Optional[date]): The start date.
547
+ date_to (Optional[date]): The end date.
548
+ dataset_id (Optional[str]): The dataset unique id.
549
+ worklist_id (Optional[str]): The annotation worklist unique id.
550
+ status (Optional[Literal['new', 'published']]): The status of the annotations.
551
+ load_ai_segmentations (Optional[bool]): Whether to load the AI segmentations or not (False by default).
552
+
553
+ Returns:
554
+ Generator[dict, None, None]: A generator of dictionaries with the annotations information.
555
+ """
556
+ # TODO: create annotation_type enum
557
+
558
+ if annotation_type is not None and isinstance(annotation_type, AnnotationType):
559
+ annotation_type = annotation_type.value
560
+
561
+ payload = {
562
+ 'resource_id': resource_id,
563
+ 'annotation_type': annotation_type,
564
+ 'annotatorEmail': annotator_email,
565
+ 'from': date_from.isoformat() if date_from is not None else None,
566
+ 'to': date_to.isoformat() if date_to is not None else None,
567
+ 'dataset_id': dataset_id,
568
+ 'annotation_worklist_id': worklist_id,
569
+ 'status': status,
570
+ 'load_ai_segmentations': load_ai_segmentations
571
+ }
572
+
573
+ # remove None-valued entries
574
+ payload = {k: v for k, v in payload.items() if v is not None}
575
+
576
+ request_params = {
577
+ 'method': 'GET',
578
+ 'url': f'{self.root_url}/annotations',
579
+ 'params': payload
580
+ }
581
+
582
+ yield from self._run_pagination_request(request_params, return_field='data')
583
+
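An illustrative way to page through annotations with `get_annotations` (all values are placeholders; the method returns a lazy generator over paginated results):

from datetime import date

for ann in api.get_annotations(resource_id='aa93813c-cef0-4edd-a45c-85d4a8f1ad0d',
                               annotation_type='segmentation',
                               date_from=date(2024, 1, 1),
                               status='published'):
    print(ann.get('id'), ann.get('identifier'))
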
584
+ def get_annotation_worklist(self,
585
+ status: Optional[Literal['new', 'updating', 'active', 'completed']] = None
586
+ ) -> Generator[dict, None, None]:
587
+ """
588
+ Get the annotation worklists, optionally filtered by status.
589
+
590
+ Args:
591
+ status (Literal['new', 'updating', 'active', 'completed']): The worklist status to filter by.
592
+
593
+ Returns:
594
+ Generator[dict, None, None]: A generator of dictionaries with the annotation worklist information.
595
+ """
596
+
597
+ payload = {}
598
+
599
+ if status is not None:
600
+ payload['status'] = status
601
+
602
+ request_params = {
603
+ 'method': 'GET',
604
+ 'url': f'{self.root_url}/annotationsets',
605
+ 'params': payload
606
+ }
607
+
608
+ yield from self._run_pagination_request(request_params, return_field='data')
609
+
610
+ def get_annotation_worklist_by_id(self,
611
+ id: str) -> dict:
612
+ """Get the annotation worklist.
613
+
614
+ Args:
615
+ id: The annotation worklist unique id.
616
+
617
+ Returns:
618
+ dict: A dictionary with the annotation worklist information.
619
+ """
620
+
621
+ request_params = {
622
+ 'method': 'GET',
623
+ 'url': f'{self.root_url}/annotationsets/{id}',
624
+ }
625
+
626
+ try:
627
+ resp = self._run_request(request_params).json()
628
+ return resp
629
+ except HTTPError as e:
630
+ if e.response.status_code == 404:
631
+ raise ResourceNotFoundError('annotation worklist', {'id': id})
632
+ raise e
633
+
634
+ def update_annotation_worklist(self,
635
+ worklist_id: str,
636
+ frame_labels: Optional[list[str]] = None,
637
+ image_labels: Optional[list[str]] = None,
638
+ annotations: Optional[list[dict]] = None,
639
+ status: Optional[Literal['new', 'updating', 'active', 'completed']] = None,
640
+ name: Optional[str] = None,
641
+ ):
642
+ """
643
+ Update an annotation worklist (status, labels, annotations and/or name).
644
+
645
+ Args:
646
+ worklist_id (str): The annotation worklist unique id.
647
+ frame_labels (List[str]): The frame labels.
648
+ image_labels (List[str]): The image labels.
649
+ annotations (List[Dict]): The annotations.
650
+ status (Literal['new', 'updating','active', 'completed']): The status of the annotations.
651
+
652
+ """
653
+
654
+ payload = {}
655
+ if status is not None:
656
+ payload['status'] = status
657
+ if frame_labels is not None:
658
+ payload['frame_labels'] = frame_labels
659
+ if image_labels is not None:
660
+ payload['image_labels'] = image_labels
661
+ if annotations is not None:
662
+ payload['annotations'] = annotations
663
+ if name is not None:
664
+ payload['name'] = name
665
+
666
+ request_params = {
667
+ 'method': 'PATCH',
668
+ 'url': f'{self.root_url}/annotationsets/{worklist_id}',
669
+ 'json': payload
670
+ }
671
+
672
+ self._run_request(request_params)
673
+
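For example, a hypothetical one-liner marking a worklist as finished while renaming it (`api` and `worklist_id` are assumed to exist):

api.update_annotation_worklist(worklist_id, status='completed', name='QA pass 1')
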
674
+ @staticmethod
675
+ def _get_segmentation_names(uniq_vals: np.ndarray,
676
+ names: Optional[str | dict[int, str]] = None
677
+ ) -> list[str]:
678
+ uniq_vals = uniq_vals[uniq_vals != 0]
679
+ if names is None:
680
+ names = 'seg'
681
+ if isinstance(names, str):
682
+ if len(uniq_vals) == 1:
683
+ return [names]
684
+ return [f'{names}_{v}' for v in uniq_vals]
685
+ if isinstance(names, dict):
686
+ for v in uniq_vals:
687
+ new_name = names.get(v, names.get('default', None))
688
+ if new_name is None:
689
+ raise ValueError(f"Value {v} not found in names dictionary." +
690
+ f" Provide a name for {v} or use 'default' key to provide a prefix.")
691
+ return [names.get(v, names.get('default', '')+'_'+str(v)) for v in uniq_vals]
692
+ raise ValueError("names must be a string or a dictionary.")
693
+
694
+ @staticmethod
695
+ def _split_segmentations(img: np.ndarray,
696
+ uniq_vals: np.ndarray,
697
+ f: IO,
698
+ ) -> Generator[BytesIO, None, None]:
699
+ # remove zero from uniq_vals
700
+ uniq_vals = uniq_vals[uniq_vals != 0]
701
+
702
+ for v in uniq_vals:
703
+ img_v = (img == v).astype(np.uint8)
704
+
705
+ f = BytesIO()
706
+ Image.fromarray(img_v*255).convert('RGB').save(f, format='PNG')
707
+ f.seek(0)
708
+ yield f
709
+
710
+ def delete_annotation(self, annotation_id: str | dict):
711
+ if isinstance(annotation_id, dict):
712
+ annotation_id = annotation_id.get('id', None)
713
+ if annotation_id is None:
714
+ raise ValueError("annotation_id must be a string or a dict with 'id' key.")
715
+ request_params = {
716
+ 'method': 'DELETE',
717
+ 'url': f'{self.root_url}/annotations/{annotation_id}',
718
+ }
719
+
720
+ resp = self._run_request(request_params)
721
+ self._check_errors_response_json(resp)
722
+
723
+ def get_segmentation_file(self, resource_id: str, annotation_id: str) -> bytes:
724
+ request_params = {
725
+ 'method': 'GET',
726
+ 'url': f'{self.root_url}/annotations/{resource_id}/annotations/{annotation_id}/file',
727
+ }
728
+
729
+ resp = self._run_request(request_params)
730
+ return resp.content
731
+
732
+ def set_annotation_status(self,
733
+ project_id: str,
734
+ resource_id: str,
735
+ status: Literal['opened', 'annotated', 'closed']
736
+ ):
737
+
738
+ if status not in ['opened', 'annotated', 'closed']:
739
+ raise ValueError("status must be one of ['opened', 'annotated', 'closed']")
740
+ request_params = {
741
+ 'method': 'POST',
742
+ 'url': f'{self.root_url}/projects/{project_id}/resources/{resource_id}/status',
743
+ 'json': {
744
+ 'status': status
745
+ }
746
+ }
747
+ resp = self._run_request(request_params)
748
+ self._check_errors_response_json(resp)
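
Finally, a hedged sketch tying the last few methods together (all ids are placeholders; `api` is an assumed, already-configured handler instance):

# Download a segmentation mask, then remove its annotation and close the resource in the project.
png_bytes = api.get_segmentation_file(resource_id, annotation_id)
with open('mask.png', 'wb') as out:
    out.write(png_bytes)

api.delete_annotation(annotation_id)
api.set_annotation_status(project_id, resource_id, status='closed')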