datamint 1.2.4__py3-none-any.whl

@@ -0,0 +1,492 @@
+ from .base_dataset import DatamintBaseDataset
+ from typing import List, Optional, Callable, Any, Dict, Literal
+ import torch
+ from torch import Tensor
+ import os
+ import numpy as np
+ import logging
+ from PIL import Image
+ import albumentations
+
+ _LOGGER = logging.getLogger(__name__)
+
+
+ class DatamintDataset(DatamintBaseDataset):
+     """
+     This Dataset class extends :class:`DatamintBaseDataset` so that it can be used directly with PyTorch.
+     In addition, it provides functionality to better process annotations and segmentations.
+
+     .. note::
+         Import using ``from datamintapi import Dataset``.
+
+     Args:
+         root: Root directory of dataset where data already exists or will be downloaded.
+         project_name: Name of the project to download.
+         auto_update: If True, the dataset will be checked for updates and downloaded if necessary.
+         api_key: API key to access the Datamint API. If not provided, it will look for the
+             environment variable 'DATAMINT_API_KEY'. Not necessary if
+             you don't want to download/update the dataset.
+         return_dicom: If True, the DICOM object will be returned, if the image is a DICOM file.
+         return_metainfo: If True, the metainfo of the image will be returned.
+         return_annotations: If True, the annotations of the image will be returned.
+         return_frame_by_frame: If True, each frame of a video/DICOM/3d-image will be returned separately.
+         include_unannotated: If True, images without annotations will be included. If False, they will be discarded.
+         all_annotations: If True, all annotations will be downloaded, including the ones that are not set as closed/done.
+         server_url: URL of the Datamint server. If not provided, it will use the default server.
+         return_segmentations: If True (default), the segmentations of the image will be returned in the 'segmentations' key.
+         return_as_semantic_segmentation: If True, the segmentations will be returned as semantic segmentation.
+         image_transform: A function to transform the image.
+         mask_transform: A function to transform the mask.
+         alb_transform: An albumentations transform applied jointly to the image and its masks.
+             Currently only supported when ``return_frame_by_frame`` is True.
+         semantic_seg_merge_strategy: If not None, the segmentations of the different annotators will be merged using this strategy.
+             Possible values are 'union', 'intersection', 'mode'.
+         include_annotators: List of annotators to include. If None, all annotators will be included. See parameter ``exclude_annotators``.
+         exclude_annotators: List of annotators to exclude. If None, no annotators will be excluded. See parameter ``include_annotators``.
+         include_segmentation_names: List of segmentation names to include. If None, all segmentations will be included.
+         exclude_segmentation_names: List of segmentation names to exclude. If None, no segmentations will be excluded.
+         include_image_label_names: List of image label names to include. If None, all image labels will be included.
+         exclude_image_label_names: List of image label names to exclude. If None, no image labels will be excluded.
+         include_frame_label_names: List of frame label names to include. If None, all frame labels will be included.
+         exclude_frame_label_names: List of frame label names to exclude. If None, no frame labels will be excluded.
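+
+     Example:
+         A minimal usage sketch (it assumes a project named ``'my-project'`` exists on the
+         server and that the ``DATAMINT_API_KEY`` environment variable is set):
+
+         .. code-block:: python
+
+             from datamintapi import Dataset
+
+             dataset = Dataset(project_name='my-project',
+                               return_frame_by_frame=True,
+                               return_as_semantic_segmentation=True,
+                               semantic_seg_merge_strategy='union')
+             item = dataset[0]
+             img = item['image']            # tensor of shape (C, H, W)
+             masks = item['segmentations']  # tensor of shape (L, H, W); L = #labels + 1 (background)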
+     """
+
+     def __init__(self,
+                  project_name: str,
+                  root: str | None = None,
+                  auto_update: bool = True,
+                  api_key: Optional[str] = None,
+                  server_url: Optional[str] = None,
+                  return_dicom: bool = False,
+                  return_metainfo: bool = True,
+                  return_frame_by_frame: bool = False,
+                  return_annotations: bool = True,
+                  # new parameters
+                  return_segmentations: bool = True,
+                  return_as_semantic_segmentation: bool = False,
+                  image_transform: Callable[[torch.Tensor], Any] | None = None,
+                  mask_transform: Callable[[torch.Tensor], Any] | None = None,
+                  alb_transform: albumentations.BasicTransform | None = None,
+                  semantic_seg_merge_strategy: Optional[Literal['union', 'intersection', 'mode']] = None,
+                  include_unannotated: bool = True,
+                  # filtering parameters
+                  include_annotators: Optional[list[str]] = None,
+                  exclude_annotators: Optional[list[str]] = None,
+                  include_segmentation_names: Optional[list[str]] = None,
+                  exclude_segmentation_names: Optional[list[str]] = None,
+                  include_image_label_names: Optional[list[str]] = None,
+                  exclude_image_label_names: Optional[list[str]] = None,
+                  include_frame_label_names: Optional[list[str]] = None,
+                  exclude_frame_label_names: Optional[list[str]] = None,
+                  all_annotations: bool = False,
+                  ):
+         super().__init__(root=root,
+                          project_name=project_name,
+                          auto_update=auto_update,
+                          api_key=api_key,
+                          server_url=server_url,
+                          return_dicom=return_dicom,
+                          return_metainfo=return_metainfo,
+                          return_frame_by_frame=return_frame_by_frame,
+                          return_annotations=return_annotations,
+                          include_unannotated=include_unannotated,
+                          all_annotations=all_annotations,
+                          include_annotators=include_annotators,
+                          exclude_annotators=exclude_annotators,
+                          include_segmentation_names=include_segmentation_names,
+                          exclude_segmentation_names=exclude_segmentation_names,
+                          include_image_label_names=include_image_label_names,
+                          exclude_image_label_names=exclude_image_label_names,
+                          include_frame_label_names=include_frame_label_names,
+                          exclude_frame_label_names=exclude_frame_label_names
+                          )
+         self.return_segmentations = return_segmentations
+         self.return_as_semantic_segmentation = return_as_semantic_segmentation
+         self.image_transform = image_transform
+         self.mask_transform = mask_transform
+         self.alb_transform = alb_transform
+         if alb_transform is not None and not return_frame_by_frame:
+             # not supported yet
+             raise NotImplementedError(
+                 "albumentations transform is not supported yet when return_frame_by_frame is False")
+         self.semantic_seg_merge_strategy = semantic_seg_merge_strategy
+
+         if return_as_semantic_segmentation and not return_segmentations:
+             raise ValueError("return_as_semantic_segmentation can only be True if return_segmentations is True")
+
+         if semantic_seg_merge_strategy is not None and not return_as_semantic_segmentation:
+             raise ValueError("semantic_seg_merge_strategy can only be used if return_as_semantic_segmentation is True")
+
+     def _load_segmentations(self, annotations: list[dict], img_shape) -> tuple[dict[str, list], dict[str, list]]:
+         """
+         Load segmentations from annotations.
+
+         Args:
+             annotations: list of annotations. Each annotation is a dictionary with keys 'type', 'file', 'added_by', 'name', 'index'.
+             img_shape: shape of the image (#frames, C, H, W)
+
+         Returns:
+             tuple[dict[str, list], dict[str, list]]: a tuple of two dictionaries.
+                 The first dictionary is author -> list of #frames tensors, each tensor has shape (#instances_i, H, W).
+                 The second dictionary is author -> list of #frames segmentation labels (tensors).
+         """
+         segmentations = {}
+         seg_labels = {}
+
+         if self.return_frame_by_frame:
+             assert len(img_shape) == 3, f"img_shape must have 3 dimensions, got {img_shape}"
+             _, h, w = img_shape
+             nframes = 1
+         else:
+             assert len(img_shape) == 4, f"img_shape must have 4 dimensions, got {img_shape}"
+             nframes, _, h, w = img_shape
+
+         # Load segmentation annotations
+         for ann in annotations:
+             if ann['type'] != 'segmentation':
+                 continue
+             if 'file' not in ann:
+                 _LOGGER.warning(f"Segmentation annotation without file in annotations {ann}")
+                 continue
+             author = ann['added_by']
+
+             segfilepath = ann['file']  # png file
+             segfilepath = os.path.join(self.dataset_dir, segfilepath)
+             # FIXME: avoid enforcing resizing the mask
+             seg = (Image.open(segfilepath)
+                    .convert('L')
+                    .resize((w, h), Image.NEAREST)
+                    )
+             seg = np.array(seg)
+
+             seg = torch.from_numpy(seg)
+             seg = seg == 255  # binary mask
+             # map the segmentation label to the code
+             seg_code = self.frame_lcodes['segmentation'][ann['name']]
+             if self.return_frame_by_frame:
+                 frame_index = 0
+             else:
+                 frame_index = ann['index']
+
+             if author not in segmentations:
+                 segmentations[author] = [None] * nframes
+                 seg_labels[author] = [None] * nframes
+             author_segs = segmentations[author]
+             author_labels = seg_labels[author]
+
+             if author_segs[frame_index] is None:
+                 author_segs[frame_index] = []
+                 author_labels[frame_index] = []
+
+             author_segs[frame_index].append(seg)
+             author_labels[frame_index].append(seg_code)
+
+         # convert to tensor
+         for author, author_segs in segmentations.items():
+             author_labels = seg_labels[author]
+             for i in range(len(author_segs)):
+                 if author_segs[i] is not None:
+                     author_segs[i] = torch.stack(author_segs[i])
+                     author_labels[i] = torch.tensor(author_labels[i], dtype=torch.int32)
+                 else:
+                     author_segs[i] = torch.zeros((0, h, w), dtype=torch.bool)
+                     author_labels[i] = torch.zeros(0, dtype=torch.int32)
+
+         return segmentations, seg_labels
+
+     def _instanceseg2semanticseg(self,
+                                  segmentations: List[Tensor],
+                                  seg_labels: List[Tensor]) -> Tensor:
+         """
+         Convert instance segmentation to semantic segmentation.
+
+         Args:
+             segmentations: list of `n` tensors of shape (num_instances, H, W), where `n` is the number of frames.
+             seg_labels: list of `n` tensors of shape (num_instances,), where `n` is the number of frames.
+
+         Returns:
+             Tensor: tensor of shape (n, num_labels, H, W), where `n` is the number of frames.
+         """
+         if segmentations is not None:
+             if len(segmentations) != len(seg_labels):
+                 raise ValueError("segmentations and seg_labels must have the same length")
+
+             h, w = segmentations[0].shape[1:]
+             new_shape = (len(segmentations),
+                          len(self.segmentation_labels_set)+1,  # +1 for background
+                          h, w)
+             new_segmentations = torch.zeros(new_shape, dtype=torch.uint8)
+             # for each frame
+             for i in range(len(segmentations)):
+                 # for each instance
+                 for j in range(len(segmentations[i])):
+                     new_segmentations[i, seg_labels[i][j]] += segmentations[i][j]
+             new_segmentations = new_segmentations > 0
+             # pixels that are not in any segmentation are labeled as background
+             new_segmentations[:, 0] = new_segmentations.sum(dim=1) == 0
+             segmentations = new_segmentations.float()
+         return segmentations
+
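+     # Merging strategies across annotators: 'union' marks a pixel if any annotator marked it,
+     # 'intersection' only if all annotators did, and 'mode' takes a majority vote
+     # (a pixel is kept when at least half of the annotators marked it).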
+     def apply_semantic_seg_merge_strategy(self, segmentations: dict[str, Tensor],
+                                           nframes: int,
+                                           h, w) -> Tensor | dict[str, Tensor]:
+         if self.semantic_seg_merge_strategy is None:
+             return segmentations
+         if len(segmentations) == 0:
+             segmentations = torch.zeros((nframes, len(self.segmentation_labels_set)+1, h, w),
+                                         dtype=torch.get_default_dtype())
+             segmentations[:, 0, :, :] = 1  # background
+             return segmentations
+         if self.semantic_seg_merge_strategy == 'union':
+             merged_segs = self._apply_semantic_seg_merge_strategy_union(segmentations)
+         elif self.semantic_seg_merge_strategy == 'intersection':
+             merged_segs = self._apply_semantic_seg_merge_strategy_intersection(segmentations)
+         elif self.semantic_seg_merge_strategy == 'mode':
+             merged_segs = self._apply_semantic_seg_merge_strategy_mode(segmentations)
+         else:
+             raise ValueError(f"Unknown semantic_seg_merge_strategy: {self.semantic_seg_merge_strategy}")
+         return merged_segs.to(torch.get_default_dtype())
+
+     def _apply_semantic_seg_merge_strategy_union(self, segmentations: Dict[str, torch.Tensor]) -> torch.Tensor:
+         new_segmentations = torch.zeros_like(list(segmentations.values())[0])
+         for seg in segmentations.values():
+             new_segmentations += seg
+         return new_segmentations.bool()
+
+     def _apply_semantic_seg_merge_strategy_intersection(self, segmentations: Dict[str, torch.Tensor]) -> torch.Tensor:
+         # intersection: a pixel is kept only if every annotator marked it,
+         # hence multiply (logical AND) instead of add
+         new_segmentations = torch.ones_like(list(segmentations.values())[0])
+         for seg in segmentations.values():
+             new_segmentations = new_segmentations * seg
+         return new_segmentations.bool()
+
+     def _apply_semantic_seg_merge_strategy_mode(self, segmentations: Dict[str, torch.Tensor]) -> torch.Tensor:
+         # majority vote: a pixel is foreground if at least half of the annotators marked it
+         new_segmentations = torch.zeros_like(list(segmentations.values())[0])
+         for seg in segmentations.values():
+             new_segmentations += seg
+         new_segmentations = new_segmentations >= len(segmentations) / 2
+         return new_segmentations
+
+     def __apply_alb_transform_segmentation(self,
+                                            img: Tensor,
+                                            segmentations: dict[str, list[Tensor]]
+                                            ) -> tuple[np.ndarray, dict[str, list]]:
+         all_masks_list = []
+         num_masks = 0
+         all_masks_key: dict[str, list] = {}
+         for author_name, seglist in segmentations.items():
+             all_masks_key[author_name] = []
+             for i, seg in enumerate(seglist):
+                 if seg is not None:
+                     all_masks_list.append(seg)
+                     assert len(seg.shape) == 3, f"Segmentation must have 3 dimensions, got {seg.shape}"
+                     all_masks_key[author_name].append([num_masks+j for j in range(seg.shape[0])])
+                     num_masks += seg.shape[0]
+                 else:
+                     all_masks_key[author_name].append(None)
+
+         if len(all_masks_list) != 0:
+             all_masks_list = torch.concatenate(all_masks_list).numpy().astype(np.uint8)
+         else:
+             all_masks_list = None
+
+         augmented = self.alb_transform(image=img.numpy().transpose(1, 2, 0),
+                                        masks=all_masks_list)
+
+         # reconstruct the segmentations
+         if all_masks_list is not None:
+             all_masks = augmented['masks']  # num_masks masks, each of shape (H, W)
+             new_segmentations: dict[str, list] = {}
+             for author_name, seglist in all_masks_key.items():
+                 new_segmentations[author_name] = []
+                 for i in range(len(seglist)):
+                     if seglist[i] is None:
+                         new_segmentations[author_name].append(None)
+                     else:
+                         masks_i = [all_masks[j] for j in seglist[i]]
+                         masks_i = np.stack(masks_i)
+                         new_segmentations[author_name].append(masks_i)
+         else:
+             # nothing to transform: keep the original (possibly empty) segmentations
+             new_segmentations = segmentations
+
+         return augmented['image'], new_segmentations
+
+     def __getitem__(self, index) -> dict[str, Any]:
+         """
+         Get the item at the given index.
+
+         Args:
+             index (int): Index of the item to return.
+
+         Returns:
+             dict[str, Any]: A dictionary with the following keys:
+
+             * 'image' (Tensor): Tensor of shape (C, H, W) or (N, C, H, W), depending on `self.return_frame_by_frame`.
+             * 'metainfo' (dict): Dictionary with metadata information.
+             * 'segmentations' (dict[str, list[Tensor]] or dict[str, Tensor] or Tensor): Segmentation masks,
+               depending on the configuration of the parameters `self.return_segmentations`, `self.return_as_semantic_segmentation`,
+               `self.return_frame_by_frame` and `self.semantic_seg_merge_strategy`.
+               If `self.return_as_semantic_segmentation` is True, each segmentation tensor has shape (N, L, H, W) or (L, H, W),
+               where `L` is the number of segmentation labels + 1 (background): ``L=len(self.segmentation_labels_set)+1``.
+             * 'seg_labels' (dict[str, list[Tensor]] or Tensor): Segmentation labels with the same length as `segmentations`.
+             * 'frame_labels' (dict[str, Tensor]): Frame-level labels.
+             * 'image_labels' (dict[str, Tensor]): Image-level labels.
+         """
+         item = super().__getitem__(index)
+         img = item['image']
+         metainfo = item['metainfo']
+         annotations = item['annotations']
+
+         has_transformed = False  # to check if albumentations transform was applied
+
+         if self.image_transform is not None:
+             img = self.image_transform(img)
+             if isinstance(img, np.ndarray):
+                 img = torch.from_numpy(img)
+
+         if img.ndim == 3:
+             _, h, w = img.shape
+             nframes = 1
+         elif img.ndim == 4:
+             nframes, _, h, w = img.shape
+         else:
+             raise ValueError(f"Image must have 3 or 4 dimensions, got {img.shape}")
+
+         new_item = {
+             'image': img,
+             'metainfo': metainfo,
+         }
+         if 'dicom' in item:
+             new_item['dicom'] = item['dicom']
+
+         try:
+             if self.return_segmentations:
+                 segmentations, seg_labels = self._load_segmentations(annotations, img.shape)
+                 # apply mask transform
+                 if self.mask_transform is not None:
+                     for seglist in segmentations.values():
+                         for i, seg in enumerate(seglist):
+                             if seg is not None:
+                                 seglist[i] = self.mask_transform(seg)
+
+                 if self.alb_transform is not None:
+                     img, new_segmentations = self.__apply_alb_transform_segmentation(img, segmentations)
+                     segmentations = new_segmentations
+                     img = torch.from_numpy(img).permute(2, 0, 1)
+                     new_item['image'] = img
+                     has_transformed = True
+                     # Update dimensions after transformation
+                     if img.ndim == 3:
+                         _, h, w = img.shape
+                     elif img.ndim == 4:
+                         nframes, _, h, w = img.shape
+
+                 if self.return_as_semantic_segmentation:
+                     sem_segmentations: dict[str, torch.Tensor] = {}
+                     for author in segmentations.keys():
+                         sem_segmentations[author] = self._instanceseg2semanticseg(segmentations[author],
+                                                                                   seg_labels[author])
+                         segmentations[author] = None  # free memory
+                     segmentations = self.apply_semantic_seg_merge_strategy(sem_segmentations,
+                                                                            nframes,
+                                                                            h, w)
+                     # In semantic segmentation, seg_labels is not needed
+                     seg_labels = None
+
+                 if self.return_frame_by_frame:
+                     if isinstance(segmentations, dict):  # author->segmentations format
+                         segmentations = {k: v[0] for k, v in segmentations.items()}
+                         if seg_labels is not None:
+                             seg_labels = {k: v[0] for k, v in seg_labels.items()}
+                     else:
+                         # segmentations is a tensor
+                         segmentations = segmentations[0]
+                         if seg_labels is not None and len(seg_labels) > 0:
+                             seg_labels = seg_labels[0]
+                 new_item['segmentations'] = segmentations
+                 new_item['seg_labels'] = seg_labels
+         except Exception:
+             _LOGGER.error(f'Error in loading/processing segmentations of {metainfo}')
+             raise
+
+         if self.alb_transform is not None and not has_transformed:
+             # apply albumentations transform to the image
+             augmented = self.alb_transform(image=img.numpy().transpose(1, 2, 0))
+             img = torch.from_numpy(augmented['image']).permute(2, 0, 1)
+             new_item['image'] = img
+
+         framelabel_annotations = self._get_annotations_internal(annotations, type='label', scope='frame')
+         framelabels = self._convert_labels_annotations(framelabel_annotations, num_frames=nframes)
+         # framelabels.shape: (num_frames, num_labels)
+
+         imagelabel_annotations = self._get_annotations_internal(annotations, type='label', scope='image')
+         imagelabels = self._convert_labels_annotations(imagelabel_annotations)
+         # imagelabels.shape: (num_labels,)
+
+         new_item['frame_labels'] = framelabels
+         new_item['image_labels'] = imagelabels
+
+         # FIXME: deal with multiple annotators in instance segmentation
+
+         return new_item
+
+     def _convert_labels_annotations(self,
+                                     annotations: list[dict],
+                                     num_frames: Optional[int] = None) -> dict[str, torch.Tensor]:
+         """
+         Converts the annotations, of the same type and scope, to a tensor of shape (num_frames, num_labels)
+         for each annotator.
+
+         Args:
+             annotations: list of annotations
+             num_frames: number of frames in the video
+
+         Returns:
+             dict[str, torch.Tensor]: dictionary of annotator_id -> tensor of shape (num_frames, num_labels)
+         """
+         if num_frames is None:
+             labels_ret_size = (len(self.image_labels_set),)
+             label2code = self.image_lcodes['multilabel']
+             should_include_label = self._should_include_image_label
+         else:
+             labels_ret_size = (num_frames, len(self.frame_labels_set))
+             label2code = self.frame_lcodes['multilabel']
+             should_include_label = self._should_include_frame_label
+
+         if num_frames is not None and num_frames > 1 and self.return_frame_by_frame:
+             raise ValueError("num_frames must be 1 if return_frame_by_frame is True")
+
+         frame_labels_byuser = {}
+         if len(annotations) == 0:
+             return frame_labels_byuser
+         for ann in annotations:
+             user_id = ann['added_by']
+
+             frame_idx = ann.get('index', None)
+
+             if user_id not in frame_labels_byuser:
+                 frame_labels_byuser[user_id] = torch.zeros(size=labels_ret_size, dtype=torch.int32)
+             labels_onehot_i = frame_labels_byuser[user_id]
+             code = label2code[ann['name']]
+             if frame_idx is None:
+                 labels_onehot_i[code] = 1
+             else:
+                 if self.return_frame_by_frame:
+                     labels_onehot_i[0, code] = 1
+                 else:
+                     labels_onehot_i[frame_idx, code] = 1
+
+         if self.return_frame_by_frame:
+             for user_id, labels_onehot_i in frame_labels_byuser.items():
+                 frame_labels_byuser[user_id] = labels_onehot_i[0]
+         return dict(frame_labels_byuser)
+
+     def __repr__(self) -> str:
+         super_repr = super().__repr__()
+         body = []
+         if self.image_transform is not None:
+             body.append("Image transform:")
+             body += [" " * 4 + line for line in repr(self.image_transform).split('\n')]
+         if self.mask_transform is not None:
+             body.append("Mask transform:")
+             body += [" " * 4 + line for line in repr(self.mask_transform).split('\n')]
+         if len(body) == 0:
+             return super_repr
+         lines = [" " * 4 + line for line in body]
+         return super_repr + '\n' + "\n".join(lines)
@@ -0,0 +1 @@
+ from .example_projects import ProjectMR
@@ -0,0 +1,75 @@
+ import requests
+ import io
+ from typing import Optional
+ from datamintapi import APIHandler
+ import logging
+ from PIL import Image
+ import numpy as np
+
+ _LOGGER = logging.getLogger(__name__)
+
+
+ def _download_pydicom_test_file(filename: str) -> io.BytesIO:
+     """Download a pydicom test file from GitHub and return its content as a BytesIO object."""
+     url = f'https://raw.githubusercontent.com/pydicom/pydicom/master/tests/data/{filename}'
+     response = requests.get(url)
+     response.raise_for_status()
+     content = io.BytesIO(response.content)
+     content.name = filename
+     return content
+
+
+ class ProjectMR:
+     @staticmethod
+     def upload_resource_emri_small(api: Optional[APIHandler] = None) -> str:
+         if api is None:
+             api = APIHandler()
+
+         searched_res = api.get_resources(status='published', tags=['example'], filename='emri_small.dcm')
+         for res in searched_res:
+             _LOGGER.info('Resource already exists.')
+             return res['id']
+
+         dcm_content = _download_pydicom_test_file('emri_small.dcm')
+
+         _LOGGER.info(f'Uploading resource {dcm_content.name}...')
+         return api.upload_resources(dcm_content,
+                                     anonymize=True,
+                                     publish=True,
+                                     tags=['example'])
+
+     @staticmethod
+     def _upload_annotations(api: APIHandler,
+                             resid: str,
+                             proj) -> None:
+         _LOGGER.info('Uploading annotations...')
+         proj_id = proj['id']
+         proj_info = api.get_project_by_id(proj_id)
+         segurl = 'https://github.com/user-attachments/assets/8c5d7dfe-1b5a-497d-b76e-fe790f09bb90'
+         resp = requests.get(segurl, stream=True)
+         resp.raise_for_status()
+         img = Image.open(io.BytesIO(resp.content)).convert('L')
+         api.upload_segmentations(resid, np.array(img),
+                                  name='object1', frame_index=1,
+                                  worklist_id=proj_info['worklist_id'])
+         api.set_annotation_status(project_id=proj_id,
+                                   resource_id=resid,
+                                   status='closed')
+
+     @staticmethod
+     def create(project_name: str = 'Example Project MR',
+                with_annotations=True) -> str:
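+         """Create an example MR project and return its id.
+
+         A minimal usage sketch::
+
+             proj_id = ProjectMR.create('Example Project MR')
+         """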
+         api = APIHandler()
+
+         resid = ProjectMR.upload_resource_emri_small(api)
+         proj = api.get_project_by_name(project_name)
+         if 'id' in proj:
+             msg = f'Project {project_name} already exists. Delete it first or choose another name.'
+             raise ValueError(msg)
+         _LOGGER.info(f'Creating project {project_name}...')
+         proj = api.create_project(name=project_name,
+                                   description='This is an example project',
+                                   resources_ids=[resid])
+         if with_annotations:
+             ProjectMR._upload_annotations(api, resid, proj)
+
+         return proj['id']
@@ -0,0 +1 @@
+ from .experiment import Experiment