datamint 1.6.0__tar.gz → 1.6.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of datamint might be problematic.
- {datamint-1.6.0 → datamint-1.6.2}/PKG-INFO +2 -1
- {datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/annotation_api_handler.py +14 -10
- {datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/dto/annotation_dto.py +1 -1
- {datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/root_api_handler.py +4 -2
- {datamint-1.6.0 → datamint-1.6.2}/datamint/client_cmd_tools/datamint_upload.py +101 -42
- {datamint-1.6.0 → datamint-1.6.2}/datamint/dataset/base_dataset.py +2 -2
- {datamint-1.6.0 → datamint-1.6.2}/pyproject.toml +2 -1
- datamint-1.6.0/datamint/utils/dicom_utils.py +0 -707
- datamint-1.6.0/datamint/utils/io_utils.py +0 -187
- {datamint-1.6.0 → datamint-1.6.2}/README.md +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/__init__.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/api_handler.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/base_api_handler.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/exp_api_handler.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/client_cmd_tools/__init__.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/client_cmd_tools/datamint_config.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/configs.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/dataset/__init__.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/dataset/dataset.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/examples/__init__.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/examples/example_projects.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/experiment/__init__.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/experiment/_patcher.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/experiment/experiment.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/logging.yaml +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/utils/logging_utils.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/utils/torchmetrics.py +0 -0
- {datamint-1.6.0 → datamint-1.6.2}/datamint/utils/visualization.py +0 -0
{datamint-1.6.0 → datamint-1.6.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: datamint
-Version: 1.6.0
+Version: 1.6.2
 Summary: A library for interacting with the Datamint API, designed for efficient data management, processing and Deep Learning workflows.
 Requires-Python: >=3.10
 Classifier: Programming Language :: Python :: 3
@@ -19,6 +19,7 @@ Requires-Dist: humanize (>=4.0.0,<5.0.0)
 Requires-Dist: lazy-loader (>=0.3.0)
 Requires-Dist: lightning
 Requires-Dist: matplotlib
+Requires-Dist: medimgkit
 Requires-Dist: nest-asyncio (>=1.0.0,<2.0.0)
 Requires-Dist: nibabel (>=4.0.0)
 Requires-Dist: numpy
{datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/annotation_api_handler.py

@@ -10,7 +10,6 @@ import os
 import asyncio
 import aiohttp
 from requests.exceptions import HTTPError
-from deprecated.sphinx import deprecated
 from .dto.annotation_dto import CreateAnnotationDto, LineGeometry, BoxGeometry, CoordinateSystem, AnnotationType
 import pydicom
 import json
@@ -237,7 +236,7 @@ class AnnotationAPIHandler(BaseAPIHandler):
     async def _upload_volume_segmentation_async(self,
                                                 resource_id: str,
                                                 file_path: str | np.ndarray,
-                                                name: dict[int, str] | dict[tuple, str],
+                                                name: str | dict[int, str] | dict[tuple, str] | None,
                                                 imported_from: Optional[str] = None,
                                                 author_email: Optional[str] = None,
                                                 worklist_id: Optional[str] = None,
@@ -263,6 +262,13 @@
         Raises:
             ValueError: If name is not a string or file format is unsupported for volume upload.
         """
+
+        if isinstance(name, str):
+            raise NotImplementedError("`name=string` is not supported yet for volume segmentation.")
+        if isinstance(name, dict):
+            if any(isinstance(k, tuple) for k in name.keys()):
+                raise NotImplementedError("For volume segmentations, `name` must be a dictionary with integer keys only.")
+
         # Prepare file for upload
         if isinstance(file_path, str):
             if file_path.endswith('.nii') or file_path.endswith('.nii.gz'):
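
For reference, a minimal sketch of the `name` argument these new checks accept (values are illustrative, not from the package):

    # Accepted: integer label -> segmentation name
    name = {1: 'liver', 2: 'spleen'}
    # Rejected by the checks above:
    # name = 'liver'                  # plain string -> NotImplementedError
    # name = {(255, 0, 0): 'liver'}   # tuple keys -> NotImplementedError
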
@@ -275,7 +281,8 @@
             form.add_field('model_id', model_id)  # Add model_id if provided
         if worklist_id is not None:
             form.add_field('annotation_worklist_id', worklist_id)
-
+        if name is not None:
+            form.add_field('segmentation_map', json.dumps(name), content_type='application/json')

         request_params = dict(
             method='POST',
@@ -449,30 +456,27 @@
         if isinstance(file_path, str) and not os.path.exists(file_path):
             raise FileNotFoundError(f"File {file_path} not found.")

-        name = AnnotationAPIHandler.standardize_segmentation_names(name)
-
         # Handle NIfTI files specially - upload as single volume
         if isinstance(file_path, str) and (file_path.endswith('.nii') or file_path.endswith('.nii.gz')):
             _LOGGER.info(f"Uploading NIfTI segmentation file: {file_path}")
             if frame_index is not None:
                 raise ValueError("Do not provide frame_index for NIfTI segmentations.")
             loop = asyncio.get_event_loop()
-            task = self.
+            task = self._upload_volume_segmentation_async(
                 resource_id=resource_id,
-                frame_index=None,
                 file_path=file_path,
                 name=name,
                 imported_from=imported_from,
                 author_email=author_email,
-                discard_empty_segmentations=False,
                 worklist_id=worklist_id,
                 model_id=model_id,
-                transpose_segmentation=transpose_segmentation
-                upload_volume=True
+                transpose_segmentation=transpose_segmentation
             )
             return loop.run_until_complete(task)
         # All other file types are converted to multiple PNGs and uploaded frame by frame.

+        name = AnnotationAPIHandler.standardize_segmentation_names(name)
+
         to_run = []
         # Generate IOs for the segmentations.
         nframes, fios = AnnotationAPIHandler._generate_segmentations_ios(file_path,
{datamint-1.6.0 → datamint-1.6.2}/datamint/apihandler/root_api_handler.py

@@ -6,8 +6,8 @@ from requests.exceptions import HTTPError
 import logging
 import asyncio
 import aiohttp
-from datamint.utils.dicom_utils import anonymize_dicom, to_bytesio, is_dicom
-from datamint.utils import dicom_utils
+from medimgkit.dicom_utils import anonymize_dicom, to_bytesio, is_dicom
+from medimgkit import dicom_utils
 import pydicom
 from pathlib import Path
 from datetime import date
@@ -447,6 +447,8 @@ class RootAPIHandler(BaseAPIHandler):
                              for segfiles in segmentation_files]

         for segfiles in segmentation_files:
+            if segfiles is None:
+                continue
             if 'files' not in segfiles:
                 raise ValueError("segmentation_files must contain a 'files' key with a list of file paths.")
             if 'names' in segfiles:
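
For orientation, a sketch of the segmentation_files structure this loop consumes (paths and names are illustrative; the None entry is what the new guard skips):

    segmentation_files = [
        {'files': ['segs/case01.nii.gz'], 'names': {1: 'liver'}},
        None,  # image with no matching segmentation -> skipped by the new check
    ]
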
{datamint-1.6.0 → datamint-1.6.2}/datamint/client_cmd_tools/datamint_upload.py

@@ -5,7 +5,7 @@ from humanize import naturalsize
 import logging
 from pathlib import Path
 import sys
-from datamint.utils.dicom_utils import is_dicom
+from medimgkit.dicom_utils import is_dicom
 import fnmatch
 from typing import Generator, Optional, Any
 from collections import defaultdict
@@ -15,6 +15,7 @@ from datamint.client_cmd_tools.datamint_config import ask_api_key
 from datamint.utils.logging_utils import load_cmdline_logging_config
 import yaml
 from collections.abc import Iterable
+import pandas as pd

 # Create two loggings: one for the user and one for the developer
 _LOGGER = logging.getLogger(__name__)
@@ -23,6 +24,38 @@ _USER_LOGGER = logging.getLogger('user_logger')
 MAX_RECURSION_LIMIT = 1000


+def _read_segmentation_names(segmentation_names_path: str | Path) -> dict:
+    """
+    Read a segmentation names file (yaml or csv) and return its content as a dictionary.
+    If the file is a YAML file, it should contain two keys: "segmentation_names" and "class_names".
+    If the file is a CSV file, it should contain the following columns:
+    index, r, g, b, ..., name
+    """
+    segmentation_names_path = Path(segmentation_names_path)
+    if segmentation_names_path.suffix in ['.yaml', '.yml']:
+        with open(segmentation_names_path, 'r') as f:
+            metadata = yaml.safe_load(f)
+    elif segmentation_names_path.suffix in ['.csv', '.tsv']:
+        df = pd.read_csv(segmentation_names_path,
+                         header=None,
+                         index_col=0,
+                         sep=None,  # use sep=None to automatically detect the separator
+                         engine='python'
+                         )
+        df = df.rename(columns={1: 'r', 2: 'g', 3: 'b', df.columns[-1]: 'name'})
+        # df = df.set_index(['r', 'g', 'b'])
+        metadata = {'class_names': df['name'].to_dict()}
+    else:
+        raise ValueError(f"Unsupported file format: {segmentation_names_path.suffix}")
+
+    if 'segmentation_names' in metadata:
+        segnames = sorted(metadata['segmentation_names'],
+                          key=lambda x: len(x))
+        metadata['segmentation_names'] = segnames
+
+    return metadata
+
+
 def _is_valid_path_argparse(x):
     """
     argparse type that checks if the path exists
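
As an illustration, hypothetical inputs that _read_segmentation_names would accept (file contents are examples, not shipped with the package):

    # names.yaml
    segmentation_names:
      - liver
      - spleen
    class_names:
      1: tumor
      2: vessel

    # names.csv -- columns: index, r, g, b, name
    1,255,0,0,liver
    2,0,255,0,spleen
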
@@ -101,7 +134,6 @@ def walk_to_depth(path: str | Path,
                 continue
             yield from walk_to_depth(child, depth-1, exclude_pattern)
         else:
-            _LOGGER.debug(f"yielding {child} from {path}")
             yield child

@@ -157,31 +189,32 @@ def handle_api_key() -> str | None:

 def _find_segmentation_files(segmentation_root_path: str,
                              images_files: list[str],
-                             segmentation_metainfo: dict = None
-                             ) ->
+                             segmentation_metainfo: dict | None = None
+                             ) -> list[dict]:
     """
     Find the segmentation files that match the images files based on the same folder structure
     """

-
-
-
-    if len(images_files) == 1 and os.path.isfile(images_files[0]) and os.path.isfile(segmentation_root_path):
-        return [{'files': [segmentation_root_path]}]
-
-    segmentation_files = []
-    acceptable_extensions = ['.nii.gz', '.nii', '.png']
-
+    segnames = None
+    classnames = None
     if segmentation_metainfo is not None:
         if 'segmentation_names' in segmentation_metainfo:
             segnames = sorted(segmentation_metainfo['segmentation_names'],
                               key=lambda x: len(x))
-        else:
-            segnames = None
         classnames = segmentation_metainfo.get('class_names', None)
         if classnames is not None:
             _LOGGER.debug(f"Number of class names: {len(classnames)}")

+    if len(images_files) == 1 and os.path.isfile(images_files[0]) and os.path.isfile(segmentation_root_path):
+        ret = [{'files': [segmentation_root_path]}]
+        if classnames is not None:
+            ret[0]['names'] = classnames
+        _LOGGER.debug(f"Returning segmentation files: {ret}")
+        return ret
+
+    segmentation_files = []
+    acceptable_extensions = ['.nii.gz', '.nii', '.png']
+
     segmentation_root_path = Path(segmentation_root_path).absolute()

     for imgpath in images_files:
@@ -197,7 +230,6 @@ def _find_segmentation_files(segmentation_root_path: str,
         else:
             common_parent = Path(*common_parent)

-        _LOGGER.debug(f"_find_segmentation_files::common_parent: {common_parent}")
         path_structure = imgpath_parent.relative_to(common_parent).parts[1:]

         # path_structure = imgpath_parent.relative_to(root_path).parts[1:]
@@ -230,24 +262,47 @@ def _find_segmentation_files(segmentation_root_path: str,
         if len(frame_indices) > 0:
             seginfo['frame_index'] = frame_indices

-
-
-
-
-
+        snames_associated = []
+        for segfile in seg_files:
+            # check if there is a metadata file associated, besides json, with the segmentation
+            for ext in ['.yaml', '.yml', '.csv']:
+                if str(segfile).endswith('nii.gz'):
+                    # has two extensions, so we need to remove both
+                    metadata_file = segfile.with_suffix('').with_suffix(ext)
+                    if not metadata_file.exists():
+                        metadata_file = segfile.with_suffix(ext)
+                else:
+                    metadata_file = segfile.with_suffix(ext)
+                if metadata_file.exists():
+                    _LOGGER.debug(f"Found metadata file: {metadata_file}")
+                    try:
+                        new_segmentation_metainfo = _read_segmentation_names(metadata_file)
+                        cur_segnames = new_segmentation_metainfo.get('segmentation_names', segnames)
+                        cur_classnames = new_segmentation_metainfo.get('class_names', classnames)
+                        break
+                    except Exception as e:
+                        _LOGGER.warning(f"Error reading metadata file {metadata_file}: {e}")
+            else:
+                cur_segnames = segnames
+                cur_classnames = classnames
+
+            if cur_segnames is None:
+                _LOGGER.debug(f'adding {cur_classnames}')
+                snames_associated.append(cur_classnames)
+            else:
+                for segname in cur_segnames:
+                    if segname in str(segfile):
+                        if cur_classnames is not None:
+                            new_segname = {cid: f'{segname}_{cname}' for cid, cname in cur_classnames.items()}
+                            new_segname.update({'default': segname})
+                        else:
+                            new_segname = segname
+                        snames_associated.append(new_segname)
+                        break
                 else:
-
-
-
-                    new_segname = {cid: f'{segname}_{cname}' for cid, cname in classnames.items()}
-                    new_segname.update({'default': segname})
-                else:
-                    new_segname = segname
-                snames_associated.append(new_segname)
-                break
-        else:
-            _USER_LOGGER.warning(f"Segmentation file {segname} does not match any segmentation name.")
-            snames_associated.append(None)
+                    _USER_LOGGER.warning(f"Segmentation file {segfile} does not match any segmentation name.")
+                    snames_associated.append(None)
         if len(snames_associated) > 0:
             seginfo['names'] = snames_associated

         segmentation_files.append(seginfo)
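
As an illustration of the per-file metadata lookup added above: for a hypothetical segmentation segs/case01/tumor.nii.gz, the loop probes tumor.yaml (both .nii and .gz stripped), then tumor.nii.yaml, then the same pattern for .yml and .csv, and falls back to the global segmentation_names/class_names when none exists:

    segs/case01/tumor.nii.gz   # segmentation volume
    segs/case01/tumor.yaml     # per-file names, read by _read_segmentation_names
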
@@ -268,7 +323,7 @@ def _find_json_metadata(file_path: str | Path) -> Optional[str]:
         Optional[str]: Path to the JSON metadata file if found, None otherwise
     """
     file_path = Path(file_path)
-
+
     # Handle .nii.gz files specially - need to remove both extensions
     if file_path.name.endswith('.nii.gz'):
         base_name = file_path.name[:-7]  # Remove .nii.gz
@@ -320,7 +375,7 @@ def _collect_metadata_files(files_path: list[str], auto_detect_json: bool) -> tu
     if used_json_files:
         _LOGGER.debug(f"Filtering out {len(used_json_files)} JSON metadata files from main upload list")
         filtered_metadata_files = []
-
+
         for original_file in files_path:
             if original_file not in used_json_files:
                 original_index = files_path.index(original_file)
@@ -376,8 +431,10 @@ def _parse_args() -> tuple[Any, list[str], Optional[list[dict]], Optional[list[s
                         help='Path to the segmentation file(s) or a directory')
     parser.add_argument('--segmentation_names', type=_is_valid_path_argparse, metavar="FILE",
                         required=False,
-                        help='Path to a yaml file containing the segmentation names.' +
-                        '
+                        help='Path to a yaml or csv file containing the segmentation names.' +
+                        ' If yaml, the file may contain two keys: "segmentation_names" and "class_names".'
+                        ' If csv, the file should contain the following columns:'
+                        ' index, r, g, b, ..., name')
     parser.add_argument('--yes', action='store_true',
                         help='Automatically answer yes to all prompts')
     parser.add_argument('--transpose-segmentation', action='store_true', default=False,
@@ -446,15 +503,17 @@ def _parse_args() -> tuple[Any, list[str], Optional[list[dict]], Optional[list[s
         raise ValueError(f"No valid non-metadata files found in {args.path}")

     if args.segmentation_names is not None:
-
-        segmentation_names = yaml.safe_load(f)
+        segmentation_names = _read_segmentation_names(args.segmentation_names)
     else:
         segmentation_names = None

     _LOGGER.debug(f'finding segmentations at {args.segmentation_path}')
-
-
-
+    if args.segmentation_path is None:
+        segmentation_files = None
+    else:
+        segmentation_files = _find_segmentation_files(args.segmentation_path,
+                                                      file_path,
+                                                      segmentation_metainfo=segmentation_names)

     _LOGGER.info(f"args parsed: {args}")

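
A hypothetical invocation wiring these options together (the command name and the --path/--segmentation_path flag spellings are assumptions inferred from this file; only --segmentation_names, --yes and --transpose-segmentation are visible in the diff):

    datamint-upload --path data/ \
        --segmentation_path segs/ \
        --segmentation_names names.csv \
        --yes
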
{datamint-1.6.0 → datamint-1.6.2}/datamint/dataset/base_dataset.py

@@ -14,9 +14,9 @@ from torch.utils.data import DataLoader
 import torch
 from torch import Tensor
 from datamint.apihandler.base_api_handler import DatamintException
-from datamint.utils.dicom_utils import is_dicom
+from medimgkit.dicom_utils import is_dicom
 import cv2
-from datamint.utils.io_utils import read_array_normalized
+from medimgkit.io_utils import read_array_normalized
 from datetime import datetime

 _LOGGER = logging.getLogger(__name__)
{datamint-1.6.0 → datamint-1.6.2}/pyproject.toml

@@ -1,7 +1,7 @@
 [project]
 name = "datamint"
 description = "A library for interacting with the Datamint API, designed for efficient data management, processing and Deep Learning workflows."
-version = "1.6.0"
+version = "1.6.2"
 dynamic = ["dependencies"]
 requires-python = ">=3.10"
 readme = "README.md"
@@ -40,6 +40,7 @@ matplotlib = "*"
 lightning = "*"
 albumentations = ">=2.0.0"
 lazy-loader = ">=0.3.0"
+medimgkit = "*"
 # For compatibility with the datamintapi package
 datamintapi = "0.0.*"
 # Extra dependencies for docs