datamint 1.9.2__tar.gz → 1.9.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of datamint might be problematic.

Files changed (29)
  1. {datamint-1.9.2 → datamint-1.9.3}/PKG-INFO +1 -1
  2. {datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/annotation_api_handler.py +5 -5
  3. {datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/root_api_handler.py +53 -28
  4. {datamint-1.9.2 → datamint-1.9.3}/datamint/client_cmd_tools/datamint_config.py +6 -37
  5. {datamint-1.9.2 → datamint-1.9.3}/datamint/client_cmd_tools/datamint_upload.py +94 -57
  6. {datamint-1.9.2 → datamint-1.9.3}/datamint/logging.yaml +1 -1
  7. datamint-1.9.3/datamint/utils/logging_utils.py +130 -0
  8. {datamint-1.9.2 → datamint-1.9.3}/pyproject.toml +1 -1
  9. datamint-1.9.2/datamint/utils/logging_utils.py +0 -55
  10. {datamint-1.9.2 → datamint-1.9.3}/README.md +0 -0
  11. {datamint-1.9.2 → datamint-1.9.3}/datamint/__init__.py +0 -0
  12. {datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/api_handler.py +0 -0
  13. {datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/base_api_handler.py +0 -0
  14. {datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/dto/annotation_dto.py +0 -0
  15. {datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/exp_api_handler.py +0 -0
  16. {datamint-1.9.2 → datamint-1.9.3}/datamint/client_cmd_tools/__init__.py +0 -0
  17. {datamint-1.9.2 → datamint-1.9.3}/datamint/configs.py +0 -0
  18. {datamint-1.9.2 → datamint-1.9.3}/datamint/dataset/__init__.py +0 -0
  19. {datamint-1.9.2 → datamint-1.9.3}/datamint/dataset/annotation.py +0 -0
  20. {datamint-1.9.2 → datamint-1.9.3}/datamint/dataset/base_dataset.py +0 -0
  21. {datamint-1.9.2 → datamint-1.9.3}/datamint/dataset/dataset.py +0 -0
  22. {datamint-1.9.2 → datamint-1.9.3}/datamint/examples/__init__.py +0 -0
  23. {datamint-1.9.2 → datamint-1.9.3}/datamint/examples/example_projects.py +0 -0
  24. {datamint-1.9.2 → datamint-1.9.3}/datamint/exceptions.py +0 -0
  25. {datamint-1.9.2 → datamint-1.9.3}/datamint/experiment/__init__.py +0 -0
  26. {datamint-1.9.2 → datamint-1.9.3}/datamint/experiment/_patcher.py +0 -0
  27. {datamint-1.9.2 → datamint-1.9.3}/datamint/experiment/experiment.py +0 -0
  28. {datamint-1.9.2 → datamint-1.9.3}/datamint/utils/torchmetrics.py +0 -0
  29. {datamint-1.9.2 → datamint-1.9.3}/datamint/utils/visualization.py +0 -0

{datamint-1.9.2 → datamint-1.9.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: datamint
- Version: 1.9.2
+ Version: 1.9.3
  Summary: A library for interacting with the Datamint API, designed for efficient data management, processing and Deep Learning workflows.
  Requires-Python: >=3.10
  Classifier: Programming Language :: Python :: 3

{datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/annotation_api_handler.py
@@ -995,11 +995,11 @@ class AnnotationAPIHandler(BaseAPIHandler):

  def update_annotation_worklist(self,
  worklist_id: str,
- frame_labels: list[str] = None,
- image_labels: list[str] = None,
- annotations: list[dict] = None,
- status: Literal['new', 'updating', 'active', 'completed'] = None,
- name: str = None,
+ frame_labels: list[str] | None = None,
+ image_labels: list[str] | None = None,
+ annotations: list[dict] | None = None,
+ status: Literal['new', 'updating', 'active', 'completed'] | None = None,
+ name: str | None = None,
  ):
  """
  Update the status of an annotation worklist.
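
With the keyword arguments now explicitly typed as optional, a status-only update remains the simplest call. A minimal usage sketch, assuming `APIHandler` is importable from the top-level `datamint` package (as the upload CLI does) and that the default constructor picks up the configured API key; the worklist id is a placeholder:

    from datamint import APIHandler  # import path assumed

    handler = APIHandler()  # assumes the configured API key/URL is picked up
    handler.update_annotation_worklist(
        worklist_id="wl-1234",   # placeholder id
        status="completed",      # one of: 'new', 'updating', 'active', 'completed'
    )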

{datamint-1.9.2 → datamint-1.9.3}/datamint/apihandler/root_api_handler.py
@@ -6,7 +6,7 @@ from requests.exceptions import HTTPError
  import logging
  import asyncio
  import aiohttp
- from medimgkit.dicom_utils import anonymize_dicom, to_bytesio, is_dicom, is_dicom_report
+ from medimgkit.dicom_utils import anonymize_dicom, to_bytesio, is_dicom, is_dicom_report, GeneratorWithLength
  from medimgkit import dicom_utils, standardize_mimetype
  from medimgkit.io_utils import is_io_object, peek
  from medimgkit.format_detection import guess_typez, guess_extension, DEFAULT_MIME_TYPE
@@ -185,9 +185,7 @@ class RootAPIHandler(BaseAPIHandler):
  resp_data = await self._run_request_async(request_params, session)
  if 'error' in resp_data:
  raise DatamintException(resp_data['error'])
- _LOGGER.info(f"Response on uploading {name}: {resp_data}")
-
- _USER_LOGGER.info(f'"{name}" uploaded')
+ _LOGGER.debug(f"Response on uploading {name}: {resp_data}")
  return resp_data['id']
  except Exception as e:
  if 'name' in locals():
@@ -212,6 +210,7 @@ class RootAPIHandler(BaseAPIHandler):
  segmentation_files: Optional[list[dict]] = None,
  transpose_segmentation: bool = False,
  metadata_files: Optional[list[str | dict | None]] = None,
+ progress_bar: tqdm | None = None,
  ) -> list[str]:
  if on_error not in ['raise', 'skip']:
  raise ValueError("on_error must be either 'raise' or 'skip'")
@@ -225,6 +224,8 @@ class RootAPIHandler(BaseAPIHandler):
  async with aiohttp.ClientSession() as session:
  async def __upload_single_resource(file_path, segfiles: dict[str, list | dict],
  metadata_file: str | dict | None):
+ name = file_path.name if is_io_object(file_path) else file_path
+ name = os.path.basename(name)
  rid = await self._upload_single_resource_async(
  file_path=file_path,
  mimetype=mimetype,
@@ -238,6 +239,12 @@ class RootAPIHandler(BaseAPIHandler):
  publish=publish,
  metadata_file=metadata_file,
  )
+ if progress_bar:
+ progress_bar.update(1)
+ progress_bar.set_postfix(file=name)
+ else:
+ _USER_LOGGER.info(f'"{name}" uploaded')
+
  if segfiles is not None:
  fpaths = segfiles['files']
  names = segfiles.get('names', _infinite_gen(None))
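
The per-file progress reporting added above uses the standard tqdm API: `update(1)` advances the bar and `set_postfix(file=name)` shows the file that just finished. A minimal standalone sketch of the same pattern, with placeholder file names:

    from tqdm import tqdm

    files = ["scan_001.dcm", "scan_002.dcm", "report.pdf"]  # placeholders

    pbar = tqdm(total=len(files), desc="Uploading resources", unit="file")
    try:
        for name in files:
            # ... upload would happen here ...
            pbar.update(1)               # one more file completed
            pbar.set_postfix(file=name)  # show the current file name
    finally:
        pbar.close()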
@@ -295,18 +302,17 @@ class RootAPIHandler(BaseAPIHandler):
  if new_len != orig_len:
  _LOGGER.info(f"Assembled {new_len} dicom files out of {orig_len} files.")
  mapping_idx = [None] * len(files_path)
- files_path = itertools.chain(dicoms_files_path, other_files_path)
+
+ files_path = GeneratorWithLength(itertools.chain(dicoms_files_path, other_files_path),
+ length=new_len + len(other_files_path))
  assembled = True
  for orig_idx, value in zip(dicom_original_idxs, dicoms_files_path.inverse_mapping_idx):
  mapping_idx[orig_idx] = value
  for i, orig_idx in enumerate(others_original_idxs):
  mapping_idx[orig_idx] = new_len + i
- # mapping_idx = [[dicom_original_idxs[i] for i in idxlist]
- # for idxlist in dicoms_files_path.mapping_idx]
- # mapping_idx += [[i] for i in others_original_idxs]
  else:
  assembled = False
- # mapping_idx = [[i] for i in range(len(files_path))]
+ mapping_idx = [i for i in range(len(files_path))]

  return files_path, assembled, mapping_idx

@@ -391,7 +397,8 @@ class RootAPIHandler(BaseAPIHandler):
  transpose_segmentation=transpose_segmentation,
  modality=modality,
  assemble_dicoms=assemble_dicoms,
- metadata=metadata
+ metadata=metadata,
+ progress_bar=False
  )

  return result[0]
@@ -412,7 +419,8 @@ class RootAPIHandler(BaseAPIHandler):
  modality: Optional[str] = None,
  assemble_dicoms: bool = True,
  metadata: list[str | dict | None] | dict | str | None = None,
- discard_dicom_reports: bool = True
+ discard_dicom_reports: bool = True,
+ progress_bar: bool = False
  ) -> list[str | Exception] | str | Exception:
  """
  Upload resources.
@@ -485,6 +493,11 @@ class RootAPIHandler(BaseAPIHandler):
  assemble_dicoms = assembled
  else:
  mapping_idx = [i for i in range(len(files_path))]
+ n_files = len(files_path)
+
+ if n_files <= 1:
+ # Disable progress bar for single file uploads
+ progress_bar = False

  if segmentation_files is not None:
  if assemble_dicoms:
@@ -513,22 +526,32 @@ class RootAPIHandler(BaseAPIHandler):
  "segmentation_files['names'] must have the same length as segmentation_files['files'].")

  loop = asyncio.get_event_loop()
- task = self._upload_resources_async(files_path=files_path,
- mimetype=mimetype,
- anonymize=anonymize,
- anonymize_retain_codes=anonymize_retain_codes,
- on_error=on_error,
- tags=tags,
- mung_filename=mung_filename,
- channel=channel,
- publish=publish,
- segmentation_files=segmentation_files,
- transpose_segmentation=transpose_segmentation,
- modality=modality,
- metadata_files=metadata,
- )
-
- resource_ids = loop.run_until_complete(task)
+ pbar = None
+ try:
+ if progress_bar:
+ pbar = tqdm(total=n_files, desc="Uploading resources", unit="file")
+
+ task = self._upload_resources_async(files_path=files_path,
+ mimetype=mimetype,
+ anonymize=anonymize,
+ anonymize_retain_codes=anonymize_retain_codes,
+ on_error=on_error,
+ tags=tags,
+ mung_filename=mung_filename,
+ channel=channel,
+ publish=publish,
+ segmentation_files=segmentation_files,
+ transpose_segmentation=transpose_segmentation,
+ modality=modality,
+ metadata_files=metadata,
+ progress_bar=pbar
+ )
+
+ resource_ids = loop.run_until_complete(task)
+ finally:
+ if pbar:
+ pbar.close()
+
  _LOGGER.info(f"Resources uploaded: {resource_ids}")

  if publish_to is not None:
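
With the new `progress_bar` flag on `upload_resources`, callers can opt into a tqdm bar for multi-file uploads (it is disabled automatically for a single file). A usage sketch, assuming `APIHandler` is importable from the top-level `datamint` package; the paths are placeholders:

    from datamint import APIHandler  # import path assumed

    api_handler = APIHandler(check_connection=True)
    resource_ids = api_handler.upload_resources(
        files_path=["/data/study1/scan_001.dcm", "/data/study1/scan_002.dcm"],  # placeholders
        channel="my-channel",
        on_error="skip",
        progress_bar=True,  # new in 1.9.3: show a tqdm progress bar during upload
    )
    print(resource_ids)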
@@ -623,7 +646,9 @@ class RootAPIHandler(BaseAPIHandler):
  # get the project id by its name
  project = self.get_project_by_name(project_name)
  if 'error' in project:
- raise ResourceNotFoundError('project', {'project_name': project_name})
+ project = self.get_project_by_id(project_name)
+ if 'error' in project:
+ raise ResourceNotFoundError('project', {'project_name': project_name})

  dataset_id = project['dataset_id']


{datamint-1.9.2 → datamint-1.9.3}/datamint/client_cmd_tools/datamint_config.py
@@ -1,46 +1,13 @@
  import argparse
  import logging
- import os
- import platform
  from datamint import configs
- from datamint.utils.logging_utils import load_cmdline_logging_config
+ from datamint.utils.logging_utils import load_cmdline_logging_config, ConsoleWrapperHandler
  from rich.prompt import Prompt, Confirm
  from rich.console import Console
- from rich.theme import Theme
-
- # Create a custom theme that works well on both dark and blue backgrounds
- def _create_console_theme() -> Theme:
- """Create a custom Rich theme optimized for cross-platform terminals."""
- # Detect if we're likely on PowerShell (Windows + PowerShell)
- is_powershell = (
- platform.system() == "Windows" and
- os.environ.get("PSModulePath") is not None
- )
-
- if is_powershell:
- # PowerShell blue background - use high contrast colors
- return Theme({
- "warning": "bright_yellow",
- "error": "bright_red on white",
- "success": "bright_green",
- "key": "bright_cyan",
- "accent": "bright_cyan",
- "title": "bold"
- })
- else:
- # Linux/Unix terminals - standard colors
- return Theme({
- "warning": "yellow",
- "error": "red",
- "success": "green",
- "key": "cyan",
- "accent": "bright_blue",
- "title": "bold"
- })
-
- # Create console with custom theme
- console = Console(theme=_create_console_theme())
+
  _LOGGER = logging.getLogger(__name__)
+ _USER_LOGGER = logging.getLogger('user_logger')
+ console: Console


  def configure_default_url():
  def configure_default_url():
@@ -170,7 +137,9 @@ def interactive_mode():

  def main():
  """Main entry point for the configuration tool."""
+ global console
  load_cmdline_logging_config()
+ console = [h for h in _USER_LOGGER.handlers if isinstance(h, ConsoleWrapperHandler)][0].console
  parser = argparse.ArgumentParser(
  description='🔧 Datamint API Configuration Tool',
  epilog="""

{datamint-1.9.2 → datamint-1.9.3}/datamint/client_cmd_tools/datamint_upload.py
@@ -12,8 +12,8 @@ from typing import Generator, Optional, Any
  from collections import defaultdict
  from datamint import __version__ as datamint_version
  from datamint import configs
- from datamint.client_cmd_tools.datamint_config import ask_api_key
- from datamint.utils.logging_utils import load_cmdline_logging_config
+ from datamint.utils.logging_utils import load_cmdline_logging_config, ConsoleWrapperHandler
+ from rich.console import Console
  import yaml
  from collections.abc import Iterable
  import pandas as pd
@@ -22,32 +22,42 @@ import pydicom.errors
  # Create two loggings: one for the user and one for the developer
  _LOGGER = logging.getLogger(__name__)
  _USER_LOGGER = logging.getLogger('user_logger')
+ logging.getLogger('pydicom').setLevel(logging.ERROR)
+ CONSOLE: Console

  MAX_RECURSION_LIMIT = 1000

+ # Default extensions to exclude when --include-extensions is not specified
+ DEFAULT_EXCLUDED_EXTENSIONS = [
+ '.txt', '.json', '.xml', '.docx', '.doc', '.pdf', '.xlsx', '.xls', '.csv', '.tsv',
+ '.log', '.ini', '.cfg', '.conf', '.yaml', '.yml', '.md', '.rst', '.html', '.htm',
+ '.exe', '.bat', '.sh', '.py', '.js', '.css',
+ '.sql', '.bak', '.tmp', '.temp', '.lock', '.DS_Store', '.gitignore'
+ ]
+

  def _get_minimal_distinguishing_paths(file_paths: list[str]) -> dict[str, str]:
  """
  Generate minimal distinguishing paths for files to avoid ambiguity when multiple files have the same name.
-
+
  Args:
  file_paths: List of file paths
-
+
  Returns:
  Dictionary mapping full path to minimal distinguishing path
  """
  if not file_paths:
  return {}
-
+
  # Convert to Path objects and get absolute paths
  paths = [Path(fp).resolve() for fp in file_paths]
  result = {}
-
+
  # Group files by basename
  basename_groups = defaultdict(list)
  for i, path in enumerate(paths):
  basename_groups[path.name].append((i, path))
-
+
  for basename, path_list in basename_groups.items():
  if len(path_list) == 1:
  # Only one file with this name, use just the basename
@@ -56,7 +66,7 @@ def _get_minimal_distinguishing_paths(file_paths: list[str]) -> dict[str, str]:
  else:
  # Multiple files with same name, need to distinguish them
  path_parts_list = [path.parts for _, path in path_list]
-
+
  # Find the minimum number of parent directories needed to distinguish
  max_depth_needed = 1
  for depth in range(1, max(len(parts) for parts in path_parts_list) + 1):
@@ -67,12 +77,12 @@ def _get_minimal_distinguishing_paths(file_paths: list[str]) -> dict[str, str]:
  suffixes.append('/'.join(parts))
  else:
  suffixes.append('/'.join(parts[-depth:]))
-
+
  if len(set(suffixes)) == len(suffixes):
  # All suffixes are unique at this depth
  max_depth_needed = depth
  break
-
+
  # Apply the minimal distinguishing paths
  for (idx, path), parts in zip(path_list, path_parts_list):
  if max_depth_needed >= len(parts):
@@ -80,10 +90,8 @@ def _get_minimal_distinguishing_paths(file_paths: list[str]) -> dict[str, str]:
  else:
  distinguishing_path = '/'.join(parts[-max_depth_needed:])
  result[file_paths[idx]] = distinguishing_path
-
- return result
-

+ return result


  def _read_segmentation_names(segmentation_names_path: str | Path) -> dict:
@@ -257,6 +265,7 @@ def handle_api_key() -> str | None:
  If it does not exist, it asks the user to input it.
  Then, it asks the user if he wants to save the API key at a proper location in the machine
  """
+ from datamint.client_cmd_tools.datamint_config import ask_api_key
  api_key = configs.get_value(configs.APIKEY_KEY)
  if api_key is None:
  _USER_LOGGER.info("API key not found. Please provide it:")
@@ -524,6 +533,8 @@ def _parse_args() -> tuple[Any, list[str], Optional[list[dict]], Optional[list[s
  parser.add_argument('--channel', '--name', type=str, required=False,
  help='Channel name (arbritary) to upload the resources to. \
  Useful for organizing the resources in the platform.')
+ parser.add_argument('--project', type=str, required=False,
+ help='Project name to add the uploaded resources to after successful upload.')
  parser.add_argument('--retain-pii', action='store_true', help='Do not anonymize DICOMs')
  parser.add_argument('--retain-attribute', type=_tuple_int_type, action='append',
  default=[],
@@ -541,7 +552,8 @@ def _parse_args() -> tuple[Any, list[str], Optional[list[dict]], Optional[list[s
  help='File extensions to be considered for uploading. Default: all file extensions.' +
  ' Example: --include-extensions dcm jpg png')
  parser.add_argument('--exclude-extensions', type=str, nargs='+',
- help='File extensions to be excluded from uploading. Default: none.' +
+ help='File extensions to be excluded from uploading. ' +
+ 'Default: common non-medical file extensions (.txt, .json, .xml, .docx, etc.) when --include-extensions is not specified.' +
  ' Example: --exclude-extensions txt csv'
  )
  parser.add_argument('--segmentation_path', type=_is_valid_path_argparse, metavar="FILE",
@@ -581,7 +593,6 @@ def _parse_args() -> tuple[Any, list[str], Optional[list[dict]], Optional[list[s

  if args.verbose:
  # Get the console handler and set to debug
- print(logging.getLogger().handlers)
  logging.getLogger().handlers[0].setLevel(logging.DEBUG)
  logging.getLogger('datamint').setLevel(logging.DEBUG)
  _LOGGER.setLevel(logging.DEBUG)
@@ -594,6 +605,11 @@ def _parse_args() -> tuple[Any, list[str], Optional[list[dict]], Optional[list[s
  if args.include_extensions is not None and args.exclude_extensions is not None:
  raise ValueError("--include-extensions and --exclude-extensions are mutually exclusive.")

+ # Apply default excluded extensions if neither include nor exclude extensions are specified
+ if args.include_extensions is None and args.exclude_extensions is None:
+ args.exclude_extensions = DEFAULT_EXCLUDED_EXTENSIONS
+ _LOGGER.debug(f"Applied default excluded extensions: {args.exclude_extensions}")
+
  try:
  if os.path.isfile(args.path):
  file_path = [args.path]
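
The directory-walking code that consumes `args.exclude_extensions` is not shown in this diff. The sketch below only illustrates, with a hypothetical helper, how an exclusion list like `DEFAULT_EXCLUDED_EXTENSIONS` is typically applied to candidate files:

    from pathlib import Path

    EXCLUDED = ['.txt', '.json', '.pdf', '.py']  # abbreviated stand-in for DEFAULT_EXCLUDED_EXTENSIONS

    def filter_candidates(paths: list[str], exclude_extensions: list[str]) -> list[str]:
        """Hypothetical helper: drop files whose suffix is in the exclusion list (case-insensitive)."""
        excluded = {ext.lower() for ext in exclude_extensions}
        return [p for p in paths if Path(p).suffix.lower() not in excluded]

    print(filter_candidates(["scan.dcm", "notes.txt", "meta.json"], EXCLUDED))
    # -> ['scan.dcm']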
@@ -720,24 +736,26 @@ def print_results_summary(files_path: list[str],
  failure_files = [f for f, r in zip(files_path, results) if isinstance(r, Exception)]
  # Get distinguishing paths for better error reporting
  distinguishing_paths = _get_minimal_distinguishing_paths(files_path)
-
+
  _USER_LOGGER.info(f"\nUpload summary:")
  _USER_LOGGER.info(f"\tTotal files: {len(files_path)}")
  _USER_LOGGER.info(f"\tSuccessful uploads: {len(files_path) - len(failure_files)}")
  if len(failure_files) > 0:
- _USER_LOGGER.info(f"\t❌ Failed uploads: {len(failure_files)}")
+ _USER_LOGGER.warning(f"\tFailed uploads: {len(failure_files)}")
  _USER_LOGGER.warning(f"\tFailed files: {[distinguishing_paths[f] for f in failure_files]}")
  _USER_LOGGER.warning(f"\nFailures:")
  for f, r in zip(files_path, results):
  if isinstance(r, Exception):
  _USER_LOGGER.warning(f"\t{distinguishing_paths[f]}: {r}")
  else:
- _USER_LOGGER.info(f'✅ All uploads successful!')
+ CONSOLE.print(f'✅ All uploads successful!', style='success')
  return len(failure_files)


  def main():
+ global CONSOLE
  load_cmdline_logging_config()
+ CONSOLE = [h for h in _USER_LOGGER.handlers if isinstance(h, ConsoleWrapperHandler)][0].console

  try:
  args, files_path, segfiles, metadata_files = _parse_args()
@@ -745,48 +763,67 @@ def main():
  _USER_LOGGER.error(f'Error validating arguments. {e}')
  sys.exit(1)

- print_input_summary(files_path,
- args=args,
- segfiles=segfiles,
- metadata_files=metadata_files,
- include_extensions=args.include_extensions)
+ try:
+ print_input_summary(files_path,
+ args=args,
+ segfiles=segfiles,
+ metadata_files=metadata_files,
+ include_extensions=args.include_extensions)

- if not args.yes:
- confirmation = input("Do you want to proceed with the upload? (y/n): ")
- if confirmation.lower() != "y":
- _USER_LOGGER.info("Upload cancelled.")
- return
- #######################################
+ if not args.yes:
+ confirmation = input("Do you want to proceed with the upload? (y/n): ")
+ if confirmation.lower() != "y":
+ _USER_LOGGER.info("Upload cancelled.")
+ return
+ #######################################

- has_a_dicom_file = any(is_dicom(f) for f in files_path)
+ has_a_dicom_file = any(is_dicom(f) for f in files_path)

- try:
- api_handler = APIHandler(check_connection=True)
- except DatamintException as e:
- _USER_LOGGER.error(f'❌ Connection failed: {e}')
- return
- try:
- results = api_handler.upload_resources(channel=args.channel,
- files_path=files_path,
- tags=args.tag,
- on_error='skip',
- anonymize=args.retain_pii == False and has_a_dicom_file,
- anonymize_retain_codes=args.retain_attribute,
- mung_filename=args.mungfilename,
- publish=args.publish,
- segmentation_files=segfiles,
- transpose_segmentation=args.transpose_segmentation,
- assemble_dicoms=True,
- metadata=metadata_files
- )
- except pydicom.errors.InvalidDicomError as e:
- _USER_LOGGER.error(f'❌ Invalid DICOM file: {e}')
- return
- _USER_LOGGER.info('Upload finished!')
- _LOGGER.debug(f"Number of results: {len(results)}")
-
- num_failures = print_results_summary(files_path, results)
- if num_failures > 0:
+ try:
+ api_handler = APIHandler(check_connection=True)
+ except DatamintException as e:
+ _USER_LOGGER.error(f'❌ Connection failed: {e}')
+ return
+ try:
+ results = api_handler.upload_resources(channel=args.channel,
+ files_path=files_path,
+ tags=args.tag,
+ on_error='skip',
+ anonymize=args.retain_pii == False and has_a_dicom_file,
+ anonymize_retain_codes=args.retain_attribute,
+ mung_filename=args.mungfilename,
+ publish=args.publish,
+ segmentation_files=segfiles,
+ transpose_segmentation=args.transpose_segmentation,
+ assemble_dicoms=True,
+ metadata=metadata_files,
+ progress_bar=True
+ )
+ except pydicom.errors.InvalidDicomError as e:
+ _USER_LOGGER.error(f'❌ Invalid DICOM file: {e}')
+ return
+ _USER_LOGGER.info('Upload finished!')
+ _LOGGER.debug(f"Number of results: {len(results)}")
+
+ # Add resources to project if specified
+ if args.project is not None:
+ _USER_LOGGER.info(f"Adding uploaded resources to project '{args.project}'...")
+ try:
+ # Filter successful uploads to get resource IDs
+ successful_resource_ids = [r for r in results if not isinstance(r, Exception)]
+ if successful_resource_ids:
+ api_handler.add_to_project(project_name=args.project, resource_ids=successful_resource_ids)
+ _USER_LOGGER.info(f"✅ Successfully added {len(successful_resource_ids)} resources to project '{args.project}'")
+ else:
+ _USER_LOGGER.warning("No successful uploads to add to project")
+ except Exception as e:
+ _USER_LOGGER.error(f"❌ Failed to add resources to project '{args.project}': {e}")
+
+ num_failures = print_results_summary(files_path, results)
+ if num_failures > 0:
+ sys.exit(1)
+ except KeyboardInterrupt:
+ CONSOLE.print("\nUpload cancelled by user.", style='warning')
  sys.exit(1)



{datamint-1.9.2 → datamint-1.9.3}/datamint/logging.yaml
@@ -7,7 +7,7 @@ handlers:
  level: WARNING
  show_time: False
  console_user:
- class: datamint.utils.logging_utils.ConditionalRichHandler
+ class: datamint.utils.logging_utils.ConsoleWrapperHandler
  level: INFO
  show_path: False
  show_time: False

datamint-1.9.3/datamint/utils/logging_utils.py
@@ -0,0 +1,130 @@
+ from rich.theme import Theme
+ from logging import Logger, DEBUG, INFO, WARNING, ERROR, CRITICAL
+ from rich.console import Console
+ import platform
+ import os
+ import logging
+ import logging.config
+ from rich.console import ConsoleRenderable
+ from rich.logging import RichHandler
+ from rich.traceback import Traceback
+ import yaml
+ import importlib
+
+ _LOGGER = logging.getLogger(__name__)
+
+
+ class ConditionalRichHandler(RichHandler):
+ """
+ Class that uses 'show_level=True' only if the message level is WARNING or higher.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def handle(self, record):
+ if record.levelno >= logging.WARNING:
+ self.show_level = True
+ else:
+ self.show_level = False
+ super().handle(record)
+
+ def render(self, *, record: logging.LogRecord,
+ traceback: Traceback | None,
+ message_renderable: ConsoleRenderable) -> ConsoleRenderable:
+ # if level is WARNING or higher, add the level column
+ try:
+ self._log_render.show_level = record.levelno >= logging.WARNING
+ ret = super().render(record=record, traceback=traceback, message_renderable=message_renderable)
+ self._log_render.show_level = False
+ except Exception as e:
+ _LOGGER.error(f"Error rendering log. {e}")
+ return ret
+
+
+ def load_cmdline_logging_config():
+ # Load the logging configuration file
+ try:
+ try:
+ # try loading the developer's logging config
+ with open('logging_dev.yaml', 'r') as f:
+ config = yaml.safe_load(f)
+ except:
+ with importlib.resources.open_text('datamint', 'logging.yaml') as f:
+ config = yaml.safe_load(f.read())
+
+ logging.config.dictConfig(config)
+ except Exception as e:
+ print(f"Warning: Error loading logging configuration file: {e}")
+ _LOGGER.exception(e)
+ logging.basicConfig(level=logging.INFO)
+
+
+ LEVELS_MAPPING = {
+ DEBUG: None,
+ INFO: None,
+ WARNING: "warning",
+ ERROR: "error",
+ CRITICAL: "error"
+ }
+
+
+ def _create_console_theme() -> Theme:
+ """Create a custom Rich theme optimized for cross-platform terminals."""
+ # Detect if we're likely on PowerShell (Windows + PowerShell)
+ is_powershell = (
+ platform.system() == "Windows" and
+ os.environ.get("PSModulePath") is not None
+ )
+
+ if is_powershell:
+ # PowerShell blue background - use high contrast colors
+ return Theme({
+ "warning": "bright_yellow",
+ "error": "bright_red on white",
+ "success": "bright_green",
+ "key": "bright_cyan",
+ "accent": "bright_cyan",
+ "title": "bold"
+ })
+ else:
+ # Linux/Unix terminals - standard colors
+ return Theme({
+ "warning": "yellow",
+ "error": "red",
+ "success": "green",
+ "key": "cyan",
+ "accent": "bright_blue",
+ "title": "bold"
+ })
+
+
+ class ConsoleWrapperHandler(ConditionalRichHandler):
+ """
+ A logging handler that uses a rich.console.Console to print log messages.
+ """
+ def __init__(self, *args, console: Console | None = None, **kwargs):
+ """
+ Initializes the ConsoleWrapperHandler.
+
+ Args:
+ console (Console | None): A rich Console instance. If None, a new one is created.
+ """
+ super().__init__(*args, **kwargs)
+ if console is None:
+ console = Console(theme=_create_console_theme())
+ self.console = console
+
+ def emit(self, record: logging.LogRecord) -> None:
+ """
+ Emits a log record.
+
+ Args:
+ record (logging.LogRecord): The log record to emit.
+ """
+ try:
+ msg = self.format(record)
+ style = LEVELS_MAPPING.get(record.levelno)
+ self.console.print(msg, style=style)
+ except Exception:
+ self.handleError(record)
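
A short usage sketch for the new handler, based only on the code added above: attach a `ConsoleWrapperHandler` to the user-facing logger and reuse its themed `Console` for direct styled output (the 'success' style is defined by `_create_console_theme`):

    import logging
    from datamint.utils.logging_utils import ConsoleWrapperHandler

    user_logger = logging.getLogger('user_logger')
    handler = ConsoleWrapperHandler(show_path=False, show_time=False)  # RichHandler options, as in logging.yaml
    user_logger.addHandler(handler)
    user_logger.setLevel(logging.INFO)

    user_logger.info("Upload finished!")  # rendered through the handler's themed Console
    handler.console.print("✅ All uploads successful!", style="success")  # same Console, printed directly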

{datamint-1.9.2 → datamint-1.9.3}/pyproject.toml
@@ -1,7 +1,7 @@
  [project]
  name = "datamint"
  description = "A library for interacting with the Datamint API, designed for efficient data management, processing and Deep Learning workflows."
- version = "1.9.2"
+ version = "1.9.3"
  dynamic = ["dependencies"]
  requires-python = ">=3.10"
  readme = "README.md"

datamint-1.9.2/datamint/utils/logging_utils.py
@@ -1,55 +0,0 @@
- import logging
- import logging.config
- from rich.console import ConsoleRenderable
- from rich.logging import RichHandler
- from rich.traceback import Traceback
- import yaml
- import importlib
-
- _LOGGER = logging.getLogger(__name__)
-
-
- class ConditionalRichHandler(RichHandler):
- """
- Class that uses 'show_level=True' only if the message level is WARNING or higher.
- """
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- def handle(self, record):
- if record.levelno >= logging.WARNING:
- self.show_level = True
- else:
- self.show_level = False
- super().handle(record)
-
- def render(self, *, record: logging.LogRecord,
- traceback: Traceback | None,
- message_renderable: ConsoleRenderable) -> ConsoleRenderable:
- # if level is WARNING or higher, add the level column
- try:
- self._log_render.show_level = record.levelno >= logging.WARNING
- ret = super().render(record=record, traceback=traceback, message_renderable=message_renderable)
- self._log_render.show_level = False
- except Exception as e:
- _LOGGER.error(f"Error rendering log. {e}")
- return ret
-
-
- def load_cmdline_logging_config():
- # Load the logging configuration file
- try:
- try:
- # try loading the developer's logging config
- with open('logging_dev.yaml', 'r') as f:
- config = yaml.safe_load(f)
- except:
- with importlib.resources.open_text('datamint', 'logging.yaml') as f:
- config = yaml.safe_load(f.read())
-
- logging.config.dictConfig(config)
- except Exception as e:
- print(f"Warning: Error loading logging configuration file: {e}")
- _LOGGER.exception(e)
- logging.basicConfig(level=logging.INFO)