spacr 0.5.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. spacr/__init__.py +0 -2
  2. spacr/__main__.py +3 -3
  3. spacr/core.py +13 -106
  4. spacr/gui_core.py +2 -2
  5. spacr/gui_utils.py +1 -13
  6. spacr/io.py +24 -25
  7. spacr/mediar.py +12 -8
  8. spacr/plot.py +50 -13
  9. spacr/settings.py +45 -6
  10. spacr/submodules.py +11 -1
  11. spacr/timelapse.py +21 -3
  12. spacr/utils.py +154 -15
  13. {spacr-0.5.0.dist-info → spacr-0.9.0.dist-info}/METADATA +62 -62
  14. spacr-0.9.0.dist-info/RECORD +109 -0
  15. {spacr-0.5.0.dist-info → spacr-0.9.0.dist-info}/WHEEL +1 -1
  16. spacr/resources/MEDIAR/.gitignore +0 -18
  17. spacr/resources/MEDIAR/LICENSE +0 -21
  18. spacr/resources/MEDIAR/README.md +0 -189
  19. spacr/resources/MEDIAR/SetupDict.py +0 -39
  20. spacr/resources/MEDIAR/__pycache__/SetupDict.cpython-39.pyc +0 -0
  21. spacr/resources/MEDIAR/__pycache__/evaluate.cpython-39.pyc +0 -0
  22. spacr/resources/MEDIAR/__pycache__/generate_mapping.cpython-39.pyc +0 -0
  23. spacr/resources/MEDIAR/__pycache__/main.cpython-39.pyc +0 -0
  24. spacr/resources/MEDIAR/config/baseline.json +0 -60
  25. spacr/resources/MEDIAR/config/mediar_example.json +0 -72
  26. spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -17
  27. spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -55
  28. spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -58
  29. spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -66
  30. spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -66
  31. spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -16
  32. spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -23
  33. spacr/resources/MEDIAR/core/BasePredictor.py +0 -120
  34. spacr/resources/MEDIAR/core/BaseTrainer.py +0 -240
  35. spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -59
  36. spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -113
  37. spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -2
  38. spacr/resources/MEDIAR/core/Baseline/__pycache__/Predictor.cpython-39.pyc +0 -0
  39. spacr/resources/MEDIAR/core/Baseline/__pycache__/Trainer.cpython-39.pyc +0 -0
  40. spacr/resources/MEDIAR/core/Baseline/__pycache__/__init__.cpython-39.pyc +0 -0
  41. spacr/resources/MEDIAR/core/Baseline/__pycache__/utils.cpython-39.pyc +0 -0
  42. spacr/resources/MEDIAR/core/Baseline/utils.py +0 -80
  43. spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -105
  44. spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -234
  45. spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -172
  46. spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -3
  47. spacr/resources/MEDIAR/core/MEDIAR/__pycache__/EnsemblePredictor.cpython-39.pyc +0 -0
  48. spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Predictor.cpython-39.pyc +0 -0
  49. spacr/resources/MEDIAR/core/MEDIAR/__pycache__/Trainer.cpython-39.pyc +0 -0
  50. spacr/resources/MEDIAR/core/MEDIAR/__pycache__/__init__.cpython-39.pyc +0 -0
  51. spacr/resources/MEDIAR/core/MEDIAR/__pycache__/utils.cpython-39.pyc +0 -0
  52. spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -429
  53. spacr/resources/MEDIAR/core/__init__.py +0 -2
  54. spacr/resources/MEDIAR/core/__pycache__/BasePredictor.cpython-39.pyc +0 -0
  55. spacr/resources/MEDIAR/core/__pycache__/BaseTrainer.cpython-39.pyc +0 -0
  56. spacr/resources/MEDIAR/core/__pycache__/__init__.cpython-39.pyc +0 -0
  57. spacr/resources/MEDIAR/core/__pycache__/utils.cpython-39.pyc +0 -0
  58. spacr/resources/MEDIAR/core/utils.py +0 -40
  59. spacr/resources/MEDIAR/evaluate.py +0 -71
  60. spacr/resources/MEDIAR/generate_mapping.py +0 -121
  61. spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
  62. spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
  63. spacr/resources/MEDIAR/image/failure_cases.png +0 -0
  64. spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
  65. spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
  66. spacr/resources/MEDIAR/image/mediar_results.png +0 -0
  67. spacr/resources/MEDIAR/main.py +0 -125
  68. spacr/resources/MEDIAR/predict.py +0 -70
  69. spacr/resources/MEDIAR/requirements.txt +0 -14
  70. spacr/resources/MEDIAR/train_tools/__init__.py +0 -3
  71. spacr/resources/MEDIAR/train_tools/__pycache__/__init__.cpython-39.pyc +0 -0
  72. spacr/resources/MEDIAR/train_tools/__pycache__/measures.cpython-39.pyc +0 -0
  73. spacr/resources/MEDIAR/train_tools/__pycache__/utils.cpython-39.pyc +0 -0
  74. spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -1
  75. spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/__init__.cpython-39.pyc +0 -0
  76. spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/datasetter.cpython-39.pyc +0 -0
  77. spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/transforms.cpython-39.pyc +0 -0
  78. spacr/resources/MEDIAR/train_tools/data_utils/__pycache__/utils.cpython-39.pyc +0 -0
  79. spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -88
  80. spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -161
  81. spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -77
  82. spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -3
  83. spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/CellAware.cpython-39.pyc +0 -0
  84. spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/LoadImage.cpython-39.pyc +0 -0
  85. spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/NormalizeImage.cpython-39.pyc +0 -0
  86. spacr/resources/MEDIAR/train_tools/data_utils/custom/__pycache__/__init__.cpython-39.pyc +0 -0
  87. spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
  88. spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -208
  89. spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -148
  90. spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -84
  91. spacr/resources/MEDIAR/train_tools/measures.py +0 -200
  92. spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -102
  93. spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -1
  94. spacr/resources/MEDIAR/train_tools/models/__pycache__/MEDIARFormer.cpython-39.pyc +0 -0
  95. spacr/resources/MEDIAR/train_tools/models/__pycache__/__init__.cpython-39.pyc +0 -0
  96. spacr/resources/MEDIAR/train_tools/utils.py +0 -70
  97. spacr-0.5.0.dist-info/RECORD +0 -190
  98. {spacr-0.5.0.dist-info → spacr-0.9.0.dist-info}/LICENSE +0 -0
  99. {spacr-0.5.0.dist-info → spacr-0.9.0.dist-info}/entry_points.txt +0 -0
  100. {spacr-0.5.0.dist-info → spacr-0.9.0.dist-info}/top_level.txt +0 -0
spacr/resources/MEDIAR/main.py
@@ -1,125 +0,0 @@
- import torch
- import os
- import wandb
- import argparse, pprint
-
- from train_tools import *
- from SetupDict import TRAINER, OPTIMIZER, SCHEDULER, MODELS, PREDICTOR
-
- # Ignore warnings for tifffile image reading
- import logging
-
- logging.getLogger().setLevel(logging.ERROR)
-
- # Set torch base print precision
- torch.set_printoptions(6)
-
-
- def _get_setups(args):
-     """Get experiment configuration"""
-
-     # Set model
-     model_args = args.train_setups.model
-     model = MODELS[model_args.name](**model_args.params)
-
-     # Load pretrained weights
-     if model_args.pretrained.enabled:
-         weights = torch.load(model_args.pretrained.weights, map_location="cpu")
-
-         print("\nLoading pretrained model....")
-         model.load_state_dict(weights, strict=model_args.pretrained.strict)
-
-     # Set dataloaders
-     dataloaders = datasetter.get_dataloaders_labeled(**args.data_setups.labeled)
-
-     # Set optimizer
-     optimizer_args = args.train_setups.optimizer
-     optimizer = OPTIMIZER[optimizer_args.name](
-         model.parameters(), **optimizer_args.params
-     )
-
-     # Set scheduler
-     scheduler = None
-
-     if args.train_setups.scheduler.enabled:
-         scheduler_args = args.train_setups.scheduler
-         scheduler = SCHEDULER[scheduler_args.name](optimizer, **scheduler_args.params)
-
-     # Set trainer
-     trainer_args = args.train_setups.trainer
-     trainer = TRAINER[trainer_args.name](
-         model, dataloaders, optimizer, scheduler, **trainer_args.params
-     )
-
-     # Check if no validation
-     if args.data_setups.labeled.valid_portion == 0:
-         trainer.no_valid = True
-
-     # Set public dataloader
-     if args.data_setups.public.enabled:
-         dataloaders = datasetter.get_dataloaders_public(
-             **args.data_setups.public.params
-         )
-         trainer.public_loader = dataloaders["public"]
-         trainer.public_iterator = iter(dataloaders["public"])
-
-     return trainer
-
-
- def main(args):
-     """Execute experiment."""
-
-     # Initialize W&B
-     wandb.init(config=args, **args.wandb_setups)
-
-     # How many batches to wait before logging training status
-     wandb.config.log_interval = 10
-
-     # Fix randomness for reproducibility
-     random_seeder(args.train_setups.seed)
-
-     # Set experiment
-     trainer = _get_setups(args)
-
-     # Watch parameters & gradients of model
-     wandb.watch(trainer.model, log="all", log_graph=True)
-
-     # Conduct experiment
-     trainer.train()
-
-     # Upload model to wandb server
-     model_path = os.path.join(wandb.run.dir, "model.pth")
-     torch.save(trainer.model.state_dict(), model_path)
-     wandb.save(model_path)
-
-     # Conduct prediction using the trained model
-     predictor = PREDICTOR[args.train_setups.trainer.name](
-         trainer.model,
-         args.train_setups.trainer.params.device,
-         args.pred_setups.input_path,
-         args.pred_setups.output_path,
-         args.pred_setups.make_submission,
-         args.pred_setups.exp_name,
-         args.pred_setups.algo_params,
-     )
-
-     total_time = predictor.conduct_prediction()
-     wandb.log({"total_time": total_time})
-
-
- # Parser arguments for terminal execution
- parser = argparse.ArgumentParser(description="Config file processing")
- parser.add_argument("--config_path", default="./config/baseline.json", type=str)
- args = parser.parse_args()
-
- #######################################################################################
-
- if __name__ == "__main__":
-     # Load configuration from .json file
-     opt = ConfLoader(args.config_path).opt
-
-     # Print configuration dictionary pretty
-     pprint_config(opt)
-
-     # Run experiment
-     main(opt)
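The removed main.py builds every training component from the JSON config by dispatching on name-keyed registries imported from SetupDict (TRAINER, OPTIMIZER, SCHEDULER, MODELS, PREDICTOR). A minimal sketch of that dispatch pattern follows; the "dummy" registry entries and the cfg dict are illustrative stand-ins, not part of MEDIAR:

    import torch

    # Stand-ins for SetupDict.MODELS / SetupDict.OPTIMIZER
    MODELS = {"dummy": torch.nn.Linear}
    OPTIMIZER = {"adamw": torch.optim.AdamW}

    # Stand-in for the parsed JSON config
    cfg = {
        "model": {"name": "dummy", "params": {"in_features": 4, "out_features": 2}},
        "optimizer": {"name": "adamw", "params": {"lr": 5e-5}},
    }

    # Same lookup-then-unpack construction as _get_setups()
    model = MODELS[cfg["model"]["name"]](**cfg["model"]["params"])
    optimizer = OPTIMIZER[cfg["optimizer"]["name"]](
        model.parameters(), **cfg["optimizer"]["params"]
    )

Per its argparse default, the script was invoked as `python main.py --config_path ./config/baseline.json`.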
spacr/resources/MEDIAR/predict.py
@@ -1,70 +0,0 @@
- import torch
- import argparse, pprint
-
- from train_tools import *
- from SetupDict import MODELS, PREDICTOR
-
- # Set torch base print precision
- torch.set_printoptions(6)
-
-
- def main(args):
-     """Execute prediction and save the results"""
-
-     model_args = args.pred_setups.model
-     model = MODELS[model_args.name](**model_args.params)
-
-     if "ensemble" in args.pred_setups.name:
-         weights = torch.load(args.pred_setups.model_path1, map_location="cpu")
-         model.load_state_dict(weights, strict=False)
-
-         model_aux = MODELS[model_args.name](**model_args.params)
-         weights_aux = torch.load(args.pred_setups.model_path2, map_location="cpu")
-         model_aux.load_state_dict(weights_aux, strict=False)
-
-         predictor = PREDICTOR[args.pred_setups.name](
-             model,
-             model_aux,
-             args.pred_setups.device,
-             args.pred_setups.input_path,
-             args.pred_setups.output_path,
-             args.pred_setups.make_submission,
-             args.pred_setups.exp_name,
-             args.pred_setups.algo_params,
-         )
-
-     else:
-         weights = torch.load(args.pred_setups.model_path, map_location="cpu")
-         model.load_state_dict(weights, strict=False)
-
-         predictor = PREDICTOR[args.pred_setups.name](
-             model,
-             args.pred_setups.device,
-             args.pred_setups.input_path,
-             args.pred_setups.output_path,
-             args.pred_setups.make_submission,
-             args.pred_setups.exp_name,
-             args.pred_setups.algo_params,
-         )
-
-     _ = predictor.conduct_prediction()
-
-
- # Parser arguments for terminal execution
- parser = argparse.ArgumentParser(description="Config file processing")
- parser.add_argument(
-     "--config_path", default="./config/step3_prediction/base_prediction.json", type=str
- )
- args = parser.parse_args()
-
- #######################################################################################
-
- if __name__ == "__main__":
-     # Load configuration from .json file
-     opt = ConfLoader(args.config_path).opt
-
-     # Print configuration dictionary pretty
-     pprint_config(opt)
-
-     # Run experiment
-     main(opt)
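predict.py loads checkpoints with map_location="cpu" and strict=False, so inference does not require a GPU and tolerates key mismatches between checkpoint and model. A minimal sketch of that loading pattern; the toy model and "model.pth" path are placeholders:

    import torch

    model = torch.nn.Linear(4, 2)                 # toy stand-in for a MEDIAR model
    torch.save(model.state_dict(), "model.pth")   # placeholder checkpoint

    state = torch.load("model.pth", map_location="cpu")   # stays on CPU
    result = model.load_state_dict(state, strict=False)   # tolerate key mismatches
    print(result.missing_keys, result.unexpected_keys)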
spacr/resources/MEDIAR/requirements.txt
@@ -1,14 +0,0 @@
- fastremap==1.14.1
- monai==1.3.0
- numba==0.57.1
- numpy==1.24.3
- pandas==2.0.3
- pytz==2023.3.post1
- scipy==1.12.0
- segmentation_models_pytorch==0.3.3
- tifffile==2023.4.12
- torch==2.1.2
- tqdm==4.65.0
- wandb==0.16.2
- scikit-image
- matplotlib
spacr/resources/MEDIAR/train_tools/__init__.py
@@ -1,3 +0,0 @@
- from .data_utils import *
- from .measures import *
- from .utils import *
spacr/resources/MEDIAR/train_tools/data_utils/__init__.py
@@ -1 +0,0 @@
- from .datasetter import *
spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py
@@ -1,88 +0,0 @@
- import numpy as np
- import copy
-
- from monai.transforms import RandScaleIntensity, Compose
- from monai.transforms.compose import MapTransform
- from skimage.segmentation import find_boundaries
-
-
- __all__ = ["BoundaryExclusion", "IntensityDiversification"]
-
-
- class BoundaryExclusion(MapTransform):
-     """Map the cell boundary pixel labels to the background class (0)."""
-
-     def __init__(self, keys=["label"], allow_missing_keys=False):
-         super(BoundaryExclusion, self).__init__(keys, allow_missing_keys)
-
-     def __call__(self, data):
-         # Find and Exclude Boundary
-         label_original = data["label"]
-         label = copy.deepcopy(label_original)
-         boundary = find_boundaries(label, connectivity=1, mode="thick")
-         label[boundary] = 0
-
-         # Do not exclude if the cell is too small (< 14x14).
-         new_label = copy.deepcopy(label_original)
-         new_label[label == 0] = 0
-
-         cell_idx, cell_counts = np.unique(label_original, return_counts=True)
-
-         for k in range(len(cell_counts)):
-             if cell_counts[k] < 196:
-                 new_label[label_original == cell_idx[k]] = cell_idx[k]
-
-         # Do not exclude if the pixels are at the image boundaries.
-         _, W, H = label_original.shape
-         bd = np.zeros_like(label_original, dtype=label.dtype)
-         bd[:, 2 : W - 2, 2 : H - 2] = 1
-         new_label += label_original * bd
-
-         # Assign the transformed label
-         data["label"] = new_label
-
-         return data
-
-
- class IntensityDiversification(MapTransform):
-     """Randomly rescale the intensity of cell pixels."""
-
-     def __init__(
-         self,
-         keys=["img"],
-         change_cell_ratio=0.4,
-         scale_factors=[0, 0.7],
-         allow_missing_keys=False,
-     ):
-         super(IntensityDiversification, self).__init__(keys, allow_missing_keys)
-
-         self.change_cell_ratio = change_cell_ratio
-         self.randscale_intensity = Compose(
-             [RandScaleIntensity(prob=1.0, factors=scale_factors)]
-         )
-
-     def __call__(self, data):
-         # Select cells to be transformed
-         cell_count = int(data["label"].max())
-         change_cell_count = int(cell_count * self.change_cell_ratio)
-         change_cell = np.random.choice(cell_count, change_cell_count, replace=False)
-
-         mask = copy.deepcopy(data["label"])
-
-         for i in range(cell_count):
-             cell_id = i + 1
-
-             if cell_id not in change_cell:
-                 mask[mask == cell_id] = 0
-
-         mask[mask > 0] = 1
-
-         # Conduct intensity transformation for the selected cells
-         img_original = copy.deepcopy((1 - mask) * data["img"])
-         img_transformed = copy.deepcopy(mask * data["img"])
-         img_transformed = self.randscale_intensity(img_transformed)
-
-         # Assign the transformed image
-         data["img"] = img_original + img_transformed
-
-         return data
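BoundaryExclusion relabels thick inter-cell boundary pixels as background so training is not dominated by ambiguous touching-cell edges. The core step is skimage's find_boundaries; a standalone sketch on a toy instance mask (the array values are illustrative):

    import numpy as np
    from skimage.segmentation import find_boundaries

    # Two touching "cells" in a toy instance mask
    label = np.zeros((8, 8), dtype=np.int32)
    label[1:5, 1:4] = 1
    label[1:5, 4:7] = 2

    # Thick boundaries with 1-connectivity, as in BoundaryExclusion
    boundary = find_boundaries(label, connectivity=1, mode="thick")

    excluded = label.copy()
    excluded[boundary] = 0   # boundary pixels -> background class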
spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py
@@ -1,161 +0,0 @@
- import numpy as np
- import tifffile as tif
- import skimage.io as io
- from typing import Optional, Sequence, Union
- from monai.config import DtypeLike, PathLike, KeysCollection
- from monai.utils import ensure_tuple
- from monai.data.utils import is_supported_format, optional_import, ensure_tuple_rep
- from monai.data.image_reader import ImageReader, NumpyReader
- from monai.transforms import LoadImage, LoadImaged
- from monai.utils.enums import PostFix
-
- DEFAULT_POST_FIX = PostFix.meta()
- itk, has_itk = optional_import("itk", allow_namespace_pkg=True)
-
- __all__ = [
-     "CustomLoadImaged",
-     "CustomLoadImageD",
-     "CustomLoadImageDict",
-     "CustomLoadImage",
- ]
-
-
- class CustomLoadImage(LoadImage):
-     """
-     Load image file or files from provided path based on reader.
-     If reader is not specified, this class automatically chooses readers
-     based on the supported suffixes and in the following order:
-
-     - User-specified reader at runtime when calling this loader.
-     - User-specified reader in the constructor of `LoadImage`.
-     - Readers from the last to the first in the registered list.
-     - Current default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),
-       (npz, npy -> NumpyReader), (nrrd -> NrrdReader), (DICOM file -> ITKReader).
-
-     [!Caution] This override replaces the original ITK reader with the custom UnifiedITKReader.
-     """
-
-     def __init__(
-         self,
-         reader=None,
-         image_only: bool = False,
-         dtype: DtypeLike = np.float32,
-         ensure_channel_first: bool = False,
-         *args,
-         **kwargs,
-     ) -> None:
-         super(CustomLoadImage, self).__init__(
-             reader, image_only, dtype, ensure_channel_first, *args, **kwargs
-         )
-
-         # Add a TIFF reader: although the ITK reader supports ".tiff" files, it sometimes fails to load images.
-         self.readers = []
-         self.register(UnifiedITKReader(*args, **kwargs))
-
-
- class CustomLoadImaged(LoadImaged):
-     """
-     Dictionary-based wrapper of `CustomLoadImage`.
-     """
-
-     def __init__(
-         self,
-         keys: KeysCollection,
-         reader: Optional[Union[ImageReader, str]] = None,
-         dtype: DtypeLike = np.float32,
-         meta_keys: Optional[KeysCollection] = None,
-         meta_key_postfix: str = DEFAULT_POST_FIX,
-         overwriting: bool = False,
-         image_only: bool = False,
-         ensure_channel_first: bool = False,
-         simple_keys=False,
-         allow_missing_keys: bool = False,
-         *args,
-         **kwargs,
-     ) -> None:
-         super(CustomLoadImaged, self).__init__(
-             keys,
-             reader,
-             dtype,
-             meta_keys,
-             meta_key_postfix,
-             overwriting,
-             image_only,
-             ensure_channel_first,
-             simple_keys,
-             allow_missing_keys,
-             *args,
-             **kwargs,
-         )
-
-         # Assign CustomLoader
-         self._loader = CustomLoadImage(
-             reader, image_only, dtype, ensure_channel_first, *args, **kwargs
-         )
-         if not isinstance(meta_key_postfix, str):
-             raise TypeError(
-                 f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}."
-             )
-         self.meta_keys = (
-             ensure_tuple_rep(None, len(self.keys))
-             if meta_keys is None
-             else ensure_tuple(meta_keys)
-         )
-         if len(self.keys) != len(self.meta_keys):
-             raise ValueError("meta_keys should have the same length as keys.")
-         self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
-         self.overwriting = overwriting
-
-
- class UnifiedITKReader(NumpyReader):
-     """
-     Unified reader for ".tif" and ".tiff" files.
-     As tifffile reads images as numpy arrays, it inherits from NumpyReader.
-     """
-
-     def __init__(
-         self, channel_dim: Optional[int] = None, **kwargs,
-     ):
-         super(UnifiedITKReader, self).__init__(channel_dim=channel_dim, **kwargs)
-         self.kwargs = kwargs
-         self.channel_dim = channel_dim
-
-     def verify_suffix(self, filename: Union[Sequence[PathLike], PathLike]) -> bool:
-         """Verify whether the file format is supported by the TIFF reader."""
-
-         suffixes: Sequence[str] = ["tif", "tiff", "png", "jpg", "bmp", "jpeg"]
-         return has_itk or is_supported_format(filename, suffixes)
-
-     def read(self, data: Union[Sequence[PathLike], PathLike], **kwargs):
-         """Read images from file."""
-         img_ = []
-
-         filenames: Sequence[PathLike] = ensure_tuple(data)
-         kwargs_ = self.kwargs.copy()
-         kwargs_.update(kwargs)
-
-         for name in filenames:
-             name = f"{name}"
-
-             if name.endswith(".tif") or name.endswith(".tiff"):
-                 _obj = tif.imread(name)
-             else:
-                 try:
-                     _obj = itk.imread(name, **kwargs_)
-                     _obj = itk.array_view_from_image(_obj, keep_axes=False)
-                 except Exception:
-                     _obj = io.imread(name)
-
-             if len(_obj.shape) == 2:
-                 _obj = np.repeat(np.expand_dims(_obj, axis=-1), 3, axis=-1)
-             elif len(_obj.shape) == 3 and _obj.shape[-1] > 3:
-                 _obj = _obj[:, :, :3]
-             else:
-                 pass
-
-             img_.append(_obj)
-
-         return img_ if len(filenames) > 1 else img_[0]
-
-
- CustomLoadImageD = CustomLoadImageDict = CustomLoadImaged
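The essence of UnifiedITKReader.read() is: tifffile for .tif/.tiff, ITK with a skimage fallback for everything else, then coerce every image to H x W x 3. A condensed sketch of that chain without the ITK dependency; read_any is a hypothetical helper, not part of the package:

    import numpy as np
    import tifffile as tif
    import skimage.io as io

    def read_any(path: str) -> np.ndarray:
        if path.endswith((".tif", ".tiff")):
            arr = tif.imread(path)   # tifffile is more reliable for TIFFs than ITK
        else:
            arr = io.imread(path)
        if arr.ndim == 2:            # grayscale -> repeat to 3 channels
            arr = np.repeat(arr[..., None], 3, axis=-1)
        elif arr.ndim == 3 and arr.shape[-1] > 3:
            arr = arr[..., :3]       # drop alpha / extra channels
        return arr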
spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py
@@ -1,77 +0,0 @@
- import numpy as np
- from skimage import exposure
- from monai.config import KeysCollection
-
- from monai.transforms.transform import Transform
- from monai.transforms.compose import MapTransform
-
- from typing import Dict, Hashable, Mapping
-
-
- __all__ = [
-     "CustomNormalizeImage",
-     "CustomNormalizeImageD",
-     "CustomNormalizeImageDict",
-     "CustomNormalizeImaged",
- ]
-
-
- class CustomNormalizeImage(Transform):
-     """Normalize the image."""
-
-     def __init__(self, percentiles=[0, 99.5], channel_wise=False):
-         self.lower, self.upper = percentiles
-         self.channel_wise = channel_wise
-
-     def _normalize(self, img) -> np.ndarray:
-         non_zero_vals = img[np.nonzero(img)]
-         percentiles = np.percentile(non_zero_vals, [self.lower, self.upper])
-         img_norm = exposure.rescale_intensity(
-             img, in_range=(percentiles[0], percentiles[1]), out_range="uint8"
-         )
-
-         return img_norm.astype(np.uint8)
-
-     def __call__(self, img: np.ndarray) -> np.ndarray:
-         if self.channel_wise:
-             pre_img_data = np.zeros(img.shape, dtype=np.uint8)
-             for i in range(img.shape[-1]):
-                 img_channel_i = img[:, :, i]
-
-                 if len(img_channel_i[np.nonzero(img_channel_i)]) > 0:
-                     pre_img_data[:, :, i] = self._normalize(img_channel_i)
-
-             img = pre_img_data
-
-         else:
-             img = self._normalize(img)
-
-         return img
-
-
- class CustomNormalizeImaged(MapTransform):
-     """Dictionary-based wrapper of CustomNormalizeImage."""
-
-     def __init__(
-         self,
-         keys: KeysCollection,
-         percentiles=[1, 99],
-         channel_wise: bool = False,
-         allow_missing_keys: bool = False,
-     ):
-         super(CustomNormalizeImaged, self).__init__(keys, allow_missing_keys)
-         self.normalizer = CustomNormalizeImage(percentiles, channel_wise)
-
-     def __call__(
-         self, data: Mapping[Hashable, np.ndarray]
-     ) -> Dict[Hashable, np.ndarray]:
-
-         d = dict(data)
-
-         for key in self.keys:
-             d[key] = self.normalizer(d[key])
-
-         return d
-
-
- CustomNormalizeImageD = CustomNormalizeImageDict = CustomNormalizeImaged
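CustomNormalizeImage clips to percentiles computed over non-zero pixels only (zeros are typically empty background in microscopy) and rescales to uint8. A standalone sketch of the same computation on a synthetic image; the random 12-bit image is illustrative:

    import numpy as np
    from skimage import exposure

    rng = np.random.default_rng(0)
    img = rng.integers(0, 4096, size=(64, 64)).astype(np.uint16)  # fake 12-bit image

    nz = img[np.nonzero(img)]            # percentiles over non-zero pixels only
    lo, hi = np.percentile(nz, [1, 99])
    img_u8 = exposure.rescale_intensity(
        img, in_range=(lo, hi), out_range="uint8"
    ).astype(np.uint8)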
spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py
@@ -1,3 +0,0 @@
- from .LoadImage import *
- from .NormalizeImage import *
- from .CellAware import *