dicube 0.2.2__cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dicube/__init__.py +174 -0
- dicube/codecs/__init__.py +152 -0
- dicube/codecs/jph/__init__.py +15 -0
- dicube/codecs/jph/codec.py +161 -0
- dicube/codecs/jph/ojph_complete.cpython-310-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_complete.cpython-311-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_complete.cpython-312-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_complete.cpython-38-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_complete.cpython-39-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-310-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-311-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-312-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-38-aarch64-linux-gnu.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-39-aarch64-linux-gnu.so +0 -0
- dicube/core/__init__.py +21 -0
- dicube/core/image.py +349 -0
- dicube/core/io.py +408 -0
- dicube/core/pixel_header.py +120 -0
- dicube/dicom/__init__.py +13 -0
- dicube/dicom/dcb_streaming.py +248 -0
- dicube/dicom/dicom_io.py +153 -0
- dicube/dicom/dicom_meta.py +740 -0
- dicube/dicom/dicom_status.py +259 -0
- dicube/dicom/dicom_tags.py +121 -0
- dicube/dicom/merge_utils.py +283 -0
- dicube/dicom/space_from_meta.py +70 -0
- dicube/exceptions.py +189 -0
- dicube/storage/__init__.py +17 -0
- dicube/storage/dcb_file.py +824 -0
- dicube/storage/pixel_utils.py +259 -0
- dicube/utils/__init__.py +6 -0
- dicube/validation.py +380 -0
- dicube-0.2.2.dist-info/METADATA +272 -0
- dicube-0.2.2.dist-info/RECORD +35 -0
- dicube-0.2.2.dist-info/WHEEL +6 -0
dicube/storage/pixel_utils.py
ADDED
@@ -0,0 +1,259 @@
from typing import Tuple

import numpy as np

from ..core.pixel_header import PixelDataHeader


def derive_pixel_header_from_array(
    image: np.ndarray, preferred_dtype=np.uint16
) -> Tuple[np.ndarray, PixelDataHeader]:
    """Derive pixel data header information from an input numpy array.

    Processes different data types in different ways:
    - For unsigned integers (uint8/16/32): use raw data directly
    - For signed integers (int8/16/32): convert to unsigned and record the offset
    - For floating point (float16/32/64): normalize to the specified unsigned integer range

    Args:
        image (np.ndarray): Input image array.
        preferred_dtype (np.dtype): Preferred output data type. Defaults to np.uint16.

    Returns:
        Tuple[np.ndarray, PixelDataHeader]: A tuple containing:
            - The converted image array
            - A PixelDataHeader object with appropriate metadata

    Raises:
        ValueError: When preferred_dtype is not supported.
        NotImplementedError: When input array dtype is not supported.
    """
    dtype = str(image.dtype)
    if image.dtype in (np.uint16, np.uint8, np.uint32):
        return image, PixelDataHeader(
            RescaleSlope=1,
            RescaleIntercept=0,
            PixelDtype=dtype,
            OriginalPixelDtype=dtype,
        )
    elif image.dtype == np.int16:
        min_val = int(np.min(image))
        image = (image - min_val).astype("uint16")
        return image, PixelDataHeader(
            RescaleSlope=1,
            RescaleIntercept=min_val,
            PixelDtype="uint16",
            OriginalPixelDtype=dtype,
        )
    elif image.dtype == np.int8:
        min_val = int(np.min(image))
        image = (image - min_val).astype("uint8")
        return image, PixelDataHeader(
            RescaleSlope=1,
            RescaleIntercept=min_val,
            PixelDtype="uint8",
            OriginalPixelDtype=dtype,
        )
    elif image.dtype == np.int32:
        min_val = int(np.min(image))
        image = (image - min_val).astype("uint32")
        return image, PixelDataHeader(
            RescaleSlope=1,
            RescaleIntercept=min_val,
            PixelDtype="uint32",
            OriginalPixelDtype=dtype,
        )
    elif image.dtype in (np.float16, np.float32, np.float64):
        if preferred_dtype == "uint8":
            dtype_max = 255
        elif preferred_dtype == "uint16":
            dtype_max = 65535
        else:
            raise ValueError(f"Unsupported preferred_dtype: {preferred_dtype}")

        min_val = image.min()
        max_val = image.max()
        if np.isclose(min_val, max_val):
            # For constant-value arrays:
            # set all pixels to 0, slope=1, intercept=min_val.
            # When reading back: i * slope + intercept == min_val
            header = PixelDataHeader(
                RescaleSlope=1.0,
                RescaleIntercept=float(min_val),
                PixelDtype=preferred_dtype,
                OriginalPixelDtype=dtype,
            )
            raw_image = np.zeros_like(image, dtype=preferred_dtype)
            return raw_image, header
        else:
            slope = float(max_val - min_val)
            intercept = float(min_val)
            raw_image = ((image - intercept) / slope * dtype_max).astype(
                preferred_dtype
            )
            header = PixelDataHeader(
                RescaleSlope=slope,
                RescaleIntercept=intercept,
                PixelDtype=preferred_dtype,
                OriginalPixelDtype=dtype,
                MaxVal=max_val,
                MinVal=min_val,
            )
            return raw_image, header
    else:
        raise NotImplementedError("Unsupported dtype")
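
A usage sketch of the signed-integer path (hypothetical values; the module path follows the file list above):

    import numpy as np
    from dicube.storage.pixel_utils import derive_pixel_header_from_array

    # Signed data is shifted into unsigned storage; the minimum becomes the intercept.
    ct_slice = np.array([[-1000, 0], [500, 1200]], dtype=np.int16)
    raw, header = derive_pixel_header_from_array(ct_slice)
    print(raw.dtype, raw.min())       # uint16 0
    print(header.RescaleIntercept)    # -1000
    print(header.OriginalPixelDtype)  # int16
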
def get_float_data(
    raw_image: np.ndarray, pixel_header: PixelDataHeader, dtype="float32"
) -> np.ndarray:
    """Get image data as a floating point array with slope/intercept applied.

    Inspired by NIfTI's get_fdata method, this converts the raw image data
    to floating point format and applies the rescale slope and intercept.

    Args:
        raw_image (np.ndarray): Raw image data array.
        pixel_header (PixelDataHeader): Pixel data header containing rescale information.
        dtype (str): Output data type; must be one of: float16, float32, float64.
            Defaults to "float32".

    Returns:
        np.ndarray: Floating point image data with rescale factors applied.

    Raises:
        AssertionError: If dtype is not one of the allowed float types.
    """
    assert dtype in (
        "float16",
        "float32",
        "float64",
    ), "only accept float16, float32, float64"

    # Note: output may be positive or negative depending on the original dtype
    # and the slope/intercept values.
    output_img = raw_image.astype(dtype)
    if pixel_header.RescaleSlope is not None:
        slope = np.array(pixel_header.RescaleSlope).astype(dtype)
        if slope != 1.0:
            output_img *= slope
    if pixel_header.RescaleIntercept is not None:
        intercept = np.array(pixel_header.RescaleIntercept).astype(dtype)
        if intercept != 0.0:
            output_img += intercept
    return output_img
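
A minimal round-trip sketch tying the two helpers together (hypothetical values; imports as above):

    import numpy as np
    from dicube.storage.pixel_utils import derive_pixel_header_from_array, get_float_data

    raw, header = derive_pixel_header_from_array(np.array([-5, 0, 5], dtype=np.int16))
    restored = get_float_data(raw, header, dtype="float32")
    print(restored)  # [-5.  0.  5.] -- the uint16 raw data plus the stored intercept (-5)
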
def determine_optimal_nifti_dtype(
    image: np.ndarray, pixel_header: PixelDataHeader
) -> Tuple[np.ndarray, str]:
    """Determine the optimal data type for saving to NIfTI and return the converted data.

    This function selects the most appropriate data type for NIfTI export based on the value range
    of the raw image and the rescale slope/intercept. It minimizes unnecessary data conversion and
    only applies scaling or offset if needed.

    Args:
        image (np.ndarray): The raw image data (integer type guaranteed).
        pixel_header (PixelDataHeader): Pixel header containing rescale information.

    Returns:
        Tuple[np.ndarray, str]:
            - The image data converted to the optimal type for NIfTI export.
            - The name of the chosen data type as a string.

    Raises:
        ValueError: If the data cannot be represented in any supported NIfTI type.

    Example:
        >>> arr = np.array([0, 100, 200], dtype=np.uint16)
        >>> header = PixelDataHeader(RescaleSlope=1.0, RescaleIntercept=0.0, OriginalPixelDtype="uint16", PixelDtype="uint16")
        >>> data, dtype_name = determine_optimal_nifti_dtype(arr, header)
        >>> print(data.dtype, dtype_name)
        uint8 uint8
    """
    # Get the slope and intercept
    slope = pixel_header.RescaleSlope if pixel_header.RescaleSlope is not None else 1.0
    intercept = pixel_header.RescaleIntercept if pixel_header.RescaleIntercept is not None else 0.0

    # Compute the value range directly from the raw data
    raw_min = float(image.min())
    raw_max = float(image.max())

    # Compute the value range after applying slope and intercept
    min_val = raw_min * slope + intercept
    max_val = raw_max * slope + intercept

    # If the slope is negative, min and max must be swapped
    if slope < 0:
        min_val, max_val = max_val, min_val

    # Check the original data type
    original_dtype = pixel_header.OriginalPixelDtype
    is_signed_original = original_dtype in ("int8", "int16", "int32", "int64")

    # Check whether slope and intercept are integer-valued
    has_integer_transform = (
        np.isclose(slope % 1, 0) and
        np.isclose(intercept % 1, 0)
    )

    # Prepare the final data; perform a single conversion once the dtype is decided
    result_dtype = None
    result_dtype_name = None

    # If slope and intercept are both integers, an integer type can be used
    if has_integer_transform:
        # Respect the signedness of the original data type
        if is_signed_original or min_val < 0:
            # Signed integers
            if min_val >= -128 and max_val <= 127:
                result_dtype = np.int8
                result_dtype_name = "int8"
            elif min_val >= -32768 and max_val <= 32767:
                result_dtype = np.int16
                result_dtype_name = "int16"
            elif min_val >= -2147483648 and max_val <= 2147483647:
                result_dtype = np.int32
                result_dtype_name = "int32"
            elif max_val <= 2147483647:  # range fits in int32, but the original type is int32
                result_dtype = np.int32
                result_dtype_name = "int32"
        else:
            # Unsigned integers
            if max_val <= 255:
                result_dtype = np.uint8
                result_dtype_name = "uint8"
            elif max_val <= 65535:
                result_dtype = np.uint16
                result_dtype_name = "uint16"
            elif max_val <= 4294967295:
                result_dtype = np.uint32
                result_dtype_name = "uint32"

    # If no suitable integer type was found, fall back to a float type
    if result_dtype is None:
        if np.issubdtype(image.dtype, np.float64) or min_val < -3.4e38 or max_val > 3.4e38:
            result_dtype = np.float64
            result_dtype_name = "float64"
        else:
            result_dtype = np.float32
            result_dtype_name = "float32"

    if has_integer_transform:
        intercept = int(intercept)
    else:
        intercept = np.array(intercept, dtype=result_dtype)

    if slope == 1.0:
        # Only an offset is needed
        return image.astype(result_dtype) + intercept, result_dtype_name
    else:
        # Multiplication is needed; produce the final data
        if result_dtype in (np.float32, np.float64):
            # Float type: use floating point arithmetic directly
            result = image.astype(result_dtype) * slope + intercept
        else:
            # Integer type: do the math in float, then convert
            result = (image.astype(np.float32) * slope + intercept).astype(result_dtype)

        return result, result_dtype_name
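
To see the integer fast path in action, a sketch with a hypothetical CT-style header (the constructor arguments mirror the docstring example above):

    import numpy as np
    from dicube.core.pixel_header import PixelDataHeader
    from dicube.storage.pixel_utils import determine_optimal_nifti_dtype

    arr = np.array([0, 10, 40], dtype=np.uint16)
    header = PixelDataHeader(
        RescaleSlope=1.0,
        RescaleIntercept=-1000.0,
        PixelDtype="uint16",
        OriginalPixelDtype="int16",
    )
    data, name = determine_optimal_nifti_dtype(arr, header)
    print(data.dtype, name)  # int16 int16 -- the range [-1000, -960] needs a signed type
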
dicube/utils/__init__.py
ADDED
dicube/validation.py
ADDED
@@ -0,0 +1,380 @@
"""Validation utilities for DiCube library.

This module provides reusable validation functions that automatically raise
appropriate DicomCubeError exceptions with consistent error messages and context.
These utilities help minimize code duplication and provide clean error handling
throughout the DiCube library.
"""

import os
from typing import Any, Type, Union, Optional
import numpy as np

from .exceptions import (
    DicomCubeError,
    InvalidCubeFileError,
    DataConsistencyError,
    MetaDataError
)


def validate_not_none(
    value: Any,
    name: str,
    context: str = "",
    exception_class: Type[DicomCubeError] = DicomCubeError
) -> Any:
    """Validate that a value is not None.

    Args:
        value: The value to validate.
        name (str): Name of the parameter being validated.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        Any: The validated value if not None.

    Raises:
        DicomCubeError: If the value is None.
    """
    if value is None:
        raise exception_class(
            f"Parameter '{name}' cannot be None",
            context=context,
            details={"parameter": name, "value": value}
        )
    return value
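
A sketch of the calling convention (the `load_image` wrapper is hypothetical):

    from dicube.validation import validate_not_none

    def load_image(image=None):
        # Raises DicomCubeError naming the parameter and operation if image is None
        validate_not_none(image, "image", context="load_image operation")
        return image
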
def validate_file_exists(
    file_path: str,
    context: str = "",
    exception_class: Type[DicomCubeError] = InvalidCubeFileError
) -> str:
    """Validate that a file exists.

    Args:
        file_path (str): Path to the file to validate.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        str: The validated file path.

    Raises:
        InvalidCubeFileError: If the file does not exist or is not a regular file.
    """
    validate_not_none(file_path, "file_path", context, exception_class)

    if not os.path.exists(file_path):
        raise exception_class(
            "File not found",
            context=context,
            details={"file_path": file_path},
            suggestion="Verify the file path is correct and the file exists"
        )

    if not os.path.isfile(file_path):
        raise exception_class(
            "Path exists but is not a file",
            context=context,
            details={"file_path": file_path},
            suggestion="Ensure the path points to a file, not a directory"
        )

    return file_path
def validate_folder_exists(
    folder_path: str,
    context: str = "",
    exception_class: Type[DicomCubeError] = InvalidCubeFileError
) -> str:
    """Validate that a folder exists.

    Args:
        folder_path (str): Path to the folder to validate.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        str: The validated folder path.

    Raises:
        InvalidCubeFileError: If the folder does not exist or is not a directory.
    """
    validate_not_none(folder_path, "folder_path", context, exception_class)

    if not os.path.exists(folder_path):
        raise exception_class(
            "Folder not found",
            context=context,
            details={"folder_path": folder_path},
            suggestion="Verify the folder path is correct and the folder exists"
        )

    if not os.path.isdir(folder_path):
        raise exception_class(
            "Path exists but is not a directory",
            context=context,
            details={"folder_path": folder_path},
            suggestion="Ensure the path points to a directory, not a file"
        )

    return folder_path
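
Both path validators return the validated path, so a caller can validate and use it in one step (a sketch; `read_header` is hypothetical):

    from dicube.validation import validate_file_exists

    def read_header(file_path: str) -> bytes:
        # Fails fast with InvalidCubeFileError (including a suggestion) on bad paths
        file_path = validate_file_exists(file_path, context="read_header operation")
        with open(file_path, "rb") as f:
            return f.read(16)
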
def validate_array_shape(
    array: np.ndarray,
    expected_dims: Optional[int] = None,
    min_dims: Optional[int] = None,
    max_dims: Optional[int] = None,
    name: str = "array",
    context: str = "",
    exception_class: Type[DicomCubeError] = DataConsistencyError
) -> np.ndarray:
    """Validate numpy array shape and dimensions.

    Args:
        array (np.ndarray): The array to validate.
        expected_dims (int, optional): Expected number of dimensions.
        min_dims (int, optional): Minimum number of dimensions.
        max_dims (int, optional): Maximum number of dimensions.
        name (str): Name of the array parameter.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        np.ndarray: The validated array.

    Raises:
        DataConsistencyError: If array shape validation fails.
    """
    validate_not_none(array, name, context, exception_class)

    if not isinstance(array, np.ndarray):
        raise exception_class(
            f"Parameter '{name}' must be a numpy array",
            context=context,
            details={"parameter": name, "type": type(array).__name__},
            suggestion="Convert the data to a numpy array before passing"
        )

    actual_dims = array.ndim

    if expected_dims is not None and actual_dims != expected_dims:
        raise exception_class(
            f"Array '{name}' has incorrect dimensions",
            context=context,
            details={
                "parameter": name,
                "expected_dims": expected_dims,
                "actual_dims": actual_dims,
                "shape": array.shape
            },
            suggestion=f"Ensure the array has exactly {expected_dims} dimensions"
        )

    if min_dims is not None and actual_dims < min_dims:
        raise exception_class(
            f"Array '{name}' has too few dimensions",
            context=context,
            details={
                "parameter": name,
                "min_dims": min_dims,
                "actual_dims": actual_dims,
                "shape": array.shape
            },
            suggestion=f"Ensure the array has at least {min_dims} dimensions"
        )

    if max_dims is not None and actual_dims > max_dims:
        raise exception_class(
            f"Array '{name}' has too many dimensions",
            context=context,
            details={
                "parameter": name,
                "max_dims": max_dims,
                "actual_dims": actual_dims,
                "shape": array.shape
            },
            suggestion=f"Ensure the array has at most {max_dims} dimensions"
        )

    return array
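
For example (hypothetical arrays):

    import numpy as np
    from dicube.validation import validate_array_shape

    frame = np.zeros((64, 64), dtype=np.uint16)
    validate_array_shape(frame, expected_dims=2, name="frame")   # returns frame
    validate_array_shape(frame, expected_dims=3, name="volume")  # raises DataConsistencyError
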
def validate_parameter_type(
    value: Any,
    expected_type: Union[Type, tuple],
    name: str,
    context: str = "",
    exception_class: Type[DicomCubeError] = DataConsistencyError
) -> Any:
    """Validate parameter type.

    Args:
        value: The value to validate.
        expected_type (Type or tuple): Expected type or tuple of types.
        name (str): Name of the parameter being validated.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        Any: The validated value.

    Raises:
        DataConsistencyError: If the value is not of the expected type.
    """
    validate_not_none(value, name, context, exception_class)

    if not isinstance(value, expected_type):
        if isinstance(expected_type, tuple):
            type_names = [t.__name__ for t in expected_type]
            expected_str = " or ".join(type_names)
        else:
            expected_str = expected_type.__name__

        raise exception_class(
            f"Parameter '{name}' has incorrect type",
            context=context,
            details={
                "parameter": name,
                "expected_type": expected_str,
                "actual_type": type(value).__name__,
                "value": repr(value)
            },
            suggestion=f"Ensure '{name}' is of type {expected_str}"
        )

    return value
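
A brief sketch (hypothetical parameter names):

    from dicube.validation import validate_parameter_type

    validate_parameter_type(1.25, (int, float), "spacing")    # returns 1.25
    validate_parameter_type("1.25", (int, float), "spacing")  # raises DataConsistencyError
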
def validate_shape_consistency(
    array1: np.ndarray,
    array2: np.ndarray,
    name1: str = "array1",
    name2: str = "array2",
    context: str = "",
    exception_class: Type[DicomCubeError] = DataConsistencyError
) -> tuple:
    """Validate that two arrays have consistent shapes.

    Args:
        array1 (np.ndarray): First array to compare.
        array2 (np.ndarray): Second array to compare.
        name1 (str): Name of the first array parameter.
        name2 (str): Name of the second array parameter.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        tuple: Both validated arrays.

    Raises:
        DataConsistencyError: If array shapes are inconsistent.
    """
    validate_array_shape(array1, name=name1, context=context, exception_class=exception_class)
    validate_array_shape(array2, name=name2, context=context, exception_class=exception_class)

    if array1.shape != array2.shape:
        raise exception_class(
            f"Arrays '{name1}' and '{name2}' have inconsistent shapes",
            context=context,
            details={
                name1 + "_shape": array1.shape,
                name2 + "_shape": array2.shape
            },
            suggestion="Ensure both arrays have the same dimensions and shape"
        )

    return array1, array2
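
For example, pairing an image with a mismatched mask (hypothetical arrays; the details dict carries keys like image_shape and mask_shape):

    import numpy as np
    from dicube.validation import validate_shape_consistency

    image = np.zeros((2, 4, 4))
    mask = np.zeros((2, 4, 8))
    validate_shape_consistency(image, mask, name1="image", name2="mask")  # raises DataConsistencyError
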
def validate_string_not_empty(
    value: str,
    name: str,
    context: str = "",
    exception_class: Type[DicomCubeError] = DicomCubeError
) -> str:
    """Validate that a string is not None or empty.

    Args:
        value (str): The string value to validate.
        name (str): Name of the parameter being validated.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        str: The validated string.

    Raises:
        DicomCubeError: If the string is None or empty.
    """
    validate_not_none(value, name, context, exception_class)
    validate_parameter_type(value, str, name, context, exception_class)

    if not value.strip():
        raise exception_class(
            f"Parameter '{name}' cannot be empty",
            context=context,
            details={"parameter": name, "value": repr(value)},
            suggestion=f"Provide a non-empty value for '{name}'"
        )

    return value
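
Note that whitespace-only strings count as empty (hypothetical values):

    from dicube.validation import validate_string_not_empty

    validate_string_not_empty("CT", "modality")   # returns "CT"
    validate_string_not_empty("   ", "modality")  # raises DicomCubeError
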
def validate_numeric_range(
    value: Union[int, float],
    name: str,
    min_value: Optional[Union[int, float]] = None,
    max_value: Optional[Union[int, float]] = None,
    context: str = "",
    exception_class: Type[DicomCubeError] = DataConsistencyError
) -> Union[int, float]:
    """Validate that a numeric value is within a specified range.

    Args:
        value (int or float): The numeric value to validate.
        name (str): Name of the parameter being validated.
        min_value (int or float, optional): Minimum allowed value.
        max_value (int or float, optional): Maximum allowed value.
        context (str): Context about the operation being performed.
        exception_class (Type[DicomCubeError]): Exception class to raise on failure.

    Returns:
        int or float: The validated value.

    Raises:
        DataConsistencyError: If the value is outside the specified range.
    """
    validate_not_none(value, name, context, exception_class)
    validate_parameter_type(value, (int, float), name, context, exception_class)

    if min_value is not None and value < min_value:
        raise exception_class(
            f"Parameter '{name}' is below minimum value",
            context=context,
            details={
                "parameter": name,
                "value": value,
                "min_value": min_value
            },
            suggestion=f"Ensure '{name}' is at least {min_value}"
        )

    if max_value is not None and value > max_value:
        raise exception_class(
            f"Parameter '{name}' exceeds maximum value",
            context=context,
            details={
                "parameter": name,
                "value": value,
                "max_value": max_value
            },
            suggestion=f"Ensure '{name}' is at most {max_value}"
        )

    return value
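
Bounds are optional and checked independently (hypothetical values):

    from dicube.validation import validate_numeric_range

    validate_numeric_range(512, "rows", min_value=1, max_value=65535)  # returns 512
    validate_numeric_range(0, "rows", min_value=1)                     # raises DataConsistencyError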