dicube-0.1.4-cp310-cp310-macosx_10_9_x86_64.whl
- dicube/__init__.py +140 -0
- dicube/codecs/__init__.py +152 -0
- dicube/codecs/jph/__init__.py +15 -0
- dicube/codecs/jph/codec.py +161 -0
- dicube/codecs/jph/ojph_complete.cpython-310-darwin.so +0 -0
- dicube/codecs/jph/ojph_complete.cpython-38-darwin.so +0 -0
- dicube/codecs/jph/ojph_complete.cpython-39-darwin.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-310-darwin.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-38-darwin.so +0 -0
- dicube/codecs/jph/ojph_decode_complete.cpython-39-darwin.so +0 -0
- dicube/core/__init__.py +21 -0
- dicube/core/image.py +349 -0
- dicube/core/io.py +354 -0
- dicube/core/pixel_header.py +117 -0
- dicube/dicom/__init__.py +13 -0
- dicube/dicom/dcb_streaming.py +250 -0
- dicube/dicom/dicom_io.py +153 -0
- dicube/dicom/dicom_meta.py +740 -0
- dicube/dicom/dicom_status.py +259 -0
- dicube/dicom/dicom_tags.py +121 -0
- dicube/dicom/merge_utils.py +283 -0
- dicube/dicom/space_from_meta.py +70 -0
- dicube/exceptions.py +189 -0
- dicube/storage/__init__.py +17 -0
- dicube/storage/dcb_file.py +805 -0
- dicube/storage/pixel_utils.py +141 -0
- dicube/utils/__init__.py +6 -0
- dicube/validation.py +380 -0
- dicube-0.1.4.dist-info/METADATA +271 -0
- dicube-0.1.4.dist-info/RECORD +31 -0
- dicube-0.1.4.dist-info/WHEEL +5 -0
dicube/storage/pixel_utils.py
ADDED
@@ -0,0 +1,141 @@
+from typing import Tuple
+
+import numpy as np
+
+from ..core.pixel_header import PixelDataHeader
+
+
+def derive_pixel_header_from_array(
+    image: np.ndarray, preferred_dtype=np.uint16
+) -> Tuple[np.ndarray, PixelDataHeader]:
+    """Derive pixel data header information from input numpy array.
+
+    Process different data types in different ways:
+    - For unsigned integers (uint8/16/32): use raw data directly
+    - For signed integers (int8/16/32): convert to unsigned and record offset
+    - For floating point (float16/32/64): normalize to specified unsigned integer range
+
+    Args:
+        image (np.ndarray): Input image array.
+        preferred_dtype (np.dtype): Preferred output data type. Defaults to np.uint16.
+
+    Returns:
+        Tuple[np.ndarray, PixelDataHeader]: A tuple containing:
+            - The converted image array
+            - A PixelDataHeader object with appropriate metadata
+
+    Raises:
+        ValueError: When preferred_dtype is not supported.
+        NotImplementedError: When input array dtype is not supported.
+    """
+    dtype = str(image.dtype)
+    if image.dtype in (np.uint16, np.uint8, np.uint32):
+        return image, PixelDataHeader(
+            RESCALE_SLOPE=1,
+            RESCALE_INTERCEPT=0,
+            PIXEL_DTYPE=dtype,
+            ORIGINAL_PIXEL_DTYPE=dtype,
+        )
+    elif image.dtype == np.int16:
+        min_val = int(np.min(image))
+        image = (image - min_val).astype("uint16")
+        return image, PixelDataHeader(
+            RESCALE_SLOPE=1,
+            RESCALE_INTERCEPT=min_val,
+            PIXEL_DTYPE="uint16",
+            ORIGINAL_PIXEL_DTYPE=dtype,
+        )
+    elif image.dtype == np.int8:
+        min_val = int(np.min(image))
+        image = (image - min_val).astype("uint8")
+        return image, PixelDataHeader(
+            RESCALE_SLOPE=1,
+            RESCALE_INTERCEPT=min_val,
+            PIXEL_DTYPE="uint8",
+            ORIGINAL_PIXEL_DTYPE=dtype,
+        )
+    elif image.dtype == np.int32:
+        min_val = int(np.min(image))
+        image = (image - min_val).astype("uint32")
+        return image, PixelDataHeader(
+            RESCALE_SLOPE=1,
+            RESCALE_INTERCEPT=min_val,
+            PIXEL_DTYPE="uint32",
+            ORIGINAL_PIXEL_DTYPE=dtype,
+        )
+    elif image.dtype in (np.float16, np.float32, np.float64):
+        if preferred_dtype == "uint8":
+            dtype_max = 255
+        elif preferred_dtype == "uint16":
+            dtype_max = 65535
+        else:
+            raise ValueError(f"Unsupported preferred_dtype: {preferred_dtype}")
+
+        min_val = image.min()
+        max_val = image.max()
+        if np.isclose(min_val, max_val):
+            # For constant value arrays:
+            # Set all pixels to 0, slope=1, intercept=min_val
+            # When reading back: i*slope+intercept = min_val
+            header = PixelDataHeader(
+                RESCALE_SLOPE=1.0,
+                RESCALE_INTERCEPT=float(min_val),
+                PIXEL_DTYPE=preferred_dtype,
+                ORIGINAL_PIXEL_DTYPE=dtype,
+            )
+            raw_image = np.zeros_like(image, dtype=preferred_dtype)
+            return raw_image, header
+        else:
+            slope = float(max_val - min_val)
+            intercept = float(min_val)
+            raw_image = ((image - intercept) / slope * dtype_max).astype(
+                preferred_dtype
+            )
+            header = PixelDataHeader(
+                RESCALE_SLOPE=slope,
+                RESCALE_INTERCEPT=intercept,
+                PIXEL_DTYPE=preferred_dtype,
+                ORIGINAL_PIXEL_DTYPE=dtype,
+                MAX_VAL=max_val,
+                MIN_VAL=min_val,
+            )
+            return raw_image, header
+    else:
+        raise NotImplementedError("Unsupported dtype")
+
+
+def get_float_data(
+    raw_image: np.ndarray, pixel_header: PixelDataHeader, dtype="float32"
+) -> np.ndarray:
+    """Get image data as floating point array with slope/intercept applied.
+
+    Inspired by NIfTI's get_fdata method, this converts the raw image data
+    to floating point format and applies the rescale slope and intercept.
+
+    Args:
+        raw_image (np.ndarray): Raw image data array.
+        pixel_header (PixelDataHeader): Pixel data header containing rescale information.
+        dtype (str): Output data type, must be one of: float16, float32, float64.
+            Defaults to "float32".
+
+    Returns:
+        np.ndarray: Floating point image data with rescale factors applied.
+
+    Raises:
+        AssertionError: If dtype is not one of the allowed float types.
+    """
+    assert dtype in (
+        "float16",
+        "float32",
+        "float64",
+    ), "only accept float16, float32, float64"
+
+    # Note: Output may be positive or negative depending on original dtype and slope/intercept
+    output_img = raw_image.astype(dtype)
+    if pixel_header.RESCALE_SLOPE is not None:
+        slope = np.array(pixel_header.RESCALE_SLOPE).astype(dtype)
+        output_img *= slope
+    if pixel_header.RESCALE_INTERCEPT is not None:
+        intercept = np.array(pixel_header.RESCALE_INTERCEPT).astype(dtype)
+        output_img += intercept
+    return output_img
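
Usage sketch (illustrative, not part of the wheel contents): a minimal round trip through pixel_utils, assuming dicube 0.1.4 is installed and that PixelDataHeader exposes the RESCALE_SLOPE/RESCALE_INTERCEPT attributes read by get_float_data above. Note that the float branch compares preferred_dtype against the strings "uint8"/"uint16", so pass one of those explicitly for floating-point input.

import numpy as np

from dicube.storage.pixel_utils import derive_pixel_header_from_array, get_float_data

# int16 input: shifted to uint16, with the offset recorded as the intercept.
ct = np.arange(-1000, 1048, dtype=np.int16).reshape(32, 64)
raw, header = derive_pixel_header_from_array(ct)
print(raw.dtype, header.RESCALE_INTERCEPT)   # uint16 -1000
restored = get_float_data(raw, header)       # raw * slope + intercept
print(np.allclose(restored, ct))             # True: exact for integer input

# float32 input: min-max scaled into the requested unsigned range.
vol = np.linspace(0.0, 1.0, 4096, dtype=np.float32).reshape(64, 64)
raw, header = derive_pixel_header_from_array(vol, preferred_dtype="uint16")
print(raw.min(), raw.max())                  # 0 65535
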
dicube/utils/__init__.py
ADDED
dicube/validation.py
ADDED
@@ -0,0 +1,380 @@
+"""Validation utilities for DiCube library.
+
+This module provides reusable validation functions that automatically raise
+appropriate DicomCubeError exceptions with consistent error messages and context.
+These utilities help minimize code duplication and provide clean error handling
+throughout the DiCube library.
+"""
+
+import os
+from typing import Any, Type, Union, Optional
+import numpy as np
+
+from .exceptions import (
+    DicomCubeError,
+    InvalidCubeFileError,
+    DataConsistencyError,
+    MetaDataError
+)
+
+
+def validate_not_none(
+    value: Any,
+    name: str,
+    context: str = "",
+    exception_class: Type[DicomCubeError] = DicomCubeError
+) -> Any:
+    """Validate that a value is not None.
+
+    Args:
+        value: The value to validate.
+        name (str): Name of the parameter being validated.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        Any: The validated value if not None.
+
+    Raises:
+        DicomCubeError: If the value is None.
+    """
+    if value is None:
+        raise exception_class(
+            f"Parameter '{name}' cannot be None",
+            context=context,
+            details={"parameter": name, "value": value}
+        )
+    return value
+
+
+def validate_file_exists(
+    file_path: str,
+    context: str = "",
+    exception_class: Type[DicomCubeError] = InvalidCubeFileError
+) -> str:
+    """Validate that a file exists.
+
+    Args:
+        file_path (str): Path to the file to validate.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        str: The validated file path.
+
+    Raises:
+        InvalidCubeFileError: If the file does not exist.
+    """
+    validate_not_none(file_path, "file_path", context, exception_class)
+
+    if not os.path.exists(file_path):
+        raise exception_class(
+            "File not found",
+            context=context,
+            details={"file_path": file_path},
+            suggestion="Verify the file path is correct and the file exists"
+        )
+
+    if not os.path.isfile(file_path):
+        raise exception_class(
+            "Path exists but is not a file",
+            context=context,
+            details={"file_path": file_path},
+            suggestion="Ensure the path points to a file, not a directory"
+        )
+
+    return file_path
+
+
+def validate_folder_exists(
+    folder_path: str,
+    context: str = "",
+    exception_class: Type[DicomCubeError] = InvalidCubeFileError
+) -> str:
+    """Validate that a folder exists.
+
+    Args:
+        folder_path (str): Path to the folder to validate.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        str: The validated folder path.
+
+    Raises:
+        InvalidCubeFileError: If the folder does not exist or is not a directory.
+    """
+    validate_not_none(folder_path, "folder_path", context, exception_class)
+
+    if not os.path.exists(folder_path):
+        raise exception_class(
+            "Folder not found",
+            context=context,
+            details={"folder_path": folder_path},
+            suggestion="Verify the folder path is correct and the folder exists"
+        )
+
+    if not os.path.isdir(folder_path):
+        raise exception_class(
+            "Path exists but is not a directory",
+            context=context,
+            details={"folder_path": folder_path},
+            suggestion="Ensure the path points to a directory, not a file"
+        )
+
+    return folder_path
+
+
+def validate_array_shape(
+    array: np.ndarray,
+    expected_dims: Optional[int] = None,
+    min_dims: Optional[int] = None,
+    max_dims: Optional[int] = None,
+    name: str = "array",
+    context: str = "",
+    exception_class: Type[DicomCubeError] = DataConsistencyError
+) -> np.ndarray:
+    """Validate numpy array shape and dimensions.
+
+    Args:
+        array (np.ndarray): The array to validate.
+        expected_dims (int, optional): Expected number of dimensions.
+        min_dims (int, optional): Minimum number of dimensions.
+        max_dims (int, optional): Maximum number of dimensions.
+        name (str): Name of the array parameter.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        np.ndarray: The validated array.
+
+    Raises:
+        DataConsistencyError: If array shape validation fails.
+    """
+    validate_not_none(array, name, context, exception_class)
+
+    if not isinstance(array, np.ndarray):
+        raise exception_class(
+            f"Parameter '{name}' must be a numpy array",
+            context=context,
+            details={"parameter": name, "type": type(array).__name__},
+            suggestion="Convert the data to a numpy array before passing"
+        )
+
+    actual_dims = array.ndim
+
+    if expected_dims is not None and actual_dims != expected_dims:
+        raise exception_class(
+            f"Array '{name}' has incorrect dimensions",
+            context=context,
+            details={
+                "parameter": name,
+                "expected_dims": expected_dims,
+                "actual_dims": actual_dims,
+                "shape": array.shape
+            },
+            suggestion=f"Ensure the array has exactly {expected_dims} dimensions"
+        )
+
+    if min_dims is not None and actual_dims < min_dims:
+        raise exception_class(
+            f"Array '{name}' has too few dimensions",
+            context=context,
+            details={
+                "parameter": name,
+                "min_dims": min_dims,
+                "actual_dims": actual_dims,
+                "shape": array.shape
+            },
+            suggestion=f"Ensure the array has at least {min_dims} dimensions"
+        )
+
+    if max_dims is not None and actual_dims > max_dims:
+        raise exception_class(
+            f"Array '{name}' has too many dimensions",
+            context=context,
+            details={
+                "parameter": name,
+                "max_dims": max_dims,
+                "actual_dims": actual_dims,
+                "shape": array.shape
+            },
+            suggestion=f"Ensure the array has at most {max_dims} dimensions"
+        )
+
+    return array
+
+
+def validate_parameter_type(
+    value: Any,
+    expected_type: Union[Type, tuple],
+    name: str,
+    context: str = "",
+    exception_class: Type[DicomCubeError] = DataConsistencyError
+) -> Any:
+    """Validate parameter type.
+
+    Args:
+        value: The value to validate.
+        expected_type (Type or tuple): Expected type or tuple of types.
+        name (str): Name of the parameter being validated.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        Any: The validated value.
+
+    Raises:
+        DataConsistencyError: If the value is not of the expected type.
+    """
+    validate_not_none(value, name, context, exception_class)
+
+    if not isinstance(value, expected_type):
+        if isinstance(expected_type, tuple):
+            type_names = [t.__name__ for t in expected_type]
+            expected_str = " or ".join(type_names)
+        else:
+            expected_str = expected_type.__name__
+
+        raise exception_class(
+            f"Parameter '{name}' has incorrect type",
+            context=context,
+            details={
+                "parameter": name,
+                "expected_type": expected_str,
+                "actual_type": type(value).__name__,
+                "value": repr(value)
+            },
+            suggestion=f"Ensure '{name}' is of type {expected_str}"
+        )
+
+    return value
+
+
+def validate_shape_consistency(
+    array1: np.ndarray,
+    array2: np.ndarray,
+    name1: str = "array1",
+    name2: str = "array2",
+    context: str = "",
+    exception_class: Type[DicomCubeError] = DataConsistencyError
+) -> tuple:
+    """Validate that two arrays have consistent shapes.
+
+    Args:
+        array1 (np.ndarray): First array to compare.
+        array2 (np.ndarray): Second array to compare.
+        name1 (str): Name of the first array parameter.
+        name2 (str): Name of the second array parameter.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        tuple: Both validated arrays.
+
+    Raises:
+        DataConsistencyError: If array shapes are inconsistent.
+    """
+    validate_array_shape(array1, name=name1, context=context, exception_class=exception_class)
+    validate_array_shape(array2, name=name2, context=context, exception_class=exception_class)
+
+    if array1.shape != array2.shape:
+        raise exception_class(
+            f"Arrays '{name1}' and '{name2}' have inconsistent shapes",
+            context=context,
+            details={
+                name1 + "_shape": array1.shape,
+                name2 + "_shape": array2.shape
+            },
+            suggestion="Ensure both arrays have the same dimensions and shape"
+        )
+
+    return array1, array2
+
+
+def validate_string_not_empty(
+    value: str,
+    name: str,
+    context: str = "",
+    exception_class: Type[DicomCubeError] = DicomCubeError
+) -> str:
+    """Validate that a string is not None or empty.
+
+    Args:
+        value (str): The string value to validate.
+        name (str): Name of the parameter being validated.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        str: The validated string.
+
+    Raises:
+        DicomCubeError: If the string is None or empty.
+    """
+    validate_not_none(value, name, context, exception_class)
+    validate_parameter_type(value, str, name, context, exception_class)
+
+    if not value.strip():
+        raise exception_class(
+            f"Parameter '{name}' cannot be empty",
+            context=context,
+            details={"parameter": name, "value": repr(value)},
+            suggestion=f"Provide a non-empty value for '{name}'"
+        )
+
+    return value
+
+
+def validate_numeric_range(
+    value: Union[int, float],
+    name: str,
+    min_value: Optional[Union[int, float]] = None,
+    max_value: Optional[Union[int, float]] = None,
+    context: str = "",
+    exception_class: Type[DicomCubeError] = DataConsistencyError
+) -> Union[int, float]:
+    """Validate that a numeric value is within a specified range.
+
+    Args:
+        value (int or float): The numeric value to validate.
+        name (str): Name of the parameter being validated.
+        min_value (int or float, optional): Minimum allowed value.
+        max_value (int or float, optional): Maximum allowed value.
+        context (str): Context about the operation being performed.
+        exception_class (Type[DicomCubeError]): Exception class to raise on failure.
+
+    Returns:
+        int or float: The validated value.
+
+    Raises:
+        DataConsistencyError: If the value is outside the specified range.
+    """
+    validate_not_none(value, name, context, exception_class)
+    validate_parameter_type(value, (int, float), name, context, exception_class)
+
+    if min_value is not None and value < min_value:
+        raise exception_class(
+            f"Parameter '{name}' is below minimum value",
+            context=context,
+            details={
+                "parameter": name,
+                "value": value,
+                "min_value": min_value
+            },
+            suggestion=f"Ensure '{name}' is at least {min_value}"
+        )
+
+    if max_value is not None and value > max_value:
+        raise exception_class(
+            f"Parameter '{name}' exceeds maximum value",
+            context=context,
+            details={
+                "parameter": name,
+                "value": value,
+                "max_value": max_value
+            },
+            suggestion=f"Ensure '{name}' is at most {max_value}"
+        )
+
+    return value
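
Usage sketch (illustrative, not part of the wheel contents): assuming dicube 0.1.4 is installed, each validator returns the validated value on success and raises the configured DicomCubeError subclass on failure, so calls can be chained inline. The context strings below are hypothetical.

import numpy as np

from dicube.exceptions import DataConsistencyError
from dicube.validation import validate_array_shape, validate_numeric_range

# Passes: returns the array, so the call can wrap an assignment.
img = validate_array_shape(
    np.zeros((16, 16), dtype=np.uint16),
    expected_dims=2,
    name="img",
    context="load_image operation",
)

# Fails: a 2-D array where a 3-D volume is required.
try:
    validate_array_shape(img, expected_dims=3, name="img", context="load_volume operation")
except DataConsistencyError as exc:
    print(exc)  # exception was constructed with context, details, and a suggestion

# Range check with only a lower bound.
slope = validate_numeric_range(0.5, "slope", min_value=0.0, context="rescale operation")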