stouputils 1.2.41.tar.gz → 1.2.43.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {stouputils-1.2.41 → stouputils-1.2.43}/PKG-INFO +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/pyproject.toml +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/all_doctests.py +7 -4
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/config/set.py +3 -3
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/blur.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/brightness.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/clahe.py +4 -4
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/contrast.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/histogram_equalization.py +4 -4
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/invert.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/noise.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/random_erase.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/sharpening.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/technique.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/image_loader.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/xy_tuple.py +2 -2
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/metric_utils.py +16 -16
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/mlflow_utils.py +13 -7
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/all.py +0 -14
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/all.py +1 -19
- stouputils-1.2.43/stouputils/data_science/models/keras/resnet.py +52 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py +1 -1
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/model_interface.py +2 -2
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/range_tuple.py +4 -4
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/exhaustive_process.py +2 -2
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/routine.py +2 -2
- stouputils-1.2.41/stouputils/data_science/models/keras/resnet.py +0 -99
- {stouputils-1.2.41 → stouputils-1.2.43}/.gitignore +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/LICENSE +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/README.md +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/applications/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/applications/automatic_docs.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/applications/upscaler/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/applications/upscaler/config.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/applications/upscaler/image.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/applications/upscaler/video.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/archive.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/backup.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/collections.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/continuous_delivery/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/continuous_delivery/cd_utils.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/continuous_delivery/github.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/continuous_delivery/pypi.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/continuous_delivery/pyproject.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/ctx.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/config/get.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/auto_contrast.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/axis_flip.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/bias_field_correction.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/binary_threshold.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/canny.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/common.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/curvature_flow_filter.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/denoise.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/laplacian.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/median_blur.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/normalize.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/resize.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/rotation.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/salt_pepper.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/shearing.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/threshold.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/translation.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/zoom.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image_augmentation.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image_preprocess.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/prosthesis_detection.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/dataset.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/dataset_loader.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/grouping_strategy.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/metric_dictionnary.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/abstract_model.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/base_keras.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/convnext.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/densenet.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/efficientnet.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/mobilenet.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/squeezenet.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/vgg.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/xception.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/callbacks/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/callbacks/colored_progress_bar.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/callbacks/warmup_scheduler.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/losses/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/losses/next_generation_loss.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/visualizations.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/sandbox.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/augment_dataset.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/preprocess_dataset.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/utils.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/decorators.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/dont_look/zip_file_override.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/image.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/installer/__init__.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/installer/common.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/installer/downloader.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/installer/linux.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/installer/main.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/installer/windows.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/io.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/parallel.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/print.py +0 -0
- {stouputils-1.2.41 → stouputils-1.2.43}/stouputils/py.typed +0 -0
{stouputils-1.2.41 → stouputils-1.2.43}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: stouputils
-Version: 1.2.41
+Version: 1.2.43
 Summary: Stouputils is a collection of utility modules designed to simplify and enhance the development process. It includes a range of tools for tasks such as execution of doctests, display utilities, decorators, as well as context managers, and many more.
 Project-URL: Homepage, https://github.com/Stoupy51/stouputils
 Project-URL: Issues, https://github.com/Stoupy51/stouputils/issues

{stouputils-1.2.41 → stouputils-1.2.43}/pyproject.toml
RENAMED

@@ -5,7 +5,7 @@ build-backend = "hatchling.build"

 [project]
 name = "stouputils"
-version = "1.2.41"
+version = "1.2.43"
 description = "Stouputils is a collection of utility modules designed to simplify and enhance the development process. It includes a range of tools for tasks such as execution of doctests, display utilities, decorators, as well as context managers, and many more."
 readme = "README.md"
 requires-python = ">=3.10"

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/all_doctests.py
RENAMED

@@ -14,8 +14,8 @@ from doctest import TestResults, testmod
 from types import ModuleType

 from . import decorators
-from .decorators import LogLevels, handle_error, measure_time
-from .print import error, info, progress
+from .decorators import LogLevels, measure_time
+from .print import error, info, progress, warning
 from .io import clean_path, relative_path


@@ -104,12 +104,15 @@ def launch_tests(root_dir: str, importing_errors: LogLevels = LogLevels.WARNING_
 	for module_path in modules_file_paths:
 		separator: str = " " * (max_length - len(module_path))

-		@handle_error(error_log=importing_errors)
 		@measure_time(progress, message=f"Importing module '{module_path}' {separator}took")
 		def internal(a: str = module_path, b: str = separator) -> None:
 			modules.append(importlib.import_module(a))
 			separators.append(b)
-
+
+		try:
+			internal()
+		except Exception as e:
+			warning(f"Failed to import module '{module_path}': ({type(e).__name__}) {e}")

 	# Run tests for each module
 	info(f"Testing {len(modules)} modules...")

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/config/set.py
RENAMED

@@ -24,13 +24,13 @@ class DataScienceConfig:
 	""" Log level for errors for all functions. """

 	AUGMENTED_FILE_SUFFIX: str = "_aug_"
-	""" Suffix for augmented files,
+	""" Suffix for augmented files, e.g. 'image_008_aug_1.png'. """

 	AUGMENTED_DIRECTORY_PREFIX: str = "aug_"
-	""" Prefix for augmented directories,
+	""" Prefix for augmented directories, e.g. 'data/hip_implant' -> 'data/aug_hip_implant'. """

 	PREPROCESSED_DIRECTORY_SUFFIX: str = "_preprocessed"
-	""" Suffix for preprocessed directories,
+	""" Suffix for preprocessed directories, e.g. 'data/hip_implant' -> 'data/hip_implant_preprocessed'. """


 	# Directories
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/blur.py
RENAMED

@@ -26,7 +26,7 @@ def blur_image(image: NDArray[Any], blur_strength: float, ignore_dtype: bool = F
 	>>> img = np.zeros((5,5), dtype=np.uint8)
 	>>> img[2,2] = 255 # Single bright pixel
 	>>> blurred = blur_image(img, 1.0)
-	>>> blurred[2,2] < 255 # Center should be blurred
+	>>> bool(blurred[2,2] < 255) # Center should be blurred
 	True

 	>>> rgb = np.full((3,3,3), 128, dtype=np.uint8)
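Note: the doctest edits in this module and in the other image-processing modules below wrap NumPy results in bool() (and, further down, float()/int()). Background, not stated in the package itself: comparisons and reductions on NumPy arrays return NumPy scalar types, and since NumPy 2.0 their repr spells out the type (np.True_, np.float64(0.5)), which no longer matches a bare True or 0.5 in doctest output. A minimal standalone sketch:

import numpy as np

flag = np.mean(np.full((3, 3), 100, dtype=np.uint8)) > 50  # np.bool_, not a Python bool
print(repr(flag))        # 'np.True_' on NumPy >= 2.0, 'True' on NumPy 1.x
print(repr(bool(flag)))  # always 'True', so a doctest expecting True stays stable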
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/brightness.py
RENAMED

@@ -26,7 +26,7 @@ def brightness_image(image: NDArray[Any], brightness_factor: float, ignore_dtype
 	>>> img = np.full((3,3), 100, dtype=np.uint8)
 	>>> bright = brightness_image(img, 2.0)
 	>>> dark = brightness_image(img, 0.5)
-	>>> np.mean(bright) > np.mean(img) > np.mean(dark)
+	>>> bool(np.mean(bright) > np.mean(img) > np.mean(dark))
 	True

 	>>> rgb = np.full((3,3,3), 128, dtype=np.uint8)
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/clahe.py
RENAMED

@@ -34,17 +34,17 @@ def clahe_image(
 	>>> img = np.full((10,10), 128, dtype=np.uint8)
 	>>> img[2:8, 2:8] = 200 # Create a bright region
 	>>> clahed = clahe_image(img, 2.0, 4)
-	>>> np.mean(clahed) > np.mean(img) # Should enhance contrast
+	>>> bool(np.mean(clahed) > np.mean(img)) # Should enhance contrast
 	True
-	>>> np.std(clahed) > np.std(img) # Should enhance contrast
+	>>> bool(np.std(clahed) > np.std(img)) # Should enhance contrast
 	True

 	>>> rgb = np.full((10,10,3), 128, dtype=np.uint8)
 	>>> rgb[2:8, 2:8, :] = 50 # Create a dark region
 	>>> clahed_rgb = clahe_image(rgb, 2.0, 4)
-	>>> np.mean(clahed_rgb) > np.mean(rgb) # Should enhance contrast
+	>>> bool(np.mean(clahed_rgb) > np.mean(rgb)) # Should enhance contrast
 	True
-	>>> np.std(clahed_rgb) > np.std(rgb) # Should enhance contrast
+	>>> bool(np.std(clahed_rgb) > np.std(rgb)) # Should enhance contrast
 	True
 	>>> clahed_rgb.shape == rgb.shape
 	True
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/contrast.py
RENAMED

@@ -23,7 +23,7 @@ def contrast_image(image: NDArray[Any], factor: float, ignore_dtype: bool = Fals
 	>>> img = np.array([[50, 100, 150]], dtype=np.uint8)
 	>>> high = contrast_image(img, 2.0)
 	>>> low = contrast_image(img, 0.5)
-	>>> high.std() > img.std() > low.std() # Higher contrast = higher std
+	>>> bool(high.std() > img.std() > low.std()) # Higher contrast = higher std
 	True

 	>>> rgb = np.full((3,3,3), 128, dtype=np.uint8)

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/histogram_equalization.py
RENAMED

@@ -52,7 +52,7 @@ def histogram_equalization_image(
 	>>> rgb = np.full((3,3,3), 128, dtype=np.uint8)
 	>>> rgb[1, 1, :] = 50 # Create a dark region
 	>>> equalized_rgb = histogram_equalization_image(rgb)
-	>>> np.std(equalized_rgb) > np.std(rgb) # Should enhance contrast
+	>>> bool(np.std(equalized_rgb) > np.std(rgb)) # Should enhance contrast
 	True
 	>>> equalized_rgb.tolist()
 	[[[255, 255, 255], [255, 255, 255], [255, 255, 255]], [[255, 255, 255], [0, 0, 0], [255, 255, 255]], [[255, 255, 255], [255, 255, 255], [255, 255, 255]]]

@@ -65,21 +65,21 @@ def histogram_equalization_image(
 	>>> lab_result = histogram_equalization_image(test_img, color_space="lab")
 	>>> isinstance(lab_result, np.ndarray) and lab_result.shape == test_img.shape
 	True
-	>>> np.std(lab_result) > np.std(test_img) # Verify contrast enhancement
+	>>> bool(np.std(lab_result) > np.std(test_img)) # Verify contrast enhancement
 	True

 	>>> # Test YCbCr color space
 	>>> ycbcr_result = histogram_equalization_image(test_img, color_space="ycbcr")
 	>>> isinstance(ycbcr_result, np.ndarray) and ycbcr_result.shape == test_img.shape
 	True
-	>>> np.std(ycbcr_result) > np.std(test_img) # Verify contrast enhancement
+	>>> bool(np.std(ycbcr_result) > np.std(test_img)) # Verify contrast enhancement
 	True

 	>>> # Test HSV color space
 	>>> hsv_result = histogram_equalization_image(test_img, color_space="hsv")
 	>>> isinstance(hsv_result, np.ndarray) and hsv_result.shape == test_img.shape
 	True
-	>>> np.std(hsv_result) > np.std(test_img) # Verify contrast enhancement
+	>>> bool(np.std(hsv_result) > np.std(test_img)) # Verify contrast enhancement
 	True

 	>>> ## Test invalid inputs
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/invert.py
RENAMED

@@ -24,7 +24,7 @@ def invert_image(image: NDArray[Any], ignore_dtype: bool = False) -> NDArray[Any

 	>>> # Test with floating point image
 	>>> float_img = np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32)
-	>>> [round(x, 1) for x in invert_image(float_img).flatten()]
+	>>> [round(float(x), 1) for x in invert_image(float_img).flatten()]
 	[0.9, 0.8, 0.7, 0.6]

 	>>> # Test with RGB image
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/noise.py
RENAMED

@@ -19,7 +19,7 @@ def noise_image(image: NDArray[Any], amount: float, ignore_dtype: bool = False)
 	>>> noisy = noise_image(image.astype(np.uint8), 0.5)
 	>>> noisy.shape == image.shape
 	True
-	>>> np.all(noisy >= 0) and np.all(noisy <= 255)
+	>>> bool(np.all(noisy >= 0) and np.all(noisy <= 255))
 	True

 	>>> np.random.seed(0)

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/random_erase.py
RENAMED

@@ -25,7 +25,7 @@ def random_erase_image(image: NDArray[Any], erase_factor: float, ignore_dtype: b
 	>>> np.random.seed(42)
 	>>> img = np.ones((5,5), dtype=np.uint8) * 255
 	>>> erased = random_erase_image(img, 0.4)
-	>>> np.any(erased == 0) # Should have some erased pixels
+	>>> bool(np.any(erased == 0)) # Should have some erased pixels
 	True

 	>>> rgb = np.full((3,3,3), 128, dtype=np.uint8)
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/sharpening.py
RENAMED

@@ -26,7 +26,7 @@ def sharpen_image(image: NDArray[Any], alpha: float, ignore_dtype: bool = False)
 	>>> img = np.full((5,5), 128, dtype=np.uint8)
 	>>> img[2,2] = 255 # Center bright pixel
 	>>> sharp = sharpen_image(img, 1.0)
-	>>> sharp[2,2] > img[2,2] * 0.9 # Center should stay bright
+	>>> bool(sharp[2,2] > img[2,2] * 0.9) # Center should stay bright
 	True

 	>>> rgb = np.full((3,3,3), 128, dtype=np.uint8)
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/technique.py
RENAMED

@@ -104,7 +104,7 @@ class ProcessingTechnique(NamedTuple):
 	Should be used on techniques like "axis_flip" or "random_erase"
 	where the probability of applying the technique is not 100%. """
 	custom: Callable[..., NDArray[Any]] | None = None
-	""" Custom processing technique (callable), name must be "custom",
+	""" Custom processing technique (callable), name must be "custom", e.g. ProcessingTechnique("custom", custom=f) """

 	def __str__(self) -> str:
 		return (

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/image_loader.py
RENAMED

@@ -74,7 +74,7 @@ def load_images_from_directory(

 	# Add channel dimension if grayscale
 	if is_grayscale:
-		img_array = np.expand_dims(img_array, axis=-1) # Add channel dimension,
+		img_array = np.expand_dims(img_array, axis=-1) # Add channel dimension, e.g. (224, 224, 1)

 	return img_array, img_path

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/xy_tuple.py
RENAMED

@@ -125,7 +125,7 @@ class XyTuple(tuple[list[list[Any]], list[Any], tuple[tuple[str, ...], ...]]):
 		self.filepaths: tuple[tuple[str, ...], ...] = self[2]
 		""" List of filepaths corresponding to the features (one file = list with one element) """
 		self.augmented_files: dict[str, str] = self.update_augmented_files()
-		""" Dictionary mapping all files to their original filepath,
+		""" Dictionary mapping all files to their original filepath, e.g. {"file1_aug_1.jpg": "file1.jpg"} """

 	@property
 	def n_samples(self) -> int:

@@ -269,7 +269,7 @@ class XyTuple(tuple[list[list[Any]], list[Any], tuple[tuple[str, ...], ...]]):
 	>>> indices, labels = xy.group_by_original()
 	>>> sorted(indices.items())
 	[('file1.jpg', [0, 2]), ('file2.jpg', [1])]
-	>>> sorted(labels.items())
+	>>> [(x, str(y)) for x, y in sorted(labels.items())]
 	[('file1.jpg', 'a'), ('file2.jpg', 'b')]
 	"""
 	# Initializations
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/metric_utils.py
RENAMED

@@ -73,17 +73,17 @@ class MetricUtils:
 	...     metrics = MetricUtils.metrics(dataset, predictions, run_name="")

 	>>> # Check metrics
-	>>> round(metrics[MetricDictionnary.ACCURACY], 2)
+	>>> round(float(metrics[MetricDictionnary.ACCURACY]), 2)
 	0.67
-	>>> round(metrics[MetricDictionnary.PRECISION], 2)
+	>>> round(float(metrics[MetricDictionnary.PRECISION]), 2)
 	0.5
-	>>> round(metrics[MetricDictionnary.RECALL], 2)
+	>>> round(float(metrics[MetricDictionnary.RECALL]), 2)
 	1.0
-	>>> round(metrics[MetricDictionnary.F1_SCORE], 2)
+	>>> round(float(metrics[MetricDictionnary.F1_SCORE]), 2)
 	0.67
-	>>> round(metrics[MetricDictionnary.AUC], 2)
+	>>> round(float(metrics[MetricDictionnary.AUC]), 2)
 	0.75
-	>>> round(metrics[MetricDictionnary.MATTHEWS_CORRELATION_COEFFICIENT], 2)
+	>>> round(float(metrics[MetricDictionnary.MATTHEWS_CORRELATION_COEFFICIENT]), 2)
 	0.5
 	"""
 	# Initialize metrics

@@ -159,15 +159,15 @@ class MetricUtils:
 	...     metrics = MetricUtils.confusion_matrix(true_classes, pred_classes, labels, run_name="")

 	>>> # Check metrics
-	>>> metrics[MetricDictionnary.CONFUSION_MATRIX_TN]
+	>>> int(metrics[MetricDictionnary.CONFUSION_MATRIX_TN])
 	1
-	>>> metrics[MetricDictionnary.CONFUSION_MATRIX_FP]
+	>>> int(metrics[MetricDictionnary.CONFUSION_MATRIX_FP])
 	1
-	>>> metrics[MetricDictionnary.CONFUSION_MATRIX_FN]
+	>>> int(metrics[MetricDictionnary.CONFUSION_MATRIX_FN])
 	0
-	>>> metrics[MetricDictionnary.CONFUSION_MATRIX_TP]
+	>>> int(metrics[MetricDictionnary.CONFUSION_MATRIX_TP])
 	1
-	>>> metrics[MetricDictionnary.FALSE_POSITIVE_RATE]
+	>>> round(float(metrics[MetricDictionnary.FALSE_POSITIVE_RATE]), 2)
 	0.5
 	"""
 	metrics: dict[str, float] = {}

@@ -243,7 +243,7 @@ class MetricUtils:
 	>>> from stouputils.ctx import Muffle
 	>>> with Muffle():
 	...     metrics = MetricUtils.f_scores(precision=0.5, recall=1.0)
-	>>> [round(x, 2) for x in metrics.values()]
+	>>> [round(float(x), 2) for x in metrics.values()]
 	[0.5, 0.51, 0.54, 0.58, 0.62, 0.67, 0.71, 0.75, 0.78, 0.81, 0.83]

 	"""

@@ -279,7 +279,7 @@ class MetricUtils:
 	>>> from stouputils.ctx import Muffle
 	>>> with Muffle():
 	...     metrics = MetricUtils.matthews_correlation(true_classes, pred_classes)
-	>>> metrics[MetricDictionnary.MATTHEWS_CORRELATION_COEFFICIENT]
+	>>> float(metrics[MetricDictionnary.MATTHEWS_CORRELATION_COEFFICIENT])
 	0.5
 	"""
 	return {MetricDictionnary.MATTHEWS_CORRELATION_COEFFICIENT: matthews_corrcoef(true_classes, pred_classes)}

@@ -314,11 +314,11 @@ class MetricUtils:
 	...     metrics = MetricUtils.roc_and_auc(true_classes, pred_probs, run_name="")

 	>>> # Check metrics
-	>>> metrics[MetricDictionnary.AUC]
+	>>> round(float(metrics[MetricDictionnary.AUC]), 2)
 	0.75
-	>>> metrics[MetricDictionnary.OPTIMAL_THRESHOLD_YOUDEN]
+	>>> round(float(metrics[MetricDictionnary.OPTIMAL_THRESHOLD_YOUDEN]), 2)
 	0.9
-	>>> metrics[MetricDictionnary.OPTIMAL_THRESHOLD_COST]
+	>>> float(metrics[MetricDictionnary.OPTIMAL_THRESHOLD_COST])
 	inf
 	"""
 	true_classes = Utils.convert_to_class_indices(true_classes)
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/mlflow_utils.py
RENAMED

@@ -18,16 +18,18 @@ import mlflow
 from mlflow.entities import Experiment, Run

 from ..decorators import handle_error, LogLevels
+from ..io import clean_path


 # Get artifact path
-def get_artifact_path(from_string: str = "") -> str:
+def get_artifact_path(from_string: str = "", os_name: str = os.name) -> str:
 	""" Get the artifact path from the current mlflow run (without the file:// prefix).

 	Handles the different path formats for Windows and Unix-based systems.

 	Args:
 		from_string (str): Path to the artifact (optional, defaults to the current mlflow run)
+		os_name (str): OS name (optional, defaults to os.name)
 	Returns:
 		str: The artifact path
 	"""

@@ -38,26 +40,30 @@ def get_artifact_path(from_string: str = "") -> str:
 	artifact_path: str = from_string

 	# Handle the different path formats for Windows and Unix-based systems
-	if os.name == "nt":
+	if os_name == "nt":
 		return artifact_path.replace("file:///", "")
 	else:
 		return artifact_path.replace("file://", "")

 # Get weights path
-def get_weights_path(from_string: str = "", weights_name: str = "best_model.keras") -> str:
+def get_weights_path(from_string: str = "", weights_name: str = "best_model.keras", os_name: str = os.name) -> str:
 	""" Get the weights path from the current mlflow run.

 	Args:
-		from_string
-		weights_name
+		from_string (str): Path to the artifact (optional, defaults to the current mlflow run)
+		weights_name (str): Name of the weights file (optional, defaults to "best_model.keras")
+		os_name (str): OS name (optional, defaults to os.name)
 	Returns:
 		str: The weights path

 	Examples:
-		>>> get_weights_path(from_string="file:///path/to/artifact", weights_name="best_model.keras")
+		>>> get_weights_path(from_string="file:///path/to/artifact", weights_name="best_model.keras", os_name="posix")
 		'/path/to/artifact/best_model.keras'
+
+		>>> get_weights_path(from_string="file:///C:/path/to/artifact", weights_name="best_model.keras", os_name="nt")
+		'C:/path/to/artifact/best_model.keras'
 	"""
-	return
+	return clean_path(f"{get_artifact_path(from_string=from_string, os_name=os_name)}/{weights_name}")

 # Get runs by experiment name
 def get_runs_by_experiment_name(experiment_name: str, filter_string: str = "", set_experiment: bool = False) -> list[Run]:
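For reference, a short usage sketch of the updated get_weights_path helper, based only on the signature and doctests shown above (assumes stouputils 1.2.43 and its data-science dependencies, including mlflow, are installed):

from stouputils.data_science import mlflow_utils

# Resolve a weights file from an MLflow artifact URI; os_name selects the
# Windows ("nt") or Unix ("posix") handling of the file:// prefix.
print(mlflow_utils.get_weights_path(
	from_string="file:///path/to/artifact", weights_name="best_model.keras", os_name="posix"))
# -> /path/to/artifact/best_model.keras

print(mlflow_utils.get_weights_path(
	from_string="file:///C:/path/to/artifact", weights_name="best_model.keras", os_name="nt"))
# -> C:/path/to/artifact/best_model.keras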
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/all.py
RENAMED

@@ -25,13 +25,6 @@ from .keras.all import (
 	ResNet50V2,
 	ResNet101V2,
 	ResNet152V2,
-	ResNetRS50,
-	ResNetRS101,
-	ResNetRS152,
-	ResNetRS200,
-	ResNetRS270,
-	ResNetRS350,
-	ResNetRS420,
 	SqueezeNet,
 	Xception,
 )

@@ -76,13 +69,6 @@ CLASS_MAP: ModelClassMap = ModelClassMap({
 	ResNet50V2: ("resnet50v2", "resnetsv2", "resnets", "all", "often"),
 	ResNet101V2: ("resnet101v2", "resnetsv2", "resnets", "all", "often"),
 	ResNet152V2: ("resnet152v2", "resnetsv2", "resnets", "all", "often"),
-	ResNetRS50: ("resnetrs50", "resnetrs", "resnets", "all"),
-	ResNetRS101: ("resnetrs101", "resnetrs", "resnets", "all"),
-	ResNetRS152: ("resnetrs152", "resnetrs", "resnets", "all"),
-	ResNetRS200: ("resnetrs200", "resnetrs", "resnets", "all"),
-	ResNetRS270: ("resnetrs270", "resnetrs", "resnets", "all"),
-	ResNetRS350: ("resnetrs350", "resnetrs", "resnets", "all"),
-	ResNetRS420: ("resnetrs420", "resnetrs", "resnets", "all"),

 	Xception: ("xception", "xceptions", "all", "often"),
 	Sandbox: ("sandbox",),
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/all.py
RENAMED

@@ -4,18 +4,7 @@ from .convnext import ConvNeXtBase, ConvNeXtLarge, ConvNeXtSmall, ConvNeXtTiny,
 from .densenet import DenseNet121, DenseNet169, DenseNet201
 from .efficientnet import EfficientNetB0, EfficientNetV2B0, EfficientNetV2L, EfficientNetV2M, EfficientNetV2S
 from .mobilenet import MobileNet, MobileNetV2, MobileNetV3Large, MobileNetV3Small
-from .resnet import (
-	ResNet50V2,
-	ResNet101V2,
-	ResNet152V2,
-	ResNetRS50,
-	ResNetRS101,
-	ResNetRS152,
-	ResNetRS200,
-	ResNetRS270,
-	ResNetRS350,
-	ResNetRS420,
-)
+from .resnet import ResNet50V2, ResNet101V2, ResNet152V2
 from .squeezenet import SqueezeNet
 from .vgg import VGG16, VGG19
 from .xception import Xception

@@ -43,13 +32,6 @@ __all__ = [
 	"ResNet50V2",
 	"ResNet101V2",
 	"ResNet152V2",
-	"ResNetRS50",
-	"ResNetRS101",
-	"ResNetRS152",
-	"ResNetRS200",
-	"ResNetRS270",
-	"ResNetRS350",
-	"ResNetRS420",
 	"SqueezeNet",
 	"Xception",
 ]
stouputils-1.2.43/stouputils/data_science/models/keras/resnet.py
ADDED

@@ -0,0 +1,52 @@
+""" ResNet models implementation.
+
+This module provides wrapper classes for the ResNet family of models from the Keras applications.
+It includes both ResNetV2 models with pre-activation residual blocks and ResNetRS
+(ResNet with Revisited Scaling) models that offer improved performance
+through various scaling techniques.
+
+Available models:
+
+- ResNetV2 family: Improved ResNet architectures with pre-activation blocks
+	- ResNet50V2
+	- ResNet101V2
+	- ResNet152V2
+
+All models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2_keras
+from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2_keras
+from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class ResNet50V2(BaseKeras):
+	def _get_base_model(self) -> Model:
+		return ResNet50V2_keras(include_top=False, classes=self.num_classes)
+
+class ResNet101V2(BaseKeras):
+	def _get_base_model(self) -> Model:
+		return ResNet101V2_keras(include_top=False, classes=self.num_classes)
+
+class ResNet152V2(BaseKeras):
+	def _get_base_model(self) -> Model:
+		return ResNet152V2_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [ResNet50V2, ResNet101V2, ResNet152V2]:
+	model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+	model.class_routine = simple_cache(model.class_routine)
+	model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py
RENAMED

@@ -41,7 +41,7 @@ class ProgressiveUnfreezing(Callback):
 		reset_weights (bool): If True, reset weights after each unfreeze.
 		reset_optimizer_function (Callable | None):
 			If set, use this function to reset the optimizer every update_interval.
-			The function should return a compiled optimizer,
+			The function should return a compiled optimizer, e.g. `lambda: model._get_optimizer(AdamW(...))`.
 		update_per_epoch (bool): If True, unfreeze per epoch, else per batch.
 		update_interval (int): Number of steps between each unfreeze to allow model to stabilize.
 		progressive_freeze (bool): If True, start with all layers unfrozen and progressively freeze them.
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/model_interface.py
RENAMED

@@ -386,7 +386,7 @@ class ModelInterface(AbstractModel):
 		return mlflow_utils.get_weights_path(from_string=str(run.info.artifact_uri))

 	def _get_total_layers(self) -> int:
-		""" Get the total number of layers in the model architecture,
+		""" Get the total number of layers in the model architecture, e.g. 427 for DenseNet121.

 		Compatible with Keras/TensorFlow and PyTorch models.


@@ -465,7 +465,7 @@ class ModelInterface(AbstractModel):
 		Args:
 			y_train (NDArray[Any]): Training labels
 		Returns:
-			dict[int, float]: Dictionary mapping class indices to weights,
+			dict[int, float]: Dictionary mapping class indices to weights, e.g. {0: 0.34, 1: 0.66}
 		"""
 		# Get the true classes (one-hot -> class indices)
 		true_classes: NDArray[Any] = Utils.convert_to_class_indices(y_train)
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/range_tuple.py
RENAMED

@@ -16,7 +16,7 @@ This class contains methods for:
 from __future__ import annotations

 from collections.abc import Generator
-from typing import NamedTuple
+from typing import Any, NamedTuple

 import numpy as np


@@ -87,7 +87,7 @@ class RangeTuple(_RangeTupleBase):
 	def __repr__(self) -> str:
 		return f"RangeTuple(mini={self.mini!r}, maxi={self.maxi!r}, step={self.step!r}, default={self.default!r})"

-	def __iter__(self) -> Generator[float,
+	def __iter__(self) -> Generator[float, Any, Any]:
 		""" Iterate over the range values.
 		If the range is not initialized (mini or maxi is None), yield the default value.
 		Else, yield from np.arange(...)

@@ -103,10 +103,10 @@ class RangeTuple(_RangeTupleBase):
 		>>> list(r)
 		[1.0]
 		"""
-		if self.mini is None or self.maxi is None or self.step is None:
+		if self.mini is None or self.maxi is None or self.step is None and self.default is not None:
 			yield float(self.default) # pyright: ignore [reportArgumentType]
 		else:
-			yield from np.arange(self.mini, self.maxi, self.step)
+			yield from [float(x) for x in np.arange(self.mini, self.maxi, self.step)]

 	def __len__(self) -> int:
 		""" Return the number of values in the range.
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/exhaustive_process.py
RENAMED

@@ -47,8 +47,8 @@ def exhaustive_process(

 	Args:
 		datasets_to_process (list[tuple[str, str]]): List of dataset paths to process.
-			Each tuple contains (dataset_path, based_of_path),
-		main_script_path (str): Path to the main script,
+			Each tuple contains (dataset_path, based_of_path), e.g. [("aug_preprocessed_path", "preprocessed_path")].
+		main_script_path (str): Path to the main script, e.g. "src/main.py"
 		default_model (str): Default model architecture to use for training.
 		default_kfold (int): Default number of folds for k-fold cross validation.
 		default_transfer_learning (str): Default source for transfer learning.

{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/routine.py
RENAMED

@@ -13,8 +13,8 @@ from ..models.all import ALL_MODELS, CLASS_MAP, ModelInterface

 # Constants
 MODEL_HELP: str = "Model(s) name or alias to use"
-INPUT_HELP: str = "Path to the dataset,
-BASED_OF_HELP: str = "Path to the base dataset for filtering train/test,
+INPUT_HELP: str = "Path to the dataset, e.g. 'data/aug_hip_implant'"
+BASED_OF_HELP: str = "Path to the base dataset for filtering train/test, e.g. 'data/hip_implant'"
 TRANSFER_LEARNING_HELP: str = "Transfer learning source (imagenet, None, 'data/dataset_folder')"
 GROUPING_HELP: str = "Grouping strategy for the dataset"
 K_FOLD_HELP: str = "Number of folds for k-fold cross validation (0 = no k-fold, negative = LeavePOut)"
stouputils-1.2.41/stouputils/data_science/models/keras/resnet.py
DELETED

@@ -1,99 +0,0 @@
-""" ResNet models implementation.
-
-This module provides wrapper classes for the ResNet family of models from the Keras applications.
-It includes both ResNetV2 models with pre-activation residual blocks and ResNetRS
-(ResNet with Revisited Scaling) models that offer improved performance
-through various scaling techniques.
-
-Available models:
-
-- ResNetV2 family: Improved ResNet architectures with pre-activation blocks
-	- ResNet50V2
-	- ResNet101V2
-	- ResNet152V2
-- ResNetRS family: ResNet models with revisited scaling for better efficiency
-	- ResNetRS50
-	- ResNetRS101
-	- ResNetRS152
-	- ResNetRS200
-	- ResNetRS270
-	- ResNetRS350
-	- ResNetRS420
-
-All models support transfer learning from ImageNet pre-trained weights.
-"""
-# pyright: reportUnknownVariableType=false
-# pyright: reportMissingTypeStubs=false
-
-# Imports
-from __future__ import annotations
-
-from keras.models import Model
-from keras.src.applications.resnet_rs import ResNetRS50 as ResNetRS50_keras
-from keras.src.applications.resnet_rs import ResNetRS101 as ResNetRS101_keras
-from keras.src.applications.resnet_rs import ResNetRS152 as ResNetRS152_keras
-from keras.src.applications.resnet_rs import ResNetRS200 as ResNetRS200_keras
-from keras.src.applications.resnet_rs import ResNetRS270 as ResNetRS270_keras
-from keras.src.applications.resnet_rs import ResNetRS350 as ResNetRS350_keras
-from keras.src.applications.resnet_rs import ResNetRS420 as ResNetRS420_keras
-from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2_keras
-from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2_keras
-from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2_keras
-
-from ....decorators import simple_cache
-from ..base_keras import BaseKeras
-from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
-
-
-# Classes
-class ResNet50V2(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNet50V2_keras(include_top=False, classes=self.num_classes)
-
-class ResNet101V2(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNet101V2_keras(include_top=False, classes=self.num_classes)
-
-class ResNet152V2(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNet152V2_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS50(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS50_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS101(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS101_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS152(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS152_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS200(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS200_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS270(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS270_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS350(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS350_keras(include_top=False, classes=self.num_classes)
-
-class ResNetRS420(BaseKeras):
-	def _get_base_model(self) -> Model:
-		return ResNetRS420_keras(include_top=False, classes=self.num_classes)
-
-
-# Docstrings
-for model in [
-	ResNet50V2, ResNet101V2, ResNet152V2,
-	ResNetRS50, ResNetRS101, ResNetRS152, ResNetRS200,
-	ResNetRS270, ResNetRS350, ResNetRS420
-]:
-	model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
-	model.class_routine = simple_cache(model.class_routine)
-	model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
-
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/axis_flip.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/canny.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/common.py
RENAMED
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/denoise.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/laplacian.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/median_blur.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/normalize.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/resize.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/rotation.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/salt_pepper.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/shearing.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/threshold.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/translation.py
RENAMED
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image/zoom.py
RENAMED
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/data_processing/image_preprocess.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/dataset/grouping_strategy.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras/efficientnet.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/models/keras_utils/visualizations.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{stouputils-1.2.41 → stouputils-1.2.43}/stouputils/data_science/scripts/preprocess_dataset.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|