stouputils-1.14.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stouputils/__init__.py +40 -0
- stouputils/__main__.py +86 -0
- stouputils/_deprecated.py +37 -0
- stouputils/all_doctests.py +160 -0
- stouputils/applications/__init__.py +22 -0
- stouputils/applications/automatic_docs.py +634 -0
- stouputils/applications/upscaler/__init__.py +39 -0
- stouputils/applications/upscaler/config.py +128 -0
- stouputils/applications/upscaler/image.py +247 -0
- stouputils/applications/upscaler/video.py +287 -0
- stouputils/archive.py +344 -0
- stouputils/backup.py +488 -0
- stouputils/collections.py +244 -0
- stouputils/continuous_delivery/__init__.py +27 -0
- stouputils/continuous_delivery/cd_utils.py +243 -0
- stouputils/continuous_delivery/github.py +522 -0
- stouputils/continuous_delivery/pypi.py +130 -0
- stouputils/continuous_delivery/pyproject.py +147 -0
- stouputils/continuous_delivery/stubs.py +86 -0
- stouputils/ctx.py +408 -0
- stouputils/data_science/config/get.py +51 -0
- stouputils/data_science/config/set.py +125 -0
- stouputils/data_science/data_processing/image/__init__.py +66 -0
- stouputils/data_science/data_processing/image/auto_contrast.py +79 -0
- stouputils/data_science/data_processing/image/axis_flip.py +58 -0
- stouputils/data_science/data_processing/image/bias_field_correction.py +74 -0
- stouputils/data_science/data_processing/image/binary_threshold.py +73 -0
- stouputils/data_science/data_processing/image/blur.py +59 -0
- stouputils/data_science/data_processing/image/brightness.py +54 -0
- stouputils/data_science/data_processing/image/canny.py +110 -0
- stouputils/data_science/data_processing/image/clahe.py +92 -0
- stouputils/data_science/data_processing/image/common.py +30 -0
- stouputils/data_science/data_processing/image/contrast.py +53 -0
- stouputils/data_science/data_processing/image/curvature_flow_filter.py +74 -0
- stouputils/data_science/data_processing/image/denoise.py +378 -0
- stouputils/data_science/data_processing/image/histogram_equalization.py +123 -0
- stouputils/data_science/data_processing/image/invert.py +64 -0
- stouputils/data_science/data_processing/image/laplacian.py +60 -0
- stouputils/data_science/data_processing/image/median_blur.py +52 -0
- stouputils/data_science/data_processing/image/noise.py +59 -0
- stouputils/data_science/data_processing/image/normalize.py +65 -0
- stouputils/data_science/data_processing/image/random_erase.py +66 -0
- stouputils/data_science/data_processing/image/resize.py +69 -0
- stouputils/data_science/data_processing/image/rotation.py +80 -0
- stouputils/data_science/data_processing/image/salt_pepper.py +68 -0
- stouputils/data_science/data_processing/image/sharpening.py +55 -0
- stouputils/data_science/data_processing/image/shearing.py +64 -0
- stouputils/data_science/data_processing/image/threshold.py +64 -0
- stouputils/data_science/data_processing/image/translation.py +71 -0
- stouputils/data_science/data_processing/image/zoom.py +83 -0
- stouputils/data_science/data_processing/image_augmentation.py +118 -0
- stouputils/data_science/data_processing/image_preprocess.py +183 -0
- stouputils/data_science/data_processing/prosthesis_detection.py +359 -0
- stouputils/data_science/data_processing/technique.py +481 -0
- stouputils/data_science/dataset/__init__.py +45 -0
- stouputils/data_science/dataset/dataset.py +292 -0
- stouputils/data_science/dataset/dataset_loader.py +135 -0
- stouputils/data_science/dataset/grouping_strategy.py +296 -0
- stouputils/data_science/dataset/image_loader.py +100 -0
- stouputils/data_science/dataset/xy_tuple.py +696 -0
- stouputils/data_science/metric_dictionnary.py +106 -0
- stouputils/data_science/metric_utils.py +847 -0
- stouputils/data_science/mlflow_utils.py +206 -0
- stouputils/data_science/models/abstract_model.py +149 -0
- stouputils/data_science/models/all.py +85 -0
- stouputils/data_science/models/base_keras.py +765 -0
- stouputils/data_science/models/keras/all.py +38 -0
- stouputils/data_science/models/keras/convnext.py +62 -0
- stouputils/data_science/models/keras/densenet.py +50 -0
- stouputils/data_science/models/keras/efficientnet.py +60 -0
- stouputils/data_science/models/keras/mobilenet.py +56 -0
- stouputils/data_science/models/keras/resnet.py +52 -0
- stouputils/data_science/models/keras/squeezenet.py +233 -0
- stouputils/data_science/models/keras/vgg.py +42 -0
- stouputils/data_science/models/keras/xception.py +38 -0
- stouputils/data_science/models/keras_utils/callbacks/__init__.py +20 -0
- stouputils/data_science/models/keras_utils/callbacks/colored_progress_bar.py +219 -0
- stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py +148 -0
- stouputils/data_science/models/keras_utils/callbacks/model_checkpoint_v2.py +31 -0
- stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py +249 -0
- stouputils/data_science/models/keras_utils/callbacks/warmup_scheduler.py +66 -0
- stouputils/data_science/models/keras_utils/losses/__init__.py +12 -0
- stouputils/data_science/models/keras_utils/losses/next_generation_loss.py +56 -0
- stouputils/data_science/models/keras_utils/visualizations.py +416 -0
- stouputils/data_science/models/model_interface.py +939 -0
- stouputils/data_science/models/sandbox.py +116 -0
- stouputils/data_science/range_tuple.py +234 -0
- stouputils/data_science/scripts/augment_dataset.py +77 -0
- stouputils/data_science/scripts/exhaustive_process.py +133 -0
- stouputils/data_science/scripts/preprocess_dataset.py +70 -0
- stouputils/data_science/scripts/routine.py +168 -0
- stouputils/data_science/utils.py +285 -0
- stouputils/decorators.py +605 -0
- stouputils/image.py +441 -0
- stouputils/installer/__init__.py +18 -0
- stouputils/installer/common.py +67 -0
- stouputils/installer/downloader.py +101 -0
- stouputils/installer/linux.py +144 -0
- stouputils/installer/main.py +223 -0
- stouputils/installer/windows.py +136 -0
- stouputils/io.py +486 -0
- stouputils/parallel.py +483 -0
- stouputils/print.py +482 -0
- stouputils/py.typed +1 -0
- stouputils/stouputils/__init__.pyi +15 -0
- stouputils/stouputils/_deprecated.pyi +12 -0
- stouputils/stouputils/all_doctests.pyi +46 -0
- stouputils/stouputils/applications/__init__.pyi +2 -0
- stouputils/stouputils/applications/automatic_docs.pyi +106 -0
- stouputils/stouputils/applications/upscaler/__init__.pyi +3 -0
- stouputils/stouputils/applications/upscaler/config.pyi +18 -0
- stouputils/stouputils/applications/upscaler/image.pyi +109 -0
- stouputils/stouputils/applications/upscaler/video.pyi +60 -0
- stouputils/stouputils/archive.pyi +67 -0
- stouputils/stouputils/backup.pyi +109 -0
- stouputils/stouputils/collections.pyi +86 -0
- stouputils/stouputils/continuous_delivery/__init__.pyi +5 -0
- stouputils/stouputils/continuous_delivery/cd_utils.pyi +129 -0
- stouputils/stouputils/continuous_delivery/github.pyi +162 -0
- stouputils/stouputils/continuous_delivery/pypi.pyi +53 -0
- stouputils/stouputils/continuous_delivery/pyproject.pyi +67 -0
- stouputils/stouputils/continuous_delivery/stubs.pyi +39 -0
- stouputils/stouputils/ctx.pyi +211 -0
- stouputils/stouputils/decorators.pyi +252 -0
- stouputils/stouputils/image.pyi +172 -0
- stouputils/stouputils/installer/__init__.pyi +5 -0
- stouputils/stouputils/installer/common.pyi +39 -0
- stouputils/stouputils/installer/downloader.pyi +24 -0
- stouputils/stouputils/installer/linux.pyi +39 -0
- stouputils/stouputils/installer/main.pyi +57 -0
- stouputils/stouputils/installer/windows.pyi +31 -0
- stouputils/stouputils/io.pyi +213 -0
- stouputils/stouputils/parallel.pyi +216 -0
- stouputils/stouputils/print.pyi +136 -0
- stouputils/stouputils/version_pkg.pyi +15 -0
- stouputils/version_pkg.py +189 -0
- stouputils-1.14.0.dist-info/METADATA +178 -0
- stouputils-1.14.0.dist-info/RECORD +140 -0
- stouputils-1.14.0.dist-info/WHEEL +4 -0
- stouputils-1.14.0.dist-info/entry_points.txt +3 -0
--- /dev/null
+++ b/stouputils/data_science/models/keras/all.py
@@ -0,0 +1,38 @@
+
+# Imports
+from .convnext import ConvNeXtBase, ConvNeXtLarge, ConvNeXtSmall, ConvNeXtTiny, ConvNeXtXLarge
+from .densenet import DenseNet121, DenseNet169, DenseNet201
+from .efficientnet import EfficientNetB0, EfficientNetV2B0, EfficientNetV2L, EfficientNetV2M, EfficientNetV2S
+from .mobilenet import MobileNet, MobileNetV2, MobileNetV3Large, MobileNetV3Small
+from .resnet import ResNet50V2, ResNet101V2, ResNet152V2
+from .squeezenet import SqueezeNet
+from .vgg import VGG16, VGG19
+from .xception import Xception
+
+__all__ = [
+    "VGG16",
+    "VGG19",
+    "ConvNeXtBase",
+    "ConvNeXtLarge",
+    "ConvNeXtSmall",
+    "ConvNeXtTiny",
+    "ConvNeXtXLarge",
+    "DenseNet121",
+    "DenseNet169",
+    "DenseNet201",
+    "EfficientNetB0",
+    "EfficientNetV2B0",
+    "EfficientNetV2L",
+    "EfficientNetV2M",
+    "EfficientNetV2S",
+    "MobileNet",
+    "MobileNetV2",
+    "MobileNetV3Large",
+    "MobileNetV3Small",
+    "ResNet50V2",
+    "ResNet101V2",
+    "ResNet152V2",
+    "SqueezeNet",
+    "Xception",
+]
+
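Taken together, all.py flattens every wrapper class into a single importable namespace. A minimal usage sketch, assuming stouputils and a working Keras backend are installed (the wrapper constructors themselves are defined in BaseKeras, which is outside this section):

# Importing the aggregated module also runs each submodule's docstring-patching
# loop (shown in the files below), so __doc__ arrives already populated.
from stouputils.data_science.models.keras.all import ResNet50V2

print(ResNet50V2.__doc__)  # formatted from MODEL_DOCSTRING at import time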
--- /dev/null
+++ b/stouputils/data_science/models/keras/convnext.py
@@ -0,0 +1,62 @@
+""" ConvNeXt models implementation.
+
+This module provides wrapper classes for the ConvNeXt family of models from the Keras applications.
+ConvNeXt models are a family of pure convolutional networks that match or outperform
+Vision Transformers (ViTs) while maintaining the simplicity and efficiency of CNNs.
+
+Available models:
+
+- ConvNeXtTiny: Smallest variant with fewer parameters for resource-constrained environments
+- ConvNeXtSmall: Compact model balancing performance and size
+- ConvNeXtBase: Standard model with good performance for general use cases
+- ConvNeXtLarge: Larger model with higher capacity for complex tasks
+- ConvNeXtXLarge: Largest variant with maximum capacity for demanding applications
+
+All models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase_keras
+from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge_keras
+from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall_keras
+from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny_keras
+from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class ConvNeXtTiny(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ConvNeXtTiny_keras(include_top=False, classes=self.num_classes)
+
+class ConvNeXtSmall(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ConvNeXtSmall_keras(include_top=False, classes=self.num_classes)
+
+class ConvNeXtBase(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ConvNeXtBase_keras(include_top=False, classes=self.num_classes)
+
+class ConvNeXtLarge(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ConvNeXtLarge_keras(include_top=False, classes=self.num_classes)
+
+class ConvNeXtXLarge(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ConvNeXtXLarge_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [ConvNeXtTiny, ConvNeXtSmall, ConvNeXtBase, ConvNeXtLarge, ConvNeXtXLarge]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
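The three-line loop closing this module reappears at the bottom of every model file in the package: stamp a shared docstring template onto each class, then memoize its class_routine. A self-contained sketch of those mechanics, using functools.cache as a stand-in for stouputils' simple_cache (whose exact semantics are not shown in this diff) and hypothetical template strings:

from functools import cache as simple_cache  # stand-in, assumed equivalent for this sketch

MODEL_DOCSTRING = "Wrapper for the {model} architecture."           # hypothetical template
CLASS_ROUTINE_DOCSTRING = "Cached training routine for {model}."    # hypothetical template

class Dummy:
    @classmethod
    def class_routine(cls, dataset: str) -> str:
        return f"{cls.__name__} trained on {dataset}"

Dummy.__doc__ = MODEL_DOCSTRING.format(model=Dummy.__name__)
Dummy.class_routine = simple_cache(Dummy.class_routine)  # rebind to the cached wrapper
Dummy.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=Dummy.__name__)

print(Dummy.__doc__)                 # Wrapper for the Dummy architecture.
print(Dummy.class_routine("mnist"))  # computed once; identical calls return the memoized result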
--- /dev/null
+++ b/stouputils/data_science/models/keras/densenet.py
@@ -0,0 +1,50 @@
+""" DenseNet models implementation.
+
+This module provides wrapper classes for the DenseNet family of models from the Keras applications.
+DenseNet models utilize dense connections between layers, where each layer obtains additional inputs
+from all preceding layers and passes on its feature-maps to all subsequent layers.
+
+Available models:
+
+- DenseNet121: Smallest variant with 121 layers
+- DenseNet169: Medium-sized variant with 169 layers
+- DenseNet201: Largest variant with 201 layers
+
+All models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.densenet import DenseNet121 as DenseNet121_keras
+from keras.src.applications.densenet import DenseNet169 as DenseNet169_keras
+from keras.src.applications.densenet import DenseNet201 as DenseNet201_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class DenseNet121(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return DenseNet121_keras(include_top=False, classes=self.num_classes)
+
+class DenseNet169(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return DenseNet169_keras(include_top=False, classes=self.num_classes)
+
+class DenseNet201(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return DenseNet201_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [DenseNet121, DenseNet169, DenseNet201]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
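The dense-connectivity idea described in the docstring fits in a few lines of functional-API Keras. A generic illustration, not the keras.applications implementation: each layer's output is concatenated onto the running stack, so every later layer sees all earlier feature-maps.

from keras.layers import Concatenate, Conv2D, Input

def dense_block(x, num_layers: int = 4, growth_rate: int = 32):
    """Each layer reads everything produced so far and appends growth_rate channels."""
    for _ in range(num_layers):
        y = Conv2D(growth_rate, 3, padding="same", activation="relu")(x)
        x = Concatenate()([x, y])  # features accumulate instead of being replaced
    return x

inp = Input(shape=(32, 32, 16))
out = dense_block(inp)
print(out.shape)  # (None, 32, 32, 144): 16 input channels + 4 * 32 accumulated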
--- /dev/null
+++ b/stouputils/data_science/models/keras/efficientnet.py
@@ -0,0 +1,60 @@
+""" EfficientNetV2 models implementation.
+
+This module provides wrapper classes for the EfficientNetV2 family of models from the Keras applications.
+EfficientNetV2 models are a family of convolutional neural networks that achieve better
+parameter efficiency and faster training speed compared to prior models.
+
+Available models:
+
+- EfficientNetB0, EfficientNetV2B0: Baseline variants with the smallest footprint
+- EfficientNetV2S, EfficientNetV2M, EfficientNetV2L: Small, medium and large variants with increasing capacity and cost
+
+All models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0_keras
+from keras.src.applications.efficientnet_v2 import EfficientNetV2B0 as EfficientNetV2B0_keras
+from keras.src.applications.efficientnet_v2 import EfficientNetV2L as EfficientNetV2L_keras
+from keras.src.applications.efficientnet_v2 import EfficientNetV2M as EfficientNetV2M_keras
+from keras.src.applications.efficientnet_v2 import EfficientNetV2S as EfficientNetV2S_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class EfficientNetV2M(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return EfficientNetV2M_keras(include_top=False, classes=self.num_classes)
+
+class EfficientNetV2L(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return EfficientNetV2L_keras(include_top=False, classes=self.num_classes)
+
+class EfficientNetV2B0(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return EfficientNetV2B0_keras(include_top=False, classes=self.num_classes)
+
+class EfficientNetV2S(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return EfficientNetV2S_keras(include_top=False, classes=self.num_classes)
+
+# Classes for original EfficientNet models
+class EfficientNetB0(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return EfficientNetB0_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [EfficientNetV2M, EfficientNetV2L, EfficientNetV2B0, EfficientNetV2S, EfficientNetB0]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
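The parameter-efficiency claim traces back to compound scaling: depth, width and input resolution grow together under a single coefficient phi. A back-of-the-envelope sketch using the coefficients reported in the original EfficientNet paper (illustrative only; the Keras implementations bake these choices in):

# alpha * beta^2 * gamma^2 ≈ 2, so FLOPs roughly double per unit of phi.
alpha, beta, gamma = 1.2, 1.1, 1.15  # depth, width, resolution multipliers

def compound_scaling(phi: float) -> tuple[float, float, float]:
    return alpha ** phi, beta ** phi, gamma ** phi

for phi in (0, 1, 2):
    d, w, r = compound_scaling(phi)
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}")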
--- /dev/null
+++ b/stouputils/data_science/models/keras/mobilenet.py
@@ -0,0 +1,56 @@
+""" MobileNet models implementation.
+
+This module provides wrapper classes for the MobileNet family of models from the Keras applications.
+MobileNet models are designed for mobile and embedded vision applications,
+offering efficient architectures that deliver high accuracy with low computational requirements.
+
+Available models:
+
+- MobileNet: Original MobileNet architecture using depthwise separable convolutions
+- MobileNetV2: Lightweight architecture using inverted residuals and linear bottlenecks
+- MobileNetV3Small: Compact variant of MobileNetV3 optimized for mobile devices
+- MobileNetV3Large: Larger variant of MobileNetV3 with higher capacity
+
+All models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.mobilenet import MobileNet as MobileNet_keras
+from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2_keras
+from keras.src.applications.mobilenet_v3 import MobileNetV3Large as MobileNetV3Large_keras
+from keras.src.applications.mobilenet_v3 import MobileNetV3Small as MobileNetV3Small_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class MobileNet(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return MobileNet_keras(include_top=False, classes=self.num_classes)
+
+class MobileNetV2(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return MobileNetV2_keras(include_top=False, classes=self.num_classes)
+
+class MobileNetV3Small(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return MobileNetV3Small_keras(include_top=False, classes=self.num_classes)
+
+class MobileNetV3Large(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return MobileNetV3Large_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [MobileNet, MobileNetV2, MobileNetV3Small, MobileNetV3Large]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
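The efficiency of the MobileNet family rests on depthwise separable convolutions: a per-channel spatial filter followed by a 1x1 pointwise mix, in place of one dense 3x3 convolution. A generic sketch comparing parameter counts (not the keras.applications implementation):

from keras.layers import Conv2D, DepthwiseConv2D, Input
from keras.models import Model

inp = Input(shape=(64, 64, 128))
dense = Conv2D(128, 3, padding="same")(inp)    # standard 3x3 convolution
sep = DepthwiseConv2D(3, padding="same")(inp)  # spatial filter, one per channel
sep = Conv2D(128, 1)(sep)                      # pointwise 1x1 channel mix

print(Model(inp, dense).count_params())  # 147,584 = 3*3*128*128 + 128 biases
print(Model(inp, sep).count_params())    # 17,792 = (3*3*128 + 128) + (128*128 + 128)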
--- /dev/null
+++ b/stouputils/data_science/models/keras/resnet.py
@@ -0,0 +1,52 @@
+""" ResNet models implementation.
+
+This module provides wrapper classes for the ResNet family of models from the Keras applications.
+It includes the ResNetV2 models, which improve on the original ResNet
+architecture with pre-activation residual blocks for better gradient flow
+and easier optimization of very deep networks.
+
+Available models:
+
+- ResNetV2 family: Improved ResNet architectures with pre-activation blocks
+    - ResNet50V2
+    - ResNet101V2
+    - ResNet152V2
+
+All models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2_keras
+from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2_keras
+from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class ResNet50V2(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ResNet50V2_keras(include_top=False, classes=self.num_classes)
+
+class ResNet101V2(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ResNet101V2_keras(include_top=False, classes=self.num_classes)
+
+class ResNet152V2(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return ResNet152V2_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [ResNet50V2, ResNet101V2, ResNet152V2]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
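A pre-activation residual block, the distinguishing feature of the V2 variants, moves batch normalization and ReLU before each convolution so the skip path stays a clean identity. A generic sketch of the idea, not the keras.applications code:

from keras.layers import Add, BatchNormalization, Conv2D, ReLU

def preact_residual_block(x, filters: int):
    """Identity shortcut; BN and ReLU come *before* each conv (the "V2" ordering).
    Assumes the input already carries `filters` channels so the Add shapes match."""
    shortcut = x
    for _ in range(2):
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(filters, 3, padding="same")(x)
    return Add()([shortcut, x])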
--- /dev/null
+++ b/stouputils/data_science/models/keras/squeezenet.py
@@ -0,0 +1,233 @@
+""" SqueezeNet model implementation.
+
+This module provides a wrapper class for the SqueezeNet model, a lightweight CNN architecture
+that achieves AlexNet-level accuracy with 50x fewer parameters and a model size of less than 0.5MB.
+SqueezeNet uses "fire modules" consisting of a squeeze layer with 1x1 filters followed by an
+expand layer with a mix of 1x1 and 3x3 convolution filters.
+
+Available models:
+- SqueezeNet: Compact model with excellent performance-to-parameter ratio
+
+The model supports transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownArgumentType=false
+# pyright: reportUnknownMemberType=false
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from typing import Any
+
+from keras import backend
+from keras.layers import (
+    Activation,
+    Convolution2D,
+    Dropout,
+    GlobalAveragePooling2D,
+    GlobalMaxPooling2D,
+    Input,
+    MaxPooling2D,
+    concatenate,
+)
+from keras.models import Model
+from keras.utils import get_file, get_source_inputs
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+# Constants
+SQ1X1: str = "squeeze1x1"
+
+WEIGHTS_PATH = "https://github.com/rcmalli/keras-squeezenet/releases/download/v1.0/squeezenet_weights_tf_dim_ordering_tf_kernels.h5"
+WEIGHTS_PATH_NO_TOP = "https://github.com/rcmalli/keras-squeezenet/releases/download/v1.0/squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5"
+
+
+# Modular function for Fire Node
+def fire_module(x: Any, fire_id: int, squeeze: int = 16, expand: int = 64):
+    """ Create a fire module with specified parameters.
+
+    Args:
+        x (Tensor): Input tensor
+        fire_id (int): ID for the fire module
+        squeeze (int): Number of filters for squeeze layer
+        expand (int): Number of filters for expand layers
+
+    Returns:
+        Tensor: Output tensor from the fire module
+    """
+    s_id: str = f"fire{fire_id}"
+
+    if backend.image_data_format() == "channels_first":
+        channel_axis: int = 1
+    else:
+        channel_axis: int = 3
+
+    x = Convolution2D(squeeze, (1, 1), padding="valid", name=f"{s_id}/squeeze1x1")(x)
+    x = Activation("relu", name=f"{s_id}/relu_squeeze1x1")(x)
+
+    left = Convolution2D(expand, (1, 1), padding="valid", name=f"{s_id}/expand1x1")(x)
+    left = Activation("relu", name=f"{s_id}/relu_expand1x1")(left)
+
+    right = Convolution2D(expand, (3, 3), padding="same", name=f"{s_id}/expand3x3")(x)
+    right = Activation("relu", name=f"{s_id}/relu_expand3x3")(right)
+
+    x = concatenate([left, right], axis=channel_axis, name=f"{s_id}/concat")
+    return x
+
+
+# Original SqueezeNet from paper
+def SqueezeNet_keras(  # noqa: N802
+    include_top: bool = True,
+    weights: str = "imagenet",
+    input_tensor: Any = None,
+    input_shape: tuple[Any, ...] | None = None,
+    pooling: str | None = None,
+    classes: int = 1000
+) -> Model:
+    """ Instantiates the SqueezeNet architecture.
+
+    Args:
+        include_top (bool): Whether to include the fully-connected layer at the top
+        weights (str): One of `None` or 'imagenet'
+        input_tensor (Tensor): Optional Keras tensor as input
+        input_shape (tuple): Optional shape tuple
+        pooling (str): Optional pooling mode for feature extraction
+        classes (int): Number of classes to classify images into
+
+    Returns:
+        Model: A Keras model instance
+    """
+
+    if weights not in {'imagenet', None}:
+        raise ValueError(
+            "The `weights` argument should be either `None` (random initialization) "
+            "or `imagenet` (pre-training on ImageNet)."
+        )
+
+    if include_top and weights == 'imagenet' and classes != 1000:
+        raise ValueError(
+            "If using `weights` as imagenet with `include_top` as true, `classes` should be 1000"
+        )
+
+    # Manually handle input shape logic instead of _obtain_input_shape
+    default_size: int = 227
+    min_size: int = 48
+    if backend.image_data_format() == 'channels_first':
+        default_shape: tuple[int, int, int] = (3, default_size, default_size)
+        if weights == 'imagenet' and include_top and input_shape is not None and input_shape[0] != 3:
+            raise ValueError(
+                "When specifying `input_shape` and loading 'imagenet' weights, 'channels_first' input_shape "
+                "should be (3, H, W)."
+            )
+    else:  # channels_last
+        default_shape = (default_size, default_size, 3)
+        if weights == 'imagenet' and include_top and input_shape is not None and input_shape[2] != 3:
+            raise ValueError(
+                "When specifying `input_shape` and loading 'imagenet' weights, 'channels_last' input_shape "
+                "should be (H, W, 3)."
+            )
+
+    if input_shape is None:
+        input_shape = default_shape
+    else:
+        # Basic validation
+        if len(input_shape) != 3:
+            raise ValueError("`input_shape` must be a tuple of three integers.")
+        if backend.image_data_format() == 'channels_first':
+            if input_shape[1] is not None and input_shape[1] < min_size:
+                raise ValueError(f"Input size must be at least {min_size}x{min_size}, got `input_shape=`{input_shape}")
+            if input_shape[2] is not None and input_shape[2] < min_size:
+                raise ValueError(f"Input size must be at least {min_size}x{min_size}, got `input_shape=`{input_shape}")
+        else:  # channels_last
+            if input_shape[0] is not None and input_shape[0] < min_size:
+                raise ValueError(f"Input size must be at least {min_size}x{min_size}, got `input_shape=`{input_shape}")
+            if input_shape[1] is not None and input_shape[1] < min_size:
+                raise ValueError(f"Input size must be at least {min_size}x{min_size}, got `input_shape=`{input_shape}")
+
+    # Handle input tensor
+    if input_tensor is None:
+        img_input = Input(shape=input_shape)
+    else:
+        if not backend.is_keras_tensor(input_tensor):
+            img_input = Input(tensor=input_tensor, shape=input_shape)
+        else:
+            img_input = input_tensor
+
+    x = Convolution2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(img_input)
+    x = Activation('relu', name='relu_conv1')(x)
+    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
+
+    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
+    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
+    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)
+
+    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
+    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
+    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)
+
+    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
+    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
+    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
+    x = fire_module(x, fire_id=9, squeeze=64, expand=256)
+
+    if include_top:
+        # It's not obvious where to cut the network...
+        # Could do the 8th or 9th layer... some work recommends cutting earlier layers.
+
+        x = Dropout(0.5, name='drop9')(x)
+
+        x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)
+        x = Activation('relu', name='relu_conv10')(x)
+        x = GlobalAveragePooling2D()(x)
+        x = Activation('softmax', name='loss')(x)
+    else:
+        if pooling == 'avg':
+            x = GlobalAveragePooling2D()(x)
+        elif pooling == 'max':
+            x = GlobalMaxPooling2D()(x)
+        elif pooling is None:
+            pass
+        else:
+            raise ValueError("Unknown argument for 'pooling'=" + pooling)
+
+    # Ensure that the model takes into account
+    # any potential predecessors of `input_tensor`.
+    if input_tensor is not None:
+        inputs = get_source_inputs(input_tensor)
+    else:
+        inputs = img_input
+
+    model = Model(inputs, x, name='squeezenet')
+
+    # load weights
+    if weights == 'imagenet':
+        if include_top:
+            weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels.h5',
+                                    WEIGHTS_PATH,
+                                    cache_subdir='models')
+        else:
+            weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5',
+                                    WEIGHTS_PATH_NO_TOP,
+                                    cache_subdir='models')
+
+        model.load_weights(weights_path)
+    return model
+
+
+# Classes
+class SqueezeNet(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return SqueezeNet_keras(
+            include_top=False, classes=self.num_classes, input_shape=(224, 224, 3)
+        )
+
+
+# Docstrings
+for model in [SqueezeNet]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
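A quick way to see what fire_module does to a tensor: the squeeze width only sets the bottleneck, while the output always carries 2 * expand channels, since the 1x1 and 3x3 expand branches are concatenated. A small check, assuming a channels_last Keras backend and the fire_module reconstructed above:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(56, 56, 96))
out = fire_module(inp, fire_id=2, squeeze=16, expand=64)
print(Model(inp, out).output_shape)  # (None, 56, 56, 128): 64 + 64 expand channels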
--- /dev/null
+++ b/stouputils/data_science/models/keras/vgg.py
@@ -0,0 +1,42 @@
+""" VGG models implementation.
+
+This module provides wrapper classes for the VGG family of models from the Keras applications.
+VGG models are characterized by their simplicity, using only 3x3 convolutional layers
+stacked on top of each other with increasing depth.
+
+Available models:
+- VGG16: 16-layer model with 13 convolutional layers and 3 fully connected layers
+- VGG19: 19-layer model with 16 convolutional layers and 3 fully connected layers
+
+Both models support transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.vgg16 import VGG16 as VGG16_keras  # noqa: N811
+from keras.src.applications.vgg19 import VGG19 as VGG19_keras  # noqa: N811
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class VGG19(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return VGG19_keras(include_top=False, classes=self.num_classes)
+class VGG16(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return VGG16_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [VGG19, VGG16]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
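The "stacked 3x3 convolutions" recipe from the docstring reduces to a block you can write directly. A generic sketch of one VGG stage, not the keras.applications implementation:

from keras.layers import Conv2D, MaxPooling2D

def vgg_block(x, filters: int, convs: int):
    """One VGG stage: a stack of same-padded 3x3 convs, then a 2x2 pool halving resolution."""
    for _ in range(convs):
        x = Conv2D(filters, 3, padding="same", activation="relu")(x)
    return MaxPooling2D(2)(x)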
--- /dev/null
+++ b/stouputils/data_science/models/keras/xception.py
@@ -0,0 +1,38 @@
+""" Xception model implementation.
+
+This module provides a wrapper class for the Xception model, a deep convolutional neural network
+designed for efficient image classification. Xception uses depthwise separable convolutions,
+which significantly reduce the number of parameters and computational complexity compared to
+standard convolutional layers.
+
+Available models:
+- Xception: The standard Xception model
+
+The model supports transfer learning from ImageNet pre-trained weights.
+"""
+# pyright: reportUnknownVariableType=false
+# pyright: reportMissingTypeStubs=false
+
+# Imports
+from __future__ import annotations
+
+from keras.models import Model
+from keras.src.applications.xception import Xception as Xception_keras
+
+from ....decorators import simple_cache
+from ..base_keras import BaseKeras
+from ..model_interface import CLASS_ROUTINE_DOCSTRING, MODEL_DOCSTRING
+
+
+# Classes
+class Xception(BaseKeras):
+    def _get_base_model(self) -> Model:
+        return Xception_keras(include_top=False, classes=self.num_classes)
+
+
+# Docstrings
+for model in [Xception]:
+    model.__doc__ = MODEL_DOCSTRING.format(model=model.__name__)
+    model.class_routine = simple_cache(model.class_routine)
+    model.class_routine.__doc__ = CLASS_ROUTINE_DOCSTRING.format(model=model.__name__)
+
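Keras exposes Xception's building block directly as a layer, fusing the two-step depthwise-plus-pointwise pattern from the MobileNet sketch above into one op. A generic illustration:

from keras.layers import Input, SeparableConv2D
from keras.models import Model

inp = Input(shape=(64, 64, 128))
out = SeparableConv2D(128, 3, padding="same")(inp)  # depthwise filter + 1x1 projection, fused
print(Model(inp, out).count_params())  # 17,664: one bias term fewer than the two-layer version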
--- /dev/null
+++ b/stouputils/data_science/models/keras_utils/callbacks/__init__.py
@@ -0,0 +1,20 @@
+""" Custom callbacks for Keras models.
+
+Features:
+
+- Learning rate finder callback for finding the optimal learning rate
+- Warmup scheduler callback for warmup training
+- Progressive unfreezing callback for unfreezing layers during training (incompatible with model.fit(); requires a custom training loop)
+- Tqdm progress bar callback for better training visualization
+- Model checkpoint callback that only starts checkpointing after a given number of epochs
+"""
+
+# Imports
+from .colored_progress_bar import ColoredProgressBar
+from .learning_rate_finder import LearningRateFinder
+from .model_checkpoint_v2 import ModelCheckpointV2
+from .progressive_unfreezing import ProgressiveUnfreezing
+from .warmup_scheduler import WarmupScheduler
+
+__all__ = ["ColoredProgressBar", "LearningRateFinder", "ModelCheckpointV2", "ProgressiveUnfreezing", "WarmupScheduler"]
+
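The warmup idea in the feature list is simple enough to sketch as a plain Keras callback. This is an illustration of the concept only, not stouputils' WarmupScheduler, whose constructor and behavior are outside this section: ramp the learning rate linearly from near zero to its target over the first few epochs.

import keras

class LinearWarmup(keras.callbacks.Callback):
    """Generic sketch: linearly ramp the optimizer's learning rate over warmup_epochs."""
    def __init__(self, target_lr: float, warmup_epochs: int = 5):
        super().__init__()
        self.target_lr = target_lr
        self.warmup_epochs = warmup_epochs

    def on_epoch_begin(self, epoch, logs=None):
        if epoch < self.warmup_epochs:
            lr = self.target_lr * (epoch + 1) / self.warmup_epochs
            self.model.optimizer.learning_rate.assign(lr)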