stouputils 1.14.2-py3-none-any.whl → 1.15.0-py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (113)
  1. stouputils/continuous_delivery/pypi.py +1 -1
  2. stouputils/continuous_delivery/pypi.pyi +3 -2
  3. stouputils/data_science/config/get.py +51 -51
  4. stouputils/data_science/data_processing/image/__init__.py +66 -66
  5. stouputils/data_science/data_processing/image/auto_contrast.py +79 -79
  6. stouputils/data_science/data_processing/image/axis_flip.py +58 -58
  7. stouputils/data_science/data_processing/image/bias_field_correction.py +74 -74
  8. stouputils/data_science/data_processing/image/binary_threshold.py +73 -73
  9. stouputils/data_science/data_processing/image/blur.py +59 -59
  10. stouputils/data_science/data_processing/image/brightness.py +54 -54
  11. stouputils/data_science/data_processing/image/canny.py +110 -110
  12. stouputils/data_science/data_processing/image/clahe.py +92 -92
  13. stouputils/data_science/data_processing/image/common.py +30 -30
  14. stouputils/data_science/data_processing/image/contrast.py +53 -53
  15. stouputils/data_science/data_processing/image/curvature_flow_filter.py +74 -74
  16. stouputils/data_science/data_processing/image/denoise.py +378 -378
  17. stouputils/data_science/data_processing/image/histogram_equalization.py +123 -123
  18. stouputils/data_science/data_processing/image/invert.py +64 -64
  19. stouputils/data_science/data_processing/image/laplacian.py +60 -60
  20. stouputils/data_science/data_processing/image/median_blur.py +52 -52
  21. stouputils/data_science/data_processing/image/noise.py +59 -59
  22. stouputils/data_science/data_processing/image/normalize.py +65 -65
  23. stouputils/data_science/data_processing/image/random_erase.py +66 -66
  24. stouputils/data_science/data_processing/image/resize.py +69 -69
  25. stouputils/data_science/data_processing/image/rotation.py +80 -80
  26. stouputils/data_science/data_processing/image/salt_pepper.py +68 -68
  27. stouputils/data_science/data_processing/image/sharpening.py +55 -55
  28. stouputils/data_science/data_processing/image/shearing.py +64 -64
  29. stouputils/data_science/data_processing/image/threshold.py +64 -64
  30. stouputils/data_science/data_processing/image/translation.py +71 -71
  31. stouputils/data_science/data_processing/image/zoom.py +83 -83
  32. stouputils/data_science/data_processing/image_augmentation.py +118 -118
  33. stouputils/data_science/data_processing/image_preprocess.py +183 -183
  34. stouputils/data_science/data_processing/prosthesis_detection.py +359 -359
  35. stouputils/data_science/data_processing/technique.py +481 -481
  36. stouputils/data_science/dataset/__init__.py +45 -45
  37. stouputils/data_science/dataset/dataset.py +292 -292
  38. stouputils/data_science/dataset/dataset_loader.py +135 -135
  39. stouputils/data_science/dataset/grouping_strategy.py +296 -296
  40. stouputils/data_science/dataset/image_loader.py +100 -100
  41. stouputils/data_science/dataset/xy_tuple.py +696 -696
  42. stouputils/data_science/metric_dictionnary.py +106 -106
  43. stouputils/data_science/mlflow_utils.py +206 -206
  44. stouputils/data_science/models/abstract_model.py +149 -149
  45. stouputils/data_science/models/all.py +85 -85
  46. stouputils/data_science/models/keras/all.py +38 -38
  47. stouputils/data_science/models/keras/convnext.py +62 -62
  48. stouputils/data_science/models/keras/densenet.py +50 -50
  49. stouputils/data_science/models/keras/efficientnet.py +60 -60
  50. stouputils/data_science/models/keras/mobilenet.py +56 -56
  51. stouputils/data_science/models/keras/resnet.py +52 -52
  52. stouputils/data_science/models/keras/squeezenet.py +233 -233
  53. stouputils/data_science/models/keras/vgg.py +42 -42
  54. stouputils/data_science/models/keras/xception.py +38 -38
  55. stouputils/data_science/models/keras_utils/callbacks/__init__.py +20 -20
  56. stouputils/data_science/models/keras_utils/callbacks/colored_progress_bar.py +219 -219
  57. stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py +148 -148
  58. stouputils/data_science/models/keras_utils/callbacks/model_checkpoint_v2.py +31 -31
  59. stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py +249 -249
  60. stouputils/data_science/models/keras_utils/callbacks/warmup_scheduler.py +66 -66
  61. stouputils/data_science/models/keras_utils/losses/__init__.py +12 -12
  62. stouputils/data_science/models/keras_utils/losses/next_generation_loss.py +56 -56
  63. stouputils/data_science/models/keras_utils/visualizations.py +416 -416
  64. stouputils/data_science/models/sandbox.py +116 -116
  65. stouputils/data_science/range_tuple.py +234 -234
  66. stouputils/data_science/utils.py +285 -285
  67. stouputils/decorators.py +53 -39
  68. stouputils/decorators.pyi +12 -2
  69. stouputils/installer/__init__.py +18 -18
  70. stouputils/installer/linux.py +144 -144
  71. stouputils/installer/main.py +223 -223
  72. stouputils/installer/windows.py +136 -136
  73. stouputils/io.py +16 -9
  74. stouputils/parallel.pyi +12 -7
  75. stouputils/print.py +229 -2
  76. stouputils/print.pyi +92 -3
  77. stouputils/py.typed +1 -1
  78. {stouputils-1.14.2.dist-info → stouputils-1.15.0.dist-info}/METADATA +1 -1
  79. stouputils-1.15.0.dist-info/RECORD +140 -0
  80. {stouputils-1.14.2.dist-info → stouputils-1.15.0.dist-info}/WHEEL +1 -1
  81. stouputils/stouputils/__init__.pyi +0 -15
  82. stouputils/stouputils/_deprecated.pyi +0 -12
  83. stouputils/stouputils/all_doctests.pyi +0 -46
  84. stouputils/stouputils/applications/__init__.pyi +0 -2
  85. stouputils/stouputils/applications/automatic_docs.pyi +0 -106
  86. stouputils/stouputils/applications/upscaler/__init__.pyi +0 -3
  87. stouputils/stouputils/applications/upscaler/config.pyi +0 -18
  88. stouputils/stouputils/applications/upscaler/image.pyi +0 -109
  89. stouputils/stouputils/applications/upscaler/video.pyi +0 -60
  90. stouputils/stouputils/archive.pyi +0 -67
  91. stouputils/stouputils/backup.pyi +0 -109
  92. stouputils/stouputils/collections.pyi +0 -86
  93. stouputils/stouputils/continuous_delivery/__init__.pyi +0 -5
  94. stouputils/stouputils/continuous_delivery/cd_utils.pyi +0 -129
  95. stouputils/stouputils/continuous_delivery/github.pyi +0 -162
  96. stouputils/stouputils/continuous_delivery/pypi.pyi +0 -53
  97. stouputils/stouputils/continuous_delivery/pyproject.pyi +0 -67
  98. stouputils/stouputils/continuous_delivery/stubs.pyi +0 -39
  99. stouputils/stouputils/ctx.pyi +0 -211
  100. stouputils/stouputils/decorators.pyi +0 -252
  101. stouputils/stouputils/image.pyi +0 -172
  102. stouputils/stouputils/installer/__init__.pyi +0 -5
  103. stouputils/stouputils/installer/common.pyi +0 -39
  104. stouputils/stouputils/installer/downloader.pyi +0 -24
  105. stouputils/stouputils/installer/linux.pyi +0 -39
  106. stouputils/stouputils/installer/main.pyi +0 -57
  107. stouputils/stouputils/installer/windows.pyi +0 -31
  108. stouputils/stouputils/io.pyi +0 -213
  109. stouputils/stouputils/parallel.pyi +0 -216
  110. stouputils/stouputils/print.pyi +0 -136
  111. stouputils/stouputils/version_pkg.pyi +0 -15
  112. stouputils-1.14.2.dist-info/RECORD +0 -171
  113. {stouputils-1.14.2.dist-info → stouputils-1.15.0.dist-info}/entry_points.txt +0 -0
stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py
(the removed and re-added contents of this hunk are identical as displayed, suggesting a whitespace-only or line-ending change; the file is shown once below)
@@ -1,148 +1,148 @@

# pyright: reportMissingTypeStubs=false

# Imports
from typing import Any

import tensorflow as tf
from keras.callbacks import Callback
from keras.models import Model


class LearningRateFinder(Callback):
    """ Callback to find optimal learning rate by increasing LR during training.

    Sources:
    - Inspired by: https://github.com/WittmannF/LRFinder
    - Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 (first description of the method)

    This callback gradually increases the learning rate from a minimum to a maximum value
    during training, allowing you to identify the optimal learning rate range for your model.

    It works by:

    1. Starting with a very small learning rate
    2. Exponentially increasing it after each batch or epoch
    3. Recording the loss at each learning rate
    4. Restoring the model's initial weights after training

    The optimal learning rate is typically found where the loss is decreasing most rapidly
    before it starts to diverge.

    .. image:: https://blog.dataiku.com/hubfs/training%20loss.png
        :alt: Learning rate finder curve example
    """

    def __init__(
        self,
        min_lr: float,
        max_lr: float,
        steps_per_epoch: int,
        epochs: int,
        update_per_epoch: bool = False,
        update_interval: int = 5
    ) -> None:
        """ Initialize the learning rate finder.

        Args:
            min_lr (float): Minimum learning rate
            max_lr (float): Maximum learning rate
            steps_per_epoch (int): Steps per epoch
            epochs (int): Number of epochs
            update_per_epoch (bool): If True, update LR once per epoch instead of every batch.
            update_interval (int): Number of steps between each LR increase; a bigger value means a more stable loss.
        """
        super().__init__()
        self.min_lr: float = min_lr
        """ Minimum learning rate. """
        self.max_lr: float = max_lr
        """ Maximum learning rate. """
        self.total_updates: int = (epochs if update_per_epoch else steps_per_epoch * epochs) // update_interval
        """ Total number of update steps (considering update_interval). """
        self.update_per_epoch: bool = update_per_epoch
        """ Whether to update learning rate per epoch instead of per batch. """
        self.update_interval: int = max(1, int(update_interval))
        """ Number of steps between each LR increase; a bigger value means a more stable loss. """
        self.lr_mult: float = (max_lr / min_lr) ** (1 / self.total_updates)
        """ Learning rate multiplier. """
        self.learning_rates: list[float] = []
        """ List of learning rates. """
        self.losses: list[float] = []
        """ List of losses. """
        self.best_lr: float = min_lr
        """ Best learning rate. """
        self.best_loss: float = float("inf")
        """ Best loss. """
        self.model: Model
        """ Model to apply the learning rate finder to. """
        self.initial_weights: list[Any] | None = None
        """ Stores the initial weights of the model. """

    def on_train_begin(self, logs: dict[str, Any] | None = None) -> None:
        """ Set initial learning rate and save initial model weights at the start of training.

        Args:
            logs (dict | None): Training logs.
        """
        self.initial_weights = self.model.get_weights()
        tf.keras.backend.set_value(self.model.optimizer.learning_rate, self.min_lr)  # type: ignore

    def _update_lr_and_track_metrics(self, logs: dict[str, Any] | None = None) -> None:
        """ Update learning rate and track metrics.

        Args:
            logs (dict | None): Logs from training
        """
        if logs is None:
            return

        # Get current learning rate and loss
        current_lr: float = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))  # type: ignore
        current_loss: float = logs["loss"]

        # Record values
        self.learning_rates.append(current_lr)
        self.losses.append(current_loss)

        # Track best values
        if current_loss < self.best_loss:
            self.best_loss = current_loss
            self.best_lr = current_lr

        # Update learning rate
        new_lr: float = current_lr * self.lr_mult
        tf.keras.backend.set_value(self.model.optimizer.learning_rate, new_lr)  # type: ignore

    def on_batch_end(self, batch: int, logs: dict[str, Any] | None = None) -> None:
        """ Record loss and increase learning rate after each batch if not updating per epoch.

        Args:
            batch (int): Current batch index.
            logs (dict | None): Training logs.
        """
        if self.update_per_epoch:
            return
        if batch % self.update_interval == 0:
            self._update_lr_and_track_metrics(logs)

    def on_epoch_end(self, epoch: int, logs: dict[str, Any] | None = None) -> None:
        """ Record loss and increase learning rate after each epoch if updating per epoch.

        Args:
            epoch (int): Current epoch index.
            logs (dict | None): Training logs.
        """
        if not self.update_per_epoch:
            return
        if epoch % self.update_interval == 0:
            self._update_lr_and_track_metrics(logs)

    def on_train_end(self, logs: dict[str, Any] | None = None) -> None:
        """ Restore initial model weights at the end of training.

        Args:
            logs (dict | None): Training logs.
        """
        if self.initial_weights is not None:
            self.model.set_weights(self.initial_weights)  # pyright: ignore [reportUnknownMemberType]
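For context, the finder's schedule is geometric: each update multiplies the learning rate by lr_mult = (max_lr / min_lr) ** (1 / total_updates), so with min_lr=1e-6, max_lr=1e-1 and 500 updates, each step grows the LR by about 2.3% ((1e5) ** (1/500) ≈ 1.0233). Below is a minimal usage sketch; the synthetic data, toy model, and matplotlib plotting are illustrative assumptions, while the constructor arguments and the learning_rates / losses / best_lr attributes come from the file above.

# Usage sketch (assumptions: synthetic data, toy model, and plotting code;
# the import path follows the files-changed list above).
import numpy as np
import matplotlib.pyplot as plt
from keras import layers, models

from stouputils.data_science.models.keras_utils.callbacks.learning_rate_finder import LearningRateFinder

# Toy regression data and model, for illustration only
x_train = np.random.rand(3200, 32).astype("float32")
y_train = np.random.rand(3200, 1).astype("float32")
model = models.Sequential([
    layers.Input(shape=(32,)),
    layers.Dense(64, activation="relu"),
    layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")

batch_size: int = 32
epochs: int = 5
steps_per_epoch: int = len(x_train) // batch_size  # 100 steps per epoch

# 100 * 5 = 500 updates with update_interval=1, so lr_mult ≈ 1.0233 (~2.3% per step)
finder = LearningRateFinder(
    min_lr=1e-6, max_lr=1e-1,
    steps_per_epoch=steps_per_epoch, epochs=epochs,
    update_interval=1,
)
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[finder], verbose=0)

# Plot loss against learning rate on a log axis; a good LR usually sits
# slightly below the divergence point (finder.best_lr is only a rough hint).
plt.plot(finder.learning_rates, finder.losses)
plt.xscale("log")
plt.xlabel("learning rate")
plt.ylabel("loss")
plt.show()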
stouputils/data_science/models/keras_utils/callbacks/model_checkpoint_v2.py
(as above, both sides of this hunk are identical as displayed; the file is shown once below)
@@ -1,31 +1,31 @@

# pyright: reportMissingTypeStubs=false
# pyright: reportUnknownMemberType=false

# Imports
from typing import Any

from keras.callbacks import ModelCheckpoint


class ModelCheckpointV2(ModelCheckpoint):
    """ Model checkpoint callback that only starts after a given number of epochs.

    Args:
        epochs_before_start (int): Number of epochs before starting the checkpointing
    """

    def __init__(self, epochs_before_start: int = 3, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.epochs_before_start = epochs_before_start
        self.current_epoch = 0

    def on_batch_end(self, batch: int, logs: dict[str, Any] | None = None) -> None:
        if self.current_epoch >= self.epochs_before_start:
            super().on_batch_end(batch, logs)

    def on_epoch_end(self, epoch: int, logs: dict[str, Any] | None = None) -> None:
        self.current_epoch = epoch
        if epoch >= self.epochs_before_start:
            super().on_epoch_end(epoch, logs)
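Since everything after epochs_before_start is forwarded to keras.callbacks.ModelCheckpoint, the usual checkpoint arguments apply unchanged. A minimal sketch, assuming the import path from the files-changed list:

# Usage sketch; only epochs_before_start is specific to ModelCheckpointV2,
# the remaining keyword arguments are standard ModelCheckpoint options.
from stouputils.data_science.models.keras_utils.callbacks.model_checkpoint_v2 import ModelCheckpointV2

checkpoint = ModelCheckpointV2(
    epochs_before_start=3,        # skip the noisy first epochs
    filepath="best_model.keras",  # forwarded to ModelCheckpoint
    monitor="val_loss",
    save_best_only=True,
)
# model.fit(..., callbacks=[checkpoint])

# Keras passes 0-based epoch indices to on_epoch_end, so with
# epochs_before_start=3 the first save can happen at the end of the
# fourth epoch (epoch index 3).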