ilovetools 0.2.25__tar.gz → 0.2.26__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. {ilovetools-0.2.25/ilovetools.egg-info → ilovetools-0.2.26}/PKG-INFO +2 -2
  2. ilovetools-0.2.26/ilovetools/ml/dropout.py +546 -0
  3. {ilovetools-0.2.25 → ilovetools-0.2.26/ilovetools.egg-info}/PKG-INFO +2 -2
  4. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools.egg-info/SOURCES.txt +2 -0
  5. {ilovetools-0.2.25 → ilovetools-0.2.26}/pyproject.toml +2 -2
  6. {ilovetools-0.2.25 → ilovetools-0.2.26}/setup.py +2 -2
  7. ilovetools-0.2.26/tests/test_dropout.py +478 -0
  8. {ilovetools-0.2.25 → ilovetools-0.2.26}/LICENSE +0 -0
  9. {ilovetools-0.2.25 → ilovetools-0.2.26}/MANIFEST.in +0 -0
  10. {ilovetools-0.2.25 → ilovetools-0.2.26}/README.md +0 -0
  11. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/__init__.py +0 -0
  12. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ai/__init__.py +0 -0
  13. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ai/embeddings.py +0 -0
  14. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ai/inference.py +0 -0
  15. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ai/llm_helpers.py +0 -0
  16. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/audio/__init__.py +0 -0
  17. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/automation/__init__.py +0 -0
  18. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/automation/file_organizer.py +0 -0
  19. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/conversion/__init__.py +0 -0
  20. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/conversion/config_converter.py +0 -0
  21. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
  22. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/data/__init__.py +0 -0
  23. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/data/feature_engineering.py +0 -0
  24. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/data/preprocessing.py +0 -0
  25. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/database/__init__.py +0 -0
  26. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/datetime/__init__.py +0 -0
  27. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/email/__init__.py +0 -0
  28. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/email/template_engine.py +0 -0
  29. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/files/__init__.py +0 -0
  30. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/image/__init__.py +0 -0
  31. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/__init__.py +0 -0
  32. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/activations.py +0 -0
  33. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/anomaly_detection.py +0 -0
  34. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/attention.py +0 -0
  35. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/clustering.py +0 -0
  36. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/cnn.py +0 -0
  37. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/cross_validation.py +0 -0
  38. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/dimensionality.py +0 -0
  39. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/ensemble.py +0 -0
  40. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/feature_selection.py +0 -0
  41. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/gradient_descent.py +0 -0
  42. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/imbalanced.py +0 -0
  43. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/interpretation.py +0 -0
  44. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/loss_functions.py +0 -0
  45. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/lr_schedulers.py +0 -0
  46. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/metrics.py +0 -0
  47. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/neural_network.py +0 -0
  48. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/normalization.py +0 -0
  49. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/normalization_advanced.py +0 -0
  50. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/optimizers.py +0 -0
  51. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/pipeline.py +0 -0
  52. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/positional_encoding.py +0 -0
  53. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/regularization.py +0 -0
  54. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/rnn.py +0 -0
  55. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/timeseries.py +0 -0
  56. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/tuning.py +0 -0
  57. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/ml/weight_init.py +0 -0
  58. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/security/__init__.py +0 -0
  59. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/security/password_checker.py +0 -0
  60. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/text/__init__.py +0 -0
  61. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/utils/__init__.py +0 -0
  62. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/utils/cache_system.py +0 -0
  63. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/utils/logger.py +0 -0
  64. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/utils/rate_limiter.py +0 -0
  65. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/utils/retry.py +0 -0
  66. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/validation/__init__.py +0 -0
  67. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/validation/data_validator.py +0 -0
  68. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/web/__init__.py +0 -0
  69. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/web/scraper.py +0 -0
  70. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools/web/url_shortener.py +0 -0
  71. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools.egg-info/dependency_links.txt +0 -0
  72. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools.egg-info/requires.txt +0 -0
  73. {ilovetools-0.2.25 → ilovetools-0.2.26}/ilovetools.egg-info/top_level.txt +0 -0
  74. {ilovetools-0.2.25 → ilovetools-0.2.26}/requirements.txt +0 -0
  75. {ilovetools-0.2.25 → ilovetools-0.2.26}/setup.cfg +0 -0
  76. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/__init__.py +0 -0
  77. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_activations.py +0 -0
  78. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_attention.py +0 -0
  79. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_cnn.py +0 -0
  80. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_gradient_descent.py +0 -0
  81. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_loss_functions.py +0 -0
  82. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_lr_schedulers.py +0 -0
  83. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_neural_network.py +0 -0
  84. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_normalization.py +0 -0
  85. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_normalization_advanced.py +0 -0
  86. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_optimizers.py +0 -0
  87. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_positional_encoding.py +0 -0
  88. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_pypi_installation.py +0 -0
  89. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_regularization.py +0 -0
  90. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_rnn.py +0 -0
  91. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/test_weight_init.py +0 -0
  92. {ilovetools-0.2.25 → ilovetools-0.2.26}/tests/verify_positional_encoding.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.25
+ Version: 0.2.26
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,weight-initialization,xavier-initialization,he-initialization,kaiming-initialization,deep-learning,neural-networks
+ Keywords: utilities,tools,ai,ml,data-processing,automation,dropout,regularization,overfitting,spatial-dropout,variational-dropout,dropconnect,alpha-dropout,deep-learning,neural-networks
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -0,0 +1,546 @@
+ """
+ Dropout Regularization Techniques
+
+ This module implements various dropout regularization techniques for preventing
+ overfitting in neural networks. Dropout randomly deactivates neurons during training,
+ forcing the network to learn more robust features.
+
+ Implemented Techniques:
+ 1. Standard Dropout - Random neuron dropout
+ 2. Spatial Dropout (Dropout2D) - Drops entire feature maps
+ 3. Spatial Dropout 3D (Dropout3D) - Drops entire 3D feature maps
+ 4. Variational Dropout - Same mask across time steps
+ 5. DropConnect - Drops connections instead of neurons
+ 6. Alpha Dropout - Maintains mean and variance (for SELU)
+
+ References:
+ - "Dropout: A Simple Way to Prevent Neural Networks from Overfitting" (Srivastava et al., 2014)
+ - "Efficient Object Localization Using Convolutional Networks" (Tompson et al., 2015)
+ - "Variational Dropout and the Local Reparameterization Trick" (Kingma et al., 2015)
+ - "Regularization of Neural Networks using DropConnect" (Wan et al., 2013)
+ - "Self-Normalizing Neural Networks" (Klambauer et al., 2017)
+
+ Author: Ali Mehdi
+ Date: January 12, 2026
+ """
+
+ import numpy as np
+ from typing import Tuple, Optional, Union
+
+
+ class Dropout:
+     """
+     Standard Dropout implementation.
+
+     Randomly sets a fraction of input units to 0 at each update during training,
+     which helps prevent overfitting.
+
+     Args:
+         rate: Float between 0 and 1. Fraction of the input units to drop.
+         seed: Random seed for reproducibility.
+
+     Example:
+         >>> dropout = Dropout(rate=0.5)
+         >>> x = np.random.randn(32, 128)
+         >>> x_train = dropout(x, training=True)
+         >>> x_test = dropout(x, training=False)
+
+     Reference:
+         Srivastava et al., "Dropout: A Simple Way to Prevent Neural Networks
+         from Overfitting", JMLR 2014
+     """
+
+     def __init__(self, rate: float = 0.5, seed: Optional[int] = None):
+         if not 0 <= rate < 1:
+             raise ValueError(f"Dropout rate must be in [0, 1), got {rate}")
+
+         self.rate = rate
+         self.seed = seed
+         self._rng = np.random.RandomState(seed)
+
+     def __call__(self, x: np.ndarray, training: bool = True) -> np.ndarray:
+         """
+         Apply dropout to input.
+
+         Args:
+             x: Input array of any shape
+             training: Whether in training mode (apply dropout) or inference mode
+
+         Returns:
+             Output array with dropout applied (if training=True)
+         """
+         if not training or self.rate == 0:
+             return x
+
+         # Generate binary mask
+         keep_prob = 1 - self.rate
+         mask = self._rng.binomial(1, keep_prob, size=x.shape)
+
+         # Apply mask and scale
+         return x * mask / keep_prob
+
+     def get_mask(self, shape: Tuple[int, ...]) -> np.ndarray:
+         """Generate dropout mask for given shape."""
+         keep_prob = 1 - self.rate
+         return self._rng.binomial(1, keep_prob, size=shape)
+
+
+ class SpatialDropout2D:
+     """
+     Spatial Dropout for 2D inputs (e.g., images).
+
+     This version performs the same function as Dropout, but it drops
+     entire 2D feature maps instead of individual elements. This is useful for
+     convolutional layers, where adjacent pixels are highly correlated.
+
+     Args:
+         rate: Float between 0 and 1. Fraction of the feature maps to drop.
+         data_format: 'channels_first' or 'channels_last'
+         seed: Random seed for reproducibility.
+
+     Example:
+         >>> dropout = SpatialDropout2D(rate=0.2)
+         >>> x = np.random.randn(32, 64, 28, 28)  # (batch, channels, height, width)
+         >>> x_dropped = dropout(x, training=True)
+
+     Reference:
+         Tompson et al., "Efficient Object Localization Using Convolutional Networks",
+         CVPR 2015
+     """
+
+     def __init__(self, rate: float = 0.5, data_format: str = 'channels_first',
+                  seed: Optional[int] = None):
+         if not 0 <= rate < 1:
+             raise ValueError(f"Dropout rate must be in [0, 1), got {rate}")
+         if data_format not in ['channels_first', 'channels_last']:
+             raise ValueError(f"data_format must be 'channels_first' or 'channels_last', got {data_format!r}")
+
+         self.rate = rate
+         self.data_format = data_format
+         self.seed = seed
+         self._rng = np.random.RandomState(seed)
+
+     def __call__(self, x: np.ndarray, training: bool = True) -> np.ndarray:
+         """
+         Apply spatial dropout to input.
+
+         Args:
+             x: Input array of shape (batch, channels, height, width) for channels_first
+                 or (batch, height, width, channels) for channels_last
+             training: Whether in training mode
+
+         Returns:
+             Output array with spatial dropout applied
+         """
+         if not training or self.rate == 0:
+             return x
+
+         if x.ndim != 4:
+             raise ValueError(f"Expected 4D input, got shape {x.shape}")
+
+         keep_prob = 1 - self.rate
+
+         # Determine noise shape (drop entire feature maps)
+         if self.data_format == 'channels_first':
+             # (batch, channels, 1, 1)
+             noise_shape = (x.shape[0], x.shape[1], 1, 1)
+         else:
+             # (batch, 1, 1, channels)
+             noise_shape = (x.shape[0], 1, 1, x.shape[3])
+
+         # Generate mask and broadcast
+         mask = self._rng.binomial(1, keep_prob, size=noise_shape)
+         mask = np.broadcast_to(mask, x.shape)
+
+         return x * mask / keep_prob
+
+
+ class SpatialDropout3D:
+     """
+     Spatial Dropout for 3D inputs (e.g., 3D convolutions, video).
+
+     Drops entire 3D feature maps instead of individual elements.
+
+     Args:
+         rate: Float between 0 and 1. Fraction of the feature maps to drop.
+         data_format: 'channels_first' or 'channels_last'
+         seed: Random seed for reproducibility.
+
+     Example:
+         >>> dropout = SpatialDropout3D(rate=0.2)
+         >>> x = np.random.randn(16, 32, 10, 28, 28)  # (batch, channels, depth, height, width)
+         >>> x_dropped = dropout(x, training=True)
+     """
+
+     def __init__(self, rate: float = 0.5, data_format: str = 'channels_first',
+                  seed: Optional[int] = None):
+         if not 0 <= rate < 1:
+             raise ValueError(f"Dropout rate must be in [0, 1), got {rate}")
+         if data_format not in ['channels_first', 'channels_last']:
+             raise ValueError(f"data_format must be 'channels_first' or 'channels_last', got {data_format!r}")
+
+         self.rate = rate
+         self.data_format = data_format
+         self.seed = seed
+         self._rng = np.random.RandomState(seed)
+
+     def __call__(self, x: np.ndarray, training: bool = True) -> np.ndarray:
+         """Apply spatial dropout to 3D input."""
+         if not training or self.rate == 0:
+             return x
+
+         if x.ndim != 5:
+             raise ValueError(f"Expected 5D input, got shape {x.shape}")
+
+         keep_prob = 1 - self.rate
+
+         # Determine noise shape
+         if self.data_format == 'channels_first':
+             # (batch, channels, 1, 1, 1)
+             noise_shape = (x.shape[0], x.shape[1], 1, 1, 1)
+         else:
+             # (batch, 1, 1, 1, channels)
+             noise_shape = (x.shape[0], 1, 1, 1, x.shape[4])
+
+         # Generate mask and broadcast
+         mask = self._rng.binomial(1, keep_prob, size=noise_shape)
+         mask = np.broadcast_to(mask, x.shape)
+
+         return x * mask / keep_prob
+
+
+ class VariationalDropout:
+     """
+     Variational Dropout for RNNs.
+
+     Uses the same dropout mask across all time steps, which is more effective
+     for recurrent networks than applying different masks at each time step.
+
+     Args:
+         rate: Float between 0 and 1. Fraction of the input units to drop.
+         seed: Random seed for reproducibility.
+
+     Example:
+         >>> dropout = VariationalDropout(rate=0.3)
+         >>> x = np.random.randn(32, 10, 128)  # (batch, time_steps, features)
+         >>> x_dropped = dropout(x, training=True)
+
+     Reference:
+         Gal & Ghahramani, "A Theoretically Grounded Application of Dropout in
+         Recurrent Neural Networks", NIPS 2016
+     """
+
+     def __init__(self, rate: float = 0.5, seed: Optional[int] = None):
+         if not 0 <= rate < 1:
+             raise ValueError(f"Dropout rate must be in [0, 1), got {rate}")
+
+         self.rate = rate
+         self.seed = seed
+         self._rng = np.random.RandomState(seed)
+
+     def __call__(self, x: np.ndarray, training: bool = True) -> np.ndarray:
+         """
+         Apply variational dropout to input.
+
+         Args:
+             x: Input array of shape (batch, time_steps, features)
+             training: Whether in training mode
+
+         Returns:
+             Output array with same dropout mask across time steps
+         """
+         if not training or self.rate == 0:
+             return x
+
+         if x.ndim != 3:
+             raise ValueError(f"Expected 3D input (batch, time, features), got shape {x.shape}")
+
+         keep_prob = 1 - self.rate
+
+         # Generate mask for (batch, 1, features) and broadcast across time
+         noise_shape = (x.shape[0], 1, x.shape[2])
+         mask = self._rng.binomial(1, keep_prob, size=noise_shape)
+         mask = np.broadcast_to(mask, x.shape)
+
+         return x * mask / keep_prob
+
+
+ class DropConnect:
+     """
+     DropConnect implementation.
+
+     Instead of dropping neuron activations, DropConnect drops connections (weights)
+     between neurons. This provides stronger regularization than standard dropout.
+
+     Args:
+         rate: Float between 0 and 1. Fraction of connections to drop.
+         seed: Random seed for reproducibility.
+
+     Example:
+         >>> dropconnect = DropConnect(rate=0.5)
+         >>> x = np.random.randn(32, 128)
+         >>> W = np.random.randn(128, 64)
+         >>> output = dropconnect.apply(x, W, training=True)
+
+     Reference:
+         Wan et al., "Regularization of Neural Networks using DropConnect", ICML 2013
+     """
+
+     def __init__(self, rate: float = 0.5, seed: Optional[int] = None):
+         if not 0 <= rate < 1:
+             raise ValueError(f"Dropout rate must be in [0, 1), got {rate}")
+
+         self.rate = rate
+         self.seed = seed
+         self._rng = np.random.RandomState(seed)
+
+     def apply(self, x: np.ndarray, weights: np.ndarray,
+               training: bool = True) -> np.ndarray:
+         """
+         Apply DropConnect to weight matrix.
+
+         Args:
+             x: Input array of shape (batch, input_dim)
+             weights: Weight matrix of shape (input_dim, output_dim)
+             training: Whether in training mode
+
+         Returns:
+             Output array of shape (batch, output_dim)
+         """
+         if not training or self.rate == 0:
+             return np.dot(x, weights)
+
+         keep_prob = 1 - self.rate
+
+         # Drop connections (weights)
+         mask = self._rng.binomial(1, keep_prob, size=weights.shape)
+         masked_weights = weights * mask / keep_prob
+
+         return np.dot(x, masked_weights)
+
+     def get_mask(self, shape: Tuple[int, ...]) -> np.ndarray:
+         """Generate DropConnect mask for given weight shape."""
+         keep_prob = 1 - self.rate
+         return self._rng.binomial(1, keep_prob, size=shape)
+
+
+ class AlphaDropout:
+     """
+     Alpha Dropout for Self-Normalizing Neural Networks (SNNs).
+
+     Alpha Dropout maintains the self-normalizing property of SNNs by ensuring
+     that mean and variance are preserved. Works with SELU activation.
+
+     Args:
+         rate: Float between 0 and 1. Fraction of the input units to drop.
+         seed: Random seed for reproducibility.
+
+     Example:
+         >>> dropout = AlphaDropout(rate=0.1)
+         >>> x = np.random.randn(32, 128)
+         >>> x_dropped = dropout(x, training=True)
+
+     Reference:
+         Klambauer et al., "Self-Normalizing Neural Networks", NIPS 2017
+     """
+
+     def __init__(self, rate: float = 0.5, seed: Optional[int] = None):
+         if not 0 <= rate < 1:
+             raise ValueError(f"Dropout rate must be in [0, 1), got {rate}")
+
+         self.rate = rate
+         self.seed = seed
+         self._rng = np.random.RandomState(seed)
+
+         # SELU parameters
+         self.alpha = 1.6732632423543772848170429916717
+         self.scale = 1.0507009873554804934193349852946
+
+         # Alpha dropout parameters
+         self.alpha_p = -self.alpha * self.scale
+
+     def __call__(self, x: np.ndarray, training: bool = True) -> np.ndarray:
+         """
+         Apply alpha dropout to input.
+
+         Args:
+             x: Input array of any shape
+             training: Whether in training mode
+
+         Returns:
+             Output array with alpha dropout applied
+         """
+         if not training or self.rate == 0:
+             return x
+
+         keep_prob = 1 - self.rate
+
+         # Calculate affine transformation parameters
+         a = ((1 - self.rate) * (1 + self.rate * self.alpha_p ** 2)) ** -0.5
+         b = -a * self.alpha_p * self.rate
+
+         # Generate mask
+         mask = self._rng.binomial(1, keep_prob, size=x.shape)
+
+         # Apply alpha dropout
+         x_dropped = np.where(mask, x, self.alpha_p)
+
+         # Affine transformation to preserve mean and variance
+         return a * x_dropped + b
+
+
+ # Convenience functions
+ def dropout(x: np.ndarray, rate: float = 0.5, training: bool = True,
+             seed: Optional[int] = None) -> np.ndarray:
+     """
+     Apply standard dropout to input.
+
+     Args:
+         x: Input array
+         rate: Dropout rate (fraction to drop)
+         training: Whether in training mode
+         seed: Random seed
+
+     Returns:
+         Output with dropout applied
+
+     Example:
+         >>> x = np.random.randn(32, 128)
+         >>> x_dropped = dropout(x, rate=0.5, training=True)
+     """
+     drop = Dropout(rate=rate, seed=seed)
+     return drop(x, training=training)
+
+
+ def spatial_dropout_2d(x: np.ndarray, rate: float = 0.5, training: bool = True,
+                        data_format: str = 'channels_first',
+                        seed: Optional[int] = None) -> np.ndarray:
+     """
+     Apply spatial dropout to 2D input.
+
+     Args:
+         x: Input array of shape (batch, channels, height, width) or
+             (batch, height, width, channels)
+         rate: Dropout rate
+         training: Whether in training mode
+         data_format: 'channels_first' or 'channels_last'
+         seed: Random seed
+
+     Returns:
+         Output with spatial dropout applied
+
+     Example:
+         >>> x = np.random.randn(32, 64, 28, 28)
+         >>> x_dropped = spatial_dropout_2d(x, rate=0.2, training=True)
+     """
+     drop = SpatialDropout2D(rate=rate, data_format=data_format, seed=seed)
+     return drop(x, training=training)
+
+
+ def spatial_dropout_3d(x: np.ndarray, rate: float = 0.5, training: bool = True,
+                        data_format: str = 'channels_first',
+                        seed: Optional[int] = None) -> np.ndarray:
+     """
+     Apply spatial dropout to 3D input.
+
+     Args:
+         x: Input array of shape (batch, channels, depth, height, width) or
+             (batch, depth, height, width, channels)
+         rate: Dropout rate
+         training: Whether in training mode
+         data_format: 'channels_first' or 'channels_last'
+         seed: Random seed
+
+     Returns:
+         Output with spatial dropout applied
+
+     Example:
+         >>> x = np.random.randn(16, 32, 10, 28, 28)
+         >>> x_dropped = spatial_dropout_3d(x, rate=0.2, training=True)
+     """
+     drop = SpatialDropout3D(rate=rate, data_format=data_format, seed=seed)
+     return drop(x, training=training)
+
+
+ def variational_dropout(x: np.ndarray, rate: float = 0.5, training: bool = True,
+                         seed: Optional[int] = None) -> np.ndarray:
+     """
+     Apply variational dropout to sequential input.
+
+     Args:
+         x: Input array of shape (batch, time_steps, features)
+         rate: Dropout rate
+         training: Whether in training mode
+         seed: Random seed
+
+     Returns:
+         Output with variational dropout applied
+
+     Example:
+         >>> x = np.random.randn(32, 10, 128)
+         >>> x_dropped = variational_dropout(x, rate=0.3, training=True)
+     """
+     drop = VariationalDropout(rate=rate, seed=seed)
+     return drop(x, training=training)
+
+
+ def dropconnect(x: np.ndarray, weights: np.ndarray, rate: float = 0.5,
+                 training: bool = True, seed: Optional[int] = None) -> np.ndarray:
+     """
+     Apply DropConnect to weight matrix.
+
+     Args:
+         x: Input array of shape (batch, input_dim)
+         weights: Weight matrix of shape (input_dim, output_dim)
+         rate: Dropout rate
+         training: Whether in training mode
+         seed: Random seed
+
+     Returns:
+         Output array of shape (batch, output_dim)
+
+     Example:
+         >>> x = np.random.randn(32, 128)
+         >>> W = np.random.randn(128, 64)
+         >>> output = dropconnect(x, W, rate=0.5, training=True)
+     """
+     drop = DropConnect(rate=rate, seed=seed)
+     return drop.apply(x, weights, training=training)
+
+
+ def alpha_dropout(x: np.ndarray, rate: float = 0.5, training: bool = True,
+                   seed: Optional[int] = None) -> np.ndarray:
+     """
+     Apply alpha dropout to input.
+
+     Args:
+         x: Input array
+         rate: Dropout rate
+         training: Whether in training mode
+         seed: Random seed
+
+     Returns:
+         Output with alpha dropout applied
+
+     Example:
+         >>> x = np.random.randn(32, 128)
+         >>> x_dropped = alpha_dropout(x, rate=0.1, training=True)
+     """
+     drop = AlphaDropout(rate=rate, seed=seed)
+     return drop(x, training=training)
+
+
+ __all__ = [
+     'Dropout',
+     'SpatialDropout2D',
+     'SpatialDropout3D',
+     'VariationalDropout',
+     'DropConnect',
+     'AlphaDropout',
+     'dropout',
+     'spatial_dropout_2d',
+     'spatial_dropout_3d',
+     'variational_dropout',
+     'dropconnect',
+     'alpha_dropout',
+ ]
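
The new ilovetools/ml/dropout.py module above is pure NumPy, so its behavior can be exercised without a deep-learning framework. Below is a minimal usage sketch, assuming the released package exposes the file as ilovetools.ml.dropout (consistent with the SOURCES.txt entry in this diff); the array shapes are illustrative, not part of the release:

import numpy as np

from ilovetools.ml.dropout import Dropout, SpatialDropout2D, VariationalDropout

# Standard (inverted) dropout: surviving units are scaled by 1/keep_prob
# during training, so inference is a plain pass-through with no rescaling.
drop = Dropout(rate=0.5, seed=0)
x = np.random.randn(32, 128)
x_train = drop(x, training=True)                   # ~half the units zeroed, rest doubled
print(np.array_equal(drop(x, training=False), x))  # True: identity at inference

# Spatial dropout zeroes whole feature maps rather than scattered elements.
sd = SpatialDropout2D(rate=0.2, data_format='channels_first', seed=0)
imgs = np.random.randn(8, 16, 28, 28)              # (batch, channels, height, width)
out = sd(imgs, training=True)
dropped = (out.reshape(8, 16, -1) == 0).all(axis=-1)
print(dropped.sum(axis=1))                         # fully-zeroed maps per sample, ~0.2 * 16

# Variational dropout reuses one mask across every time step.
vd = VariationalDropout(rate=0.3, seed=0)
seq = np.random.randn(4, 10, 64)                   # (batch, time_steps, features)
zeros = vd(seq, training=True) == 0
print(bool((zeros == zeros[:, :1, :]).all()))      # True: same mask at each step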
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.25
+ Version: 0.2.26
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,weight-initialization,xavier-initialization,he-initialization,kaiming-initialization,deep-learning,neural-networks
+ Keywords: utilities,tools,ai,ml,data-processing,automation,dropout,regularization,overfitting,spatial-dropout,variational-dropout,dropconnect,alpha-dropout,deep-learning,neural-networks
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -37,6 +37,7 @@ ilovetools/ml/clustering.py
  ilovetools/ml/cnn.py
  ilovetools/ml/cross_validation.py
  ilovetools/ml/dimensionality.py
+ ilovetools/ml/dropout.py
  ilovetools/ml/ensemble.py
  ilovetools/ml/feature_selection.py
  ilovetools/ml/gradient_descent.py
@@ -73,6 +74,7 @@ tests/__init__.py
  tests/test_activations.py
  tests/test_attention.py
  tests/test_cnn.py
+ tests/test_dropout.py
  tests/test_gradient_descent.py
  tests/test_loss_functions.py
  tests/test_lr_schedulers.py
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
  [project]
  name = "ilovetools"
- version = "0.2.25"
+ version = "0.2.26"
  description = "A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -12,7 +12,7 @@ license = "MIT"
  authors = [
      {name = "Ali Mehdi", email = "ali.mehdi.dev579@gmail.com"}
  ]
- keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "weight-initialization", "xavier-initialization", "he-initialization", "kaiming-initialization", "deep-learning", "neural-networks"]
+ keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "dropout", "regularization", "overfitting", "spatial-dropout", "variational-dropout", "dropconnect", "alpha-dropout", "deep-learning", "neural-networks"]
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
  setup(
      name="ilovetools",
-     version="0.2.25",
+     version="0.2.26",
      author="Ali Mehdi",
      author_email="ali.mehdi.dev579@gmail.com",
      description="A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs",
@@ -57,7 +57,7 @@ setup(
              "soundfile>=0.12.0",
          ],
      },
-     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, weight-initialization, xavier-initialization, he-initialization, kaiming-initialization, deep-learning",
+     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, dropout, regularization, overfitting, spatial-dropout, variational-dropout, dropconnect, alpha-dropout, deep-learning",
      project_urls={
          "Bug Reports": "https://github.com/AliMehdi512/ilovetools/issues",
          "Source": "https://github.com/AliMehdi512/ilovetools",