ilovetools 0.2.20__tar.gz → 0.2.21__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. {ilovetools-0.2.20/ilovetools.egg-info → ilovetools-0.2.21}/PKG-INFO +2 -2
  2. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/__init__.py +2 -2
  3. ilovetools-0.2.21/ilovetools/ml/normalization_advanced.py +471 -0
  4. {ilovetools-0.2.20 → ilovetools-0.2.21/ilovetools.egg-info}/PKG-INFO +2 -2
  5. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools.egg-info/SOURCES.txt +2 -0
  6. {ilovetools-0.2.20 → ilovetools-0.2.21}/pyproject.toml +2 -2
  7. {ilovetools-0.2.20 → ilovetools-0.2.21}/setup.py +2 -2
  8. ilovetools-0.2.21/tests/test_normalization_advanced.py +375 -0
  9. {ilovetools-0.2.20 → ilovetools-0.2.21}/LICENSE +0 -0
  10. {ilovetools-0.2.20 → ilovetools-0.2.21}/MANIFEST.in +0 -0
  11. {ilovetools-0.2.20 → ilovetools-0.2.21}/README.md +0 -0
  12. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ai/__init__.py +0 -0
  13. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ai/embeddings.py +0 -0
  14. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ai/inference.py +0 -0
  15. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ai/llm_helpers.py +0 -0
  16. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/audio/__init__.py +0 -0
  17. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/automation/__init__.py +0 -0
  18. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/automation/file_organizer.py +0 -0
  19. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/conversion/__init__.py +0 -0
  20. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/conversion/config_converter.py +0 -0
  21. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
  22. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/data/__init__.py +0 -0
  23. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/data/feature_engineering.py +0 -0
  24. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/data/preprocessing.py +0 -0
  25. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/database/__init__.py +0 -0
  26. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/datetime/__init__.py +0 -0
  27. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/email/__init__.py +0 -0
  28. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/email/template_engine.py +0 -0
  29. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/files/__init__.py +0 -0
  30. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/image/__init__.py +0 -0
  31. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/__init__.py +0 -0
  32. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/activations.py +0 -0
  33. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/anomaly_detection.py +0 -0
  34. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/attention.py +0 -0
  35. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/clustering.py +0 -0
  36. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/cnn.py +0 -0
  37. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/cross_validation.py +0 -0
  38. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/dimensionality.py +0 -0
  39. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/ensemble.py +0 -0
  40. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/feature_selection.py +0 -0
  41. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/gradient_descent.py +0 -0
  42. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/imbalanced.py +0 -0
  43. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/interpretation.py +0 -0
  44. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/loss_functions.py +0 -0
  45. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/metrics.py +0 -0
  46. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/neural_network.py +0 -0
  47. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/normalization.py +0 -0
  48. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/optimizers.py +0 -0
  49. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/pipeline.py +0 -0
  50. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/regularization.py +0 -0
  51. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/rnn.py +0 -0
  52. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/timeseries.py +0 -0
  53. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/ml/tuning.py +0 -0
  54. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/security/__init__.py +0 -0
  55. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/security/password_checker.py +0 -0
  56. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/text/__init__.py +0 -0
  57. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/utils/__init__.py +0 -0
  58. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/utils/cache_system.py +0 -0
  59. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/utils/logger.py +0 -0
  60. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/utils/rate_limiter.py +0 -0
  61. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/utils/retry.py +0 -0
  62. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/validation/__init__.py +0 -0
  63. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/validation/data_validator.py +0 -0
  64. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/web/__init__.py +0 -0
  65. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/web/scraper.py +0 -0
  66. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools/web/url_shortener.py +0 -0
  67. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools.egg-info/dependency_links.txt +0 -0
  68. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools.egg-info/requires.txt +0 -0
  69. {ilovetools-0.2.20 → ilovetools-0.2.21}/ilovetools.egg-info/top_level.txt +0 -0
  70. {ilovetools-0.2.20 → ilovetools-0.2.21}/requirements.txt +0 -0
  71. {ilovetools-0.2.20 → ilovetools-0.2.21}/setup.cfg +0 -0
  72. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/__init__.py +0 -0
  73. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_activations.py +0 -0
  74. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_attention.py +0 -0
  75. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_cnn.py +0 -0
  76. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_gradient_descent.py +0 -0
  77. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_loss_functions.py +0 -0
  78. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_neural_network.py +0 -0
  79. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_normalization.py +0 -0
  80. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_optimizers.py +0 -0
  81. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_regularization.py +0 -0
  82. {ilovetools-0.2.20 → ilovetools-0.2.21}/tests/test_rnn.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.20
+ Version: 0.2.21
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,rnn,lstm,gru,recurrent-neural-networks,sequence-modeling,nlp
+ Keywords: utilities,tools,ai,ml,data-processing,automation,batch-normalization,layer-normalization,normalization-techniques,deep-learning
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -2,8 +2,8 @@
  ilovetools - A comprehensive Python utility library
  """

- __version__ = "0.2.19"
- # release marker: 0.2.19
+ __version__ = "0.2.20"
+ # release marker: 0.2.20
  __author__ = "Ali Mehdi"
  __email__ = "ali.mehdi.dev579@gmail.com"

@@ -0,0 +1,471 @@
+ """
+ Advanced Normalization Techniques
+
+ This module provides advanced normalization methods:
+ - Batch Normalization (BatchNorm)
+ - Layer Normalization (LayerNorm)
+ - Instance Normalization (InstanceNorm)
+ - Group Normalization (GroupNorm)
+ - Weight Normalization
+ - Spectral Normalization
+
+ All operations support batched inputs and are optimized for deep learning.
+ """
+
+ import numpy as np
+ from typing import Tuple, Optional
+
+
+ # ============================================================================
+ # BATCH NORMALIZATION
+ # ============================================================================
+
+ def batch_norm_forward(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     running_mean: Optional[np.ndarray] = None,
+     running_var: Optional[np.ndarray] = None,
+     momentum: float = 0.9,
+     eps: float = 1e-5,
+     training: bool = True
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+     """
+     Batch Normalization forward pass
+
+     Normalizes across the batch dimension. Used in CNNs.
+
+     Formula:
+         μ_B = (1/m) Σ x_i
+         σ²_B = (1/m) Σ (x_i - μ_B)²
+         x̂ = (x - μ_B) / √(σ²_B + ε)
+         y = γ x̂ + β
+
+     Args:
+         x: Input tensor, shape (batch, channels, height, width) or (batch, features)
+         gamma: Scale parameter, shape (channels,) or (features,)
+         beta: Shift parameter, shape (channels,) or (features,)
+         running_mean: Running mean for inference
+         running_var: Running variance for inference
+         momentum: Momentum for running statistics
+         eps: Small constant for numerical stability
+         training: Whether in training mode
+
+     Returns:
+         Tuple of (output, updated_running_mean, updated_running_var)
+
+     Example:
+         >>> # For CNNs
+         >>> x = np.random.randn(32, 64, 28, 28)  # (batch, channels, H, W)
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> output, mean, var = batch_norm_forward(x, gamma, beta, training=True)
+         >>> print(output.shape)  # (32, 64, 28, 28)
+
+         >>> # For fully connected
+         >>> x = np.random.randn(32, 256)  # (batch, features)
+         >>> gamma = np.ones(256)
+         >>> beta = np.zeros(256)
+         >>> output, mean, var = batch_norm_forward(x, gamma, beta, training=True)
+         >>> print(output.shape)  # (32, 256)
+     """
+     if training:
+         # Calculate batch statistics
+         if x.ndim == 4:  # Conv: (batch, channels, height, width)
+             # Mean and variance across batch, height, width
+             axes = (0, 2, 3)
+             mean = np.mean(x, axis=axes, keepdims=True)
+             var = np.var(x, axis=axes, keepdims=True)
+
+             # Squeeze for running stats
+             mean_squeeze = np.squeeze(mean, axis=(0, 2, 3))
+             var_squeeze = np.squeeze(var, axis=(0, 2, 3))
+         else:  # FC: (batch, features)
+             axes = 0
+             mean = np.mean(x, axis=axes, keepdims=True)
+             var = np.var(x, axis=axes, keepdims=True)
+
+             mean_squeeze = np.squeeze(mean, axis=0)
+             var_squeeze = np.squeeze(var, axis=0)
+
+         # Update running statistics
+         if running_mean is None:
+             running_mean = mean_squeeze
+         else:
+             running_mean = momentum * running_mean + (1 - momentum) * mean_squeeze
+
+         if running_var is None:
+             running_var = var_squeeze
+         else:
+             running_var = momentum * running_var + (1 - momentum) * var_squeeze
+
+         # Normalize
+         x_normalized = (x - mean) / np.sqrt(var + eps)
+     else:
+         # Use running statistics for inference
+         if running_mean is None or running_var is None:
+             raise ValueError("Running statistics required for inference mode")
+
+         if x.ndim == 4:
+             mean = running_mean.reshape(1, -1, 1, 1)
+             var = running_var.reshape(1, -1, 1, 1)
+         else:
+             mean = running_mean.reshape(1, -1)
+             var = running_var.reshape(1, -1)
+
+         x_normalized = (x - mean) / np.sqrt(var + eps)
+
+     # Scale and shift
+     if x.ndim == 4:
+         gamma = gamma.reshape(1, -1, 1, 1)
+         beta = beta.reshape(1, -1, 1, 1)
+     else:
+         gamma = gamma.reshape(1, -1)
+         beta = beta.reshape(1, -1)
+
+     output = gamma * x_normalized + beta
+
+     return output, running_mean, running_var
+
+
+ # ============================================================================
+ # LAYER NORMALIZATION
+ # ============================================================================
+
+ def layer_norm_forward(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     eps: float = 1e-5
+ ) -> np.ndarray:
+     """
+     Layer Normalization forward pass
+
+     Normalizes across the feature dimension. Used in Transformers and RNNs.
+
+     Formula:
+         μ = (1/H) Σ x_i
+         σ² = (1/H) Σ (x_i - μ)²
+         x̂ = (x - μ) / √(σ² + ε)
+         y = γ x̂ + β
+
+     Args:
+         x: Input tensor, shape (batch, features) or (batch, seq_len, features)
+         gamma: Scale parameter, shape (features,)
+         beta: Shift parameter, shape (features,)
+         eps: Small constant for numerical stability
+
+     Returns:
+         Normalized output with same shape as input
+
+     Example:
+         >>> # For Transformers
+         >>> x = np.random.randn(32, 10, 512)  # (batch, seq_len, features)
+         >>> gamma = np.ones(512)
+         >>> beta = np.zeros(512)
+         >>> output = layer_norm_forward(x, gamma, beta)
+         >>> print(output.shape)  # (32, 10, 512)
+
+         >>> # For fully connected
+         >>> x = np.random.randn(32, 256)  # (batch, features)
+         >>> gamma = np.ones(256)
+         >>> beta = np.zeros(256)
+         >>> output = layer_norm_forward(x, gamma, beta)
+         >>> print(output.shape)  # (32, 256)
+     """
+     # Calculate mean and variance across feature dimension
+     if x.ndim == 3:  # (batch, seq_len, features)
+         axes = -1
+         mean = np.mean(x, axis=axes, keepdims=True)
+         var = np.var(x, axis=axes, keepdims=True)
+     elif x.ndim == 2:  # (batch, features)
+         axes = -1
+         mean = np.mean(x, axis=axes, keepdims=True)
+         var = np.var(x, axis=axes, keepdims=True)
+     else:
+         raise ValueError(f"Unsupported input shape: {x.shape}")
+
+     # Normalize
+     x_normalized = (x - mean) / np.sqrt(var + eps)
+
+     # Scale and shift
+     output = gamma * x_normalized + beta
+
+     return output
+
+
+ # ============================================================================
+ # INSTANCE NORMALIZATION
+ # ============================================================================
+
+ def instance_norm_forward(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     eps: float = 1e-5
+ ) -> np.ndarray:
+     """
+     Instance Normalization forward pass
+
+     Normalizes each sample and channel independently. Used in style transfer.
+
+     Args:
+         x: Input tensor, shape (batch, channels, height, width)
+         gamma: Scale parameter, shape (channels,)
+         beta: Shift parameter, shape (channels,)
+         eps: Small constant for numerical stability
+
+     Returns:
+         Normalized output with same shape as input
+
+     Example:
+         >>> x = np.random.randn(32, 64, 28, 28)  # (batch, channels, H, W)
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> output = instance_norm_forward(x, gamma, beta)
+         >>> print(output.shape)  # (32, 64, 28, 28)
+     """
+     if x.ndim != 4:
+         raise ValueError("Instance normalization requires 4D input (batch, channels, H, W)")
+
+     # Calculate mean and variance per instance, per channel
+     # Normalize across spatial dimensions (H, W)
+     axes = (2, 3)
+     mean = np.mean(x, axis=axes, keepdims=True)
+     var = np.var(x, axis=axes, keepdims=True)
+
+     # Normalize
+     x_normalized = (x - mean) / np.sqrt(var + eps)
+
+     # Scale and shift
+     gamma = gamma.reshape(1, -1, 1, 1)
+     beta = beta.reshape(1, -1, 1, 1)
+     output = gamma * x_normalized + beta
+
+     return output
+
+
+ # ============================================================================
+ # GROUP NORMALIZATION
+ # ============================================================================
+
+ def group_norm_forward(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     num_groups: int = 32,
+     eps: float = 1e-5
+ ) -> np.ndarray:
+     """
+     Group Normalization forward pass
+
+     Divides channels into groups and normalizes within each group.
+     Good for small batch sizes.
+
+     Args:
+         x: Input tensor, shape (batch, channels, height, width)
+         gamma: Scale parameter, shape (channels,)
+         beta: Shift parameter, shape (channels,)
+         num_groups: Number of groups to divide channels into
+         eps: Small constant for numerical stability
+
+     Returns:
+         Normalized output with same shape as input
+
+     Example:
+         >>> x = np.random.randn(32, 64, 28, 28)  # (batch, channels, H, W)
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> output = group_norm_forward(x, gamma, beta, num_groups=32)
+         >>> print(output.shape)  # (32, 64, 28, 28)
+     """
+     if x.ndim != 4:
+         raise ValueError("Group normalization requires 4D input (batch, channels, H, W)")
+
+     batch_size, channels, height, width = x.shape
+
+     if channels % num_groups != 0:
+         raise ValueError(f"Number of channels ({channels}) must be divisible by num_groups ({num_groups})")
+
+     # Reshape to separate groups
+     x_grouped = x.reshape(batch_size, num_groups, channels // num_groups, height, width)
+
+     # Calculate mean and variance per group
+     axes = (2, 3, 4)
+     mean = np.mean(x_grouped, axis=axes, keepdims=True)
+     var = np.var(x_grouped, axis=axes, keepdims=True)
+
+     # Normalize
+     x_normalized = (x_grouped - mean) / np.sqrt(var + eps)
+
+     # Reshape back
+     x_normalized = x_normalized.reshape(batch_size, channels, height, width)
+
+     # Scale and shift
+     gamma = gamma.reshape(1, -1, 1, 1)
+     beta = beta.reshape(1, -1, 1, 1)
+     output = gamma * x_normalized + beta
+
+     return output
+
+
+ # ============================================================================
+ # WEIGHT NORMALIZATION
+ # ============================================================================
+
+ def weight_norm(
+     weight: np.ndarray,
+     dim: int = 0
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """
+     Weight Normalization
+
+     Reparameterizes weight vectors to decouple magnitude and direction.
+
+     Formula:
+         w = g * (v / ||v||)
+
+     Args:
+         weight: Weight tensor
+         dim: Dimension along which to compute norm
+
+     Returns:
+         Tuple of (normalized_weight, norm)
+
+     Example:
+         >>> weight = np.random.randn(64, 128)  # (out_features, in_features)
+         >>> w_normalized, g = weight_norm(weight, dim=1)
+         >>> print(w_normalized.shape)  # (64, 128)
+         >>> print(g.shape)  # (64, 1)
+     """
+     # Calculate norm along specified dimension
+     norm = np.linalg.norm(weight, axis=dim, keepdims=True)
+
+     # Normalize
+     normalized_weight = weight / (norm + 1e-8)
+
+     return normalized_weight, norm
+
+
+ # ============================================================================
+ # SPECTRAL NORMALIZATION
+ # ============================================================================
+
+ def spectral_norm(
+     weight: np.ndarray,
+     num_iterations: int = 1
+ ) -> np.ndarray:
+     """
+     Spectral Normalization
+
+     Normalizes weight matrix by its largest singular value.
+     Used in GANs for training stability.
+
+     Args:
+         weight: Weight tensor, shape (out_features, in_features)
+         num_iterations: Number of power iterations
+
+     Returns:
+         Spectrally normalized weight
+
+     Example:
+         >>> weight = np.random.randn(64, 128)
+         >>> w_normalized = spectral_norm(weight, num_iterations=1)
+         >>> print(w_normalized.shape)  # (64, 128)
+     """
+     # Reshape weight to 2D if needed
+     original_shape = weight.shape
+     if weight.ndim > 2:
+         weight = weight.reshape(weight.shape[0], -1)
+
+     # Power iteration to estimate largest singular value
+     u = np.random.randn(weight.shape[0])
+     u = u / np.linalg.norm(u)
+
+     for _ in range(num_iterations):
+         v = np.dot(weight.T, u)
+         v = v / np.linalg.norm(v)
+         u = np.dot(weight, v)
+         u = u / np.linalg.norm(u)
+
+     # Calculate spectral norm (largest singular value)
+     sigma = np.dot(u, np.dot(weight, v))
+
+     # Normalize by spectral norm
+     normalized_weight = weight / sigma
+
+     # Reshape back to original shape
+     normalized_weight = normalized_weight.reshape(original_shape)
+
+     return normalized_weight
+
+
+ # ============================================================================
+ # UTILITY FUNCTIONS
+ # ============================================================================
+
+ def initialize_norm_params(
+     num_features: int
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """
+     Initialize normalization parameters
+
+     Args:
+         num_features: Number of features/channels
+
+     Returns:
+         Tuple of (gamma, beta)
+         - gamma: Scale parameter initialized to 1
+         - beta: Shift parameter initialized to 0
+
+     Example:
+         >>> gamma, beta = initialize_norm_params(256)
+         >>> print(gamma.shape, beta.shape)  # (256,) (256,)
+     """
+     gamma = np.ones(num_features)
+     beta = np.zeros(num_features)
+     return gamma, beta
+
+
+ def compute_norm_stats(
+     x: np.ndarray,
+     norm_type: str = 'batch'
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """
+     Compute normalization statistics
+
+     Args:
+         x: Input tensor
+         norm_type: Type of normalization ('batch', 'layer', or 'instance')
+
+     Returns:
+         Tuple of (mean, variance)
+
+     Example:
+         >>> x = np.random.randn(32, 64, 28, 28)
+         >>> mean, var = compute_norm_stats(x, norm_type='batch')
+         >>> print(mean.shape, var.shape)
+     """
+     if norm_type == 'batch':
+         if x.ndim == 4:
+             axes = (0, 2, 3)
+         else:
+             axes = 0
+     elif norm_type == 'layer':
+         axes = -1
+     elif norm_type == 'instance':
+         axes = (2, 3)
+     else:
+         raise ValueError(f"Unknown norm_type: {norm_type}")
+
+     mean = np.mean(x, axis=axes, keepdims=True)
+     var = np.var(x, axis=axes, keepdims=True)
+
+     return mean, var
+
+
+ # Aliases for convenience
+ batch_norm = batch_norm_forward
+ layer_norm = layer_norm_forward
+ instance_norm = instance_norm_forward
+ group_norm = group_norm_forward
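
For orientation, here is a minimal usage sketch of the new module. It is based only on the signatures visible in the diff above, assumes ilovetools 0.2.21 is installed, and uses illustrative array shapes and variable names:

import numpy as np
from ilovetools.ml.normalization_advanced import (
    batch_norm_forward,
    layer_norm_forward,
    initialize_norm_params,
)

# Batch norm over CNN-style activations: (batch, channels, height, width)
x = np.random.randn(8, 16, 14, 14)
gamma, beta = initialize_norm_params(16)
out, running_mean, running_var = batch_norm_forward(x, gamma, beta, training=True)

# At inference time, reuse the accumulated running statistics
out_eval, _, _ = batch_norm_forward(
    x, gamma, beta, running_mean, running_var, training=False
)

# Layer norm over Transformer-style activations: (batch, seq_len, features)
h = np.random.randn(8, 10, 64)
g, b = initialize_norm_params(64)
h_norm = layer_norm_forward(h, g, b)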
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.20
+ Version: 0.2.21
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,rnn,lstm,gru,recurrent-neural-networks,sequence-modeling,nlp
+ Keywords: utilities,tools,ai,ml,data-processing,automation,batch-normalization,layer-normalization,normalization-techniques,deep-learning
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -46,6 +46,7 @@ ilovetools/ml/loss_functions.py
  ilovetools/ml/metrics.py
  ilovetools/ml/neural_network.py
  ilovetools/ml/normalization.py
+ ilovetools/ml/normalization_advanced.py
  ilovetools/ml/optimizers.py
  ilovetools/ml/pipeline.py
  ilovetools/ml/regularization.py
@@ -73,6 +74,7 @@ tests/test_gradient_descent.py
  tests/test_loss_functions.py
  tests/test_neural_network.py
  tests/test_normalization.py
+ tests/test_normalization_advanced.py
  tests/test_optimizers.py
  tests/test_regularization.py
  tests/test_rnn.py
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "ilovetools"
- version = "0.2.20"
+ version = "0.2.21"
  description = "A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -12,7 +12,7 @@ license = "MIT"
  authors = [
      {name = "Ali Mehdi", email = "ali.mehdi.dev579@gmail.com"}
  ]
- keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "rnn", "lstm", "gru", "recurrent-neural-networks", "sequence-modeling", "nlp"]
+ keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "batch-normalization", "layer-normalization", "normalization-techniques", "deep-learning"]
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:

  setup(
      name="ilovetools",
-     version="0.2.20",
+     version="0.2.21",
      author="Ali Mehdi",
      author_email="ali.mehdi.dev579@gmail.com",
      description="A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs",
@@ -57,7 +57,7 @@ setup(
              "soundfile>=0.12.0",
          ],
      },
-     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, rnn, lstm, gru, recurrent-neural-networks, sequence-modeling, nlp",
+     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, batch-normalization, layer-normalization, normalization-techniques, deep-learning",
      project_urls={
          "Bug Reports": "https://github.com/AliMehdi512/ilovetools/issues",
          "Source": "https://github.com/AliMehdi512/ilovetools",
@@ -0,0 +1,375 @@
+ """
+ Tests for advanced normalization techniques module
+ """
+
+ import numpy as np
+ import sys
+ import os
+
+ # Add parent directory to path
+ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+ from ilovetools.ml.normalization_advanced import (
+     # Batch Normalization
+     batch_norm_forward,
+     # Layer Normalization
+     layer_norm_forward,
+     # Instance Normalization
+     instance_norm_forward,
+     # Group Normalization
+     group_norm_forward,
+     # Weight Normalization
+     weight_norm,
+     # Spectral Normalization
+     spectral_norm,
+     # Utilities
+     initialize_norm_params,
+     compute_norm_stats,
+     # Aliases
+     batch_norm,
+     layer_norm,
+     instance_norm,
+     group_norm,
+ )
+
+
+ def test_batch_norm_training():
+     """Test batch normalization in training mode"""
+     print("Testing batch_norm_forward (training)...")
+
+     # Test with 4D input (CNN)
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+
+     output, running_mean, running_var = batch_norm_forward(
+         x, gamma, beta, training=True
+     )
+
+     assert output.shape == x.shape, "Output shape should match input"
+     assert running_mean.shape == (64,), "Running mean shape incorrect"
+     assert running_var.shape == (64,), "Running var shape incorrect"
+
+     # Check normalization (approximately zero mean, unit variance)
+     output_mean = np.mean(output, axis=(0, 2, 3))
+     output_var = np.var(output, axis=(0, 2, 3))
+     assert np.allclose(output_mean, 0, atol=1e-5), "Mean should be close to 0"
+     assert np.allclose(output_var, 1, atol=0.1), "Variance should be close to 1"
+
+     print("✓ batch_norm_forward (training) passed")
+
+
+ def test_batch_norm_inference():
+     """Test batch normalization in inference mode"""
+     print("Testing batch_norm_forward (inference)...")
+
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+     running_mean = np.zeros(64)
+     running_var = np.ones(64)
+
+     output, _, _ = batch_norm_forward(
+         x, gamma, beta, running_mean, running_var, training=False
+     )
+
+     assert output.shape == x.shape, "Output shape should match input"
+
+     print("✓ batch_norm_forward (inference) passed")
+
+
+ def test_batch_norm_fc():
+     """Test batch normalization with fully connected layers"""
+     print("Testing batch_norm_forward (FC)...")
+
+     x = np.random.randn(32, 256)
+     gamma, beta = initialize_norm_params(256)
+
+     output, running_mean, running_var = batch_norm_forward(
+         x, gamma, beta, training=True
+     )
+
+     assert output.shape == x.shape, "Output shape should match input"
+
+     # Check normalization
+     output_mean = np.mean(output, axis=0)
+     output_var = np.var(output, axis=0)
+     assert np.allclose(output_mean, 0, atol=1e-5), "Mean should be close to 0"
+     assert np.allclose(output_var, 1, atol=0.1), "Variance should be close to 1"
+
+     print("✓ batch_norm_forward (FC) passed")
+
+
+ def test_layer_norm_2d():
+     """Test layer normalization with 2D input"""
+     print("Testing layer_norm_forward (2D)...")
+
+     x = np.random.randn(32, 256)
+     gamma, beta = initialize_norm_params(256)
+
+     output = layer_norm_forward(x, gamma, beta)
+
+     assert output.shape == x.shape, "Output shape should match input"
+
+     # Check normalization per sample
+     for i in range(x.shape[0]):
+         sample_mean = np.mean(output[i])
+         sample_var = np.var(output[i])
+         assert np.isclose(sample_mean, 0, atol=1e-5), f"Sample {i} mean should be 0"
+         assert np.isclose(sample_var, 1, atol=0.1), f"Sample {i} variance should be 1"
+
+     print("✓ layer_norm_forward (2D) passed")
+
+
+ def test_layer_norm_3d():
+     """Test layer normalization with 3D input (Transformers)"""
+     print("Testing layer_norm_forward (3D)...")
+
+     x = np.random.randn(32, 10, 512)  # (batch, seq_len, features)
+     gamma, beta = initialize_norm_params(512)
+
+     output = layer_norm_forward(x, gamma, beta)
+
+     assert output.shape == x.shape, "Output shape should match input"
+
+     # Check normalization per sample, per timestep
+     for i in range(x.shape[0]):
+         for t in range(x.shape[1]):
+             sample_mean = np.mean(output[i, t])
+             sample_var = np.var(output[i, t])
+             assert np.isclose(sample_mean, 0, atol=1e-5), "Mean should be 0"
+             assert np.isclose(sample_var, 1, atol=0.1), "Variance should be 1"
+
+     print("✓ layer_norm_forward (3D) passed")
+
+
+ def test_instance_norm():
+     """Test instance normalization"""
+     print("Testing instance_norm_forward...")
+
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+
+     output = instance_norm_forward(x, gamma, beta)
+
+     assert output.shape == x.shape, "Output shape should match input"
+
+     # Check normalization per instance, per channel
+     for i in range(x.shape[0]):
+         for c in range(x.shape[1]):
+             channel_mean = np.mean(output[i, c])
+             channel_var = np.var(output[i, c])
+             assert np.isclose(channel_mean, 0, atol=1e-5), "Mean should be 0"
+             assert np.isclose(channel_var, 1, atol=0.1), "Variance should be 1"
+
+     print("✓ instance_norm_forward passed")
+
+
+ def test_group_norm():
+     """Test group normalization"""
+     print("Testing group_norm_forward...")
+
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+
+     output = group_norm_forward(x, gamma, beta, num_groups=32)
+
+     assert output.shape == x.shape, "Output shape should match input"
+
+     print("✓ group_norm_forward passed")
+
+
+ def test_group_norm_invalid_groups():
+     """Test group normalization with invalid number of groups"""
+     print("Testing group_norm_forward with invalid groups...")
+
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+
+     try:
+         output = group_norm_forward(x, gamma, beta, num_groups=7)  # 64 not divisible by 7
+         assert False, "Should raise ValueError"
+     except ValueError as e:
+         assert "divisible" in str(e).lower()
+
+     print("✓ group_norm_forward invalid groups passed")
+
+
+ def test_weight_norm():
+     """Test weight normalization"""
+     print("Testing weight_norm...")
+
+     weight = np.random.randn(64, 128)
+
+     w_normalized, g = weight_norm(weight, dim=1)
+
+     assert w_normalized.shape == weight.shape, "Normalized weight shape should match"
+     assert g.shape == (64, 1), "Norm shape incorrect"
+
+     # Check that each row has unit norm
+     row_norms = np.linalg.norm(w_normalized, axis=1)
+     assert np.allclose(row_norms, 1.0), "Each row should have unit norm"
+
+     print("✓ weight_norm passed")
+
+
+ def test_spectral_norm():
+     """Test spectral normalization"""
+     print("Testing spectral_norm...")
+
+     weight = np.random.randn(64, 128)
+
+     w_normalized = spectral_norm(weight, num_iterations=1)
+
+     assert w_normalized.shape == weight.shape, "Normalized weight shape should match"
+
+     # Check that largest singular value is approximately 1
+     u, s, v = np.linalg.svd(w_normalized, full_matrices=False)
+     assert np.isclose(s[0], 1.0, atol=0.1), "Largest singular value should be close to 1"
+
+     print("✓ spectral_norm passed")
+
+
+ def test_initialize_norm_params():
+     """Test normalization parameter initialization"""
+     print("Testing initialize_norm_params...")
+
+     gamma, beta = initialize_norm_params(256)
+
+     assert gamma.shape == (256,), "Gamma shape incorrect"
+     assert beta.shape == (256,), "Beta shape incorrect"
+     assert np.allclose(gamma, 1.0), "Gamma should be initialized to 1"
+     assert np.allclose(beta, 0.0), "Beta should be initialized to 0"
+
+     print("✓ initialize_norm_params passed")
+
+
+ def test_compute_norm_stats():
+     """Test normalization statistics computation"""
+     print("Testing compute_norm_stats...")
+
+     x = np.random.randn(32, 64, 28, 28)
+
+     # Batch norm stats
+     mean, var = compute_norm_stats(x, norm_type='batch')
+     assert mean.shape == (1, 64, 1, 1), "Batch norm mean shape incorrect"
+     assert var.shape == (1, 64, 1, 1), "Batch norm var shape incorrect"
+
+     # Layer norm stats
+     mean, var = compute_norm_stats(x, norm_type='layer')
+     assert mean.shape == (32, 64, 28, 1), "Layer norm mean shape incorrect"
+
+     # Instance norm stats
+     mean, var = compute_norm_stats(x, norm_type='instance')
+     assert mean.shape == (32, 64, 1, 1), "Instance norm mean shape incorrect"
+
+     print("✓ compute_norm_stats passed")
+
+
+ def test_aliases():
+     """Test function aliases"""
+     print("Testing aliases...")
+
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+
+     # Test batch_norm alias
+     out1, _, _ = batch_norm(x, gamma, beta, training=True)
+     out2, _, _ = batch_norm_forward(x, gamma, beta, training=True)
+     assert np.allclose(out1, out2), "batch_norm alias should work"
+
+     # Test layer_norm alias
+     x_2d = np.random.randn(32, 256)
+     gamma_2d, beta_2d = initialize_norm_params(256)
+     out1 = layer_norm(x_2d, gamma_2d, beta_2d)
+     out2 = layer_norm_forward(x_2d, gamma_2d, beta_2d)
+     assert np.allclose(out1, out2), "layer_norm alias should work"
+
+     print("✓ aliases passed")
+
+
+ def test_batch_norm_running_stats():
+     """Test batch normalization running statistics update"""
+     print("Testing batch_norm running statistics...")
+
+     x = np.random.randn(32, 64, 28, 28)
+     gamma, beta = initialize_norm_params(64)
+
+     running_mean = np.zeros(64)
+     running_var = np.ones(64)
+
+     # First forward pass
+     _, running_mean_1, running_var_1 = batch_norm_forward(
+         x, gamma, beta, running_mean, running_var, momentum=0.9, training=True
+     )
+
+     # Check that running stats were updated
+     assert not np.allclose(running_mean_1, running_mean), "Running mean should be updated"
+     assert not np.allclose(running_var_1, running_var), "Running var should be updated"
+
+     print("✓ batch_norm running statistics passed")
+
+
+ def test_normalization_with_scale_shift():
+     """Test that scale and shift parameters work correctly"""
+     print("Testing normalization with scale and shift...")
+
+     x = np.random.randn(32, 256)
+
+     # Custom gamma and beta
+     gamma = np.ones(256) * 2.0  # Scale by 2
+     beta = np.ones(256) * 3.0  # Shift by 3
+
+     output = layer_norm_forward(x, gamma, beta)
+
+     # After normalization, mean should be close to beta, std close to gamma
+     output_mean = np.mean(output, axis=0)
+     assert np.allclose(output_mean, 3.0, atol=0.1), "Mean should be close to beta"
+
+     print("✓ normalization with scale and shift passed")
+
+
+ def run_all_tests():
+     """Run all tests"""
+     print("\n" + "="*60)
+     print("ADVANCED NORMALIZATION TECHNIQUES MODULE TESTS")
+     print("="*60 + "\n")
+
+     # Batch Normalization tests
+     test_batch_norm_training()
+     test_batch_norm_inference()
+     test_batch_norm_fc()
+     test_batch_norm_running_stats()
+
+     # Layer Normalization tests
+     test_layer_norm_2d()
+     test_layer_norm_3d()
+
+     # Instance Normalization tests
+     test_instance_norm()
+
+     # Group Normalization tests
+     test_group_norm()
+     test_group_norm_invalid_groups()
+
+     # Weight Normalization tests
+     test_weight_norm()
+
+     # Spectral Normalization tests
+     test_spectral_norm()
+
+     # Utility tests
+     test_initialize_norm_params()
+     test_compute_norm_stats()
+
+     # Aliases
+     test_aliases()
+
+     # Additional tests
+     test_normalization_with_scale_shift()
+
+     print("\n" + "="*60)
+     print("ALL TESTS PASSED! ✓")
+     print("="*60 + "\n")
+
+
+ if __name__ == "__main__":
+     run_all_tests()
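
The new test file is self-contained: running python tests/test_normalization_advanced.py from the package root executes run_all_tests() directly, and since the functions follow the test_* naming convention in a test_*.py file, pytest should collect them as well.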