ilovetools 0.2.16__tar.gz → 0.2.17__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. {ilovetools-0.2.16/ilovetools.egg-info → ilovetools-0.2.17}/PKG-INFO +2 -2
  2. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/__init__.py +2 -2
  3. ilovetools-0.2.17/ilovetools/ml/normalization.py +523 -0
  4. {ilovetools-0.2.16 → ilovetools-0.2.17/ilovetools.egg-info}/PKG-INFO +2 -2
  5. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools.egg-info/SOURCES.txt +2 -0
  6. {ilovetools-0.2.16 → ilovetools-0.2.17}/pyproject.toml +2 -2
  7. {ilovetools-0.2.16 → ilovetools-0.2.17}/setup.py +2 -2
  8. ilovetools-0.2.17/tests/test_normalization.py +439 -0
  9. {ilovetools-0.2.16 → ilovetools-0.2.17}/LICENSE +0 -0
  10. {ilovetools-0.2.16 → ilovetools-0.2.17}/MANIFEST.in +0 -0
  11. {ilovetools-0.2.16 → ilovetools-0.2.17}/README.md +0 -0
  12. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ai/__init__.py +0 -0
  13. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ai/embeddings.py +0 -0
  14. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ai/inference.py +0 -0
  15. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ai/llm_helpers.py +0 -0
  16. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/audio/__init__.py +0 -0
  17. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/automation/__init__.py +0 -0
  18. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/automation/file_organizer.py +0 -0
  19. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/conversion/__init__.py +0 -0
  20. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/conversion/config_converter.py +0 -0
  21. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
  22. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/data/__init__.py +0 -0
  23. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/data/feature_engineering.py +0 -0
  24. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/data/preprocessing.py +0 -0
  25. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/database/__init__.py +0 -0
  26. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/datetime/__init__.py +0 -0
  27. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/email/__init__.py +0 -0
  28. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/email/template_engine.py +0 -0
  29. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/files/__init__.py +0 -0
  30. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/image/__init__.py +0 -0
  31. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/__init__.py +0 -0
  32. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/activations.py +0 -0
  33. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/anomaly_detection.py +0 -0
  34. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/clustering.py +0 -0
  35. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/cross_validation.py +0 -0
  36. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/dimensionality.py +0 -0
  37. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/ensemble.py +0 -0
  38. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/feature_selection.py +0 -0
  39. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/gradient_descent.py +0 -0
  40. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/imbalanced.py +0 -0
  41. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/interpretation.py +0 -0
  42. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/loss_functions.py +0 -0
  43. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/metrics.py +0 -0
  44. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/neural_network.py +0 -0
  45. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/optimizers.py +0 -0
  46. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/pipeline.py +0 -0
  47. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/regularization.py +0 -0
  48. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/timeseries.py +0 -0
  49. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/ml/tuning.py +0 -0
  50. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/security/__init__.py +0 -0
  51. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/security/password_checker.py +0 -0
  52. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/text/__init__.py +0 -0
  53. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/utils/__init__.py +0 -0
  54. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/utils/cache_system.py +0 -0
  55. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/utils/logger.py +0 -0
  56. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/utils/rate_limiter.py +0 -0
  57. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/utils/retry.py +0 -0
  58. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/validation/__init__.py +0 -0
  59. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/validation/data_validator.py +0 -0
  60. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/web/__init__.py +0 -0
  61. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/web/scraper.py +0 -0
  62. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/web/url_shortener.py +0 -0
  63. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools.egg-info/dependency_links.txt +0 -0
  64. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools.egg-info/requires.txt +0 -0
  65. {ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools.egg-info/top_level.txt +0 -0
  66. {ilovetools-0.2.16 → ilovetools-0.2.17}/requirements.txt +0 -0
  67. {ilovetools-0.2.16 → ilovetools-0.2.17}/setup.cfg +0 -0
  68. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/__init__.py +0 -0
  69. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/test_activations.py +0 -0
  70. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/test_gradient_descent.py +0 -0
  71. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/test_loss_functions.py +0 -0
  72. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/test_neural_network.py +0 -0
  73. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/test_optimizers.py +0 -0
  74. {ilovetools-0.2.16 → ilovetools-0.2.17}/tests/test_regularization.py +0 -0
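The substantive change in 0.2.17 is the new ilovetools/ml/normalization.py module and its test file; the remaining diffs are version and keyword bumps in the packaging metadata. A minimal usage sketch, assuming 0.2.17 is installed and written only against the function signatures added below (an illustration, not an example shipped with the package):

import numpy as np
from ilovetools.ml.normalization import (
    batch_normalization,
    layer_normalization,
    create_normalization_params,
)

# Batch norm over a (N, D) activation matrix in training mode;
# the returned running statistics are carried forward for inference.
bn = create_normalization_params(64, 'batch')
x = np.random.randn(32, 64)
out, bn['running_mean'], bn['running_var'] = batch_normalization(
    x, bn['gamma'], bn['beta'],
    bn['running_mean'], bn['running_var'],
    training=True,
)

# Layer norm over transformer-shaped input (N, L, D).
ln = create_normalization_params(512, 'layer')
seq = np.random.randn(32, 10, 512)
normed = layer_normalization(seq, ln['gamma'], ln['beta'])
print(out.shape, normed.shape)  # (32, 64) (32, 10, 512)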
{ilovetools-0.2.16/ilovetools.egg-info → ilovetools-0.2.17}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.16
+ Version: 0.2.17
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,gradient-descent,optimization,regularization,loss-functions,activation-functions,optimizers,adam,adamw,radam,lamb
+ Keywords: utilities,tools,ai,ml,data-processing,automation,batch-normalization,layer-normalization,group-normalization,instance-normalization,weight-normalization,deep-learning,transformers
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
{ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools/__init__.py
@@ -2,8 +2,8 @@
  ilovetools - A comprehensive Python utility library
  """
 
- __version__ = "0.2.15"
- # release marker: 0.2.15
+ __version__ = "0.2.16"
+ # release marker: 0.2.16
  __author__ = "Ali Mehdi"
  __email__ = "ali.mehdi.dev579@gmail.com"
 
ilovetools-0.2.17/ilovetools/ml/normalization.py
@@ -0,0 +1,523 @@
+ """
+ Normalization Techniques for Neural Networks
+
+ This module provides various normalization techniques used in deep learning:
+ - Batch Normalization
+ - Layer Normalization
+ - Group Normalization
+ - Instance Normalization
+ - Weight Normalization
+
+ All normalization functions support both training and inference modes.
+ """
+
+ import numpy as np
+ from typing import Tuple, Optional
+
+
+ # ============================================================================
+ # BATCH NORMALIZATION
+ # ============================================================================
+
+ def batch_normalization(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     running_mean: Optional[np.ndarray] = None,
+     running_var: Optional[np.ndarray] = None,
+     training: bool = True,
+     momentum: float = 0.9,
+     epsilon: float = 1e-5
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+     """
+     Batch Normalization
+
+     Normalizes activations across the batch dimension. Reduces internal covariate shift
+     and allows higher learning rates.
+
+     Formula: y = gamma * (x - mean) / sqrt(var + epsilon) + beta
+
+     Args:
+         x: Input tensor of shape (N, D) or (N, C, H, W)
+         gamma: Scale parameter of shape (D,) or (C,)
+         beta: Shift parameter of shape (D,) or (C,)
+         running_mean: Running mean for inference (updated during training)
+         running_var: Running variance for inference (updated during training)
+         training: Whether in training mode
+         momentum: Momentum for running statistics (default: 0.9)
+         epsilon: Small constant for numerical stability (default: 1e-5)
+
+     Returns:
+         Tuple of (normalized_output, updated_running_mean, updated_running_var)
+
+     Example:
+         >>> x = np.random.randn(32, 64)  # Batch of 32, 64 features
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> out, mean, var = batch_normalization(x, gamma, beta, training=True)
+         >>> print(out.shape)  # (32, 64)
+     """
+     if running_mean is None:
+         running_mean = np.zeros(gamma.shape)
+     if running_var is None:
+         running_var = np.ones(gamma.shape)
+
+     if training:
+         # Compute batch statistics
+         if x.ndim == 2:
+             # Fully connected layer: (N, D)
+             batch_mean = np.mean(x, axis=0)
+             batch_var = np.var(x, axis=0)
+         elif x.ndim == 4:
+             # Convolutional layer: (N, C, H, W)
+             batch_mean = np.mean(x, axis=(0, 2, 3), keepdims=True)
+             batch_var = np.var(x, axis=(0, 2, 3), keepdims=True)
+         else:
+             raise ValueError(f"Unsupported input shape: {x.shape}")
+
+         # Normalize
+         x_normalized = (x - batch_mean) / np.sqrt(batch_var + epsilon)
+
+         # Update running statistics
+         running_mean = momentum * running_mean + (1 - momentum) * batch_mean.squeeze()
+         running_var = momentum * running_var + (1 - momentum) * batch_var.squeeze()
+     else:
+         # Use running statistics for inference
+         if x.ndim == 2:
+             x_normalized = (x - running_mean) / np.sqrt(running_var + epsilon)
+         elif x.ndim == 4:
+             mean_reshaped = running_mean.reshape(1, -1, 1, 1)
+             var_reshaped = running_var.reshape(1, -1, 1, 1)
+             x_normalized = (x - mean_reshaped) / np.sqrt(var_reshaped + epsilon)
+
+     # Scale and shift
+     if x.ndim == 2:
+         out = gamma * x_normalized + beta
+     elif x.ndim == 4:
+         gamma_reshaped = gamma.reshape(1, -1, 1, 1)
+         beta_reshaped = beta.reshape(1, -1, 1, 1)
+         out = gamma_reshaped * x_normalized + beta_reshaped
+
+     return out, running_mean, running_var
+
+
+ def batch_norm_forward(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     bn_params: dict
+ ) -> Tuple[np.ndarray, dict]:
+     """
+     Forward pass for batch normalization with cache
+
+     Args:
+         x: Input data
+         gamma: Scale parameter
+         beta: Shift parameter
+         bn_params: Dictionary with keys:
+             - mode: 'train' or 'test'
+             - eps: Epsilon for numerical stability
+             - momentum: Momentum for running statistics
+             - running_mean: Running mean
+             - running_var: Running variance
+
+     Returns:
+         Tuple of (output, cache)
+
+     Example:
+         >>> x = np.random.randn(32, 64)
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> bn_params = {
+         ...     'mode': 'train',
+         ...     'eps': 1e-5,
+         ...     'momentum': 0.9,
+         ...     'running_mean': np.zeros(64),
+         ...     'running_var': np.ones(64)
+         ... }
+         >>> out, cache = batch_norm_forward(x, gamma, beta, bn_params)
+     """
+     mode = bn_params.get('mode', 'train')
+     eps = bn_params.get('eps', 1e-5)
+     momentum = bn_params.get('momentum', 0.9)
+
+     running_mean = bn_params.get('running_mean', np.zeros(gamma.shape))
+     running_var = bn_params.get('running_var', np.ones(gamma.shape))
+
+     training = (mode == 'train')
+
+     out, running_mean, running_var = batch_normalization(
+         x, gamma, beta, running_mean, running_var,
+         training=training, momentum=momentum, epsilon=eps
+     )
+
+     # Update running statistics in params
+     bn_params['running_mean'] = running_mean
+     bn_params['running_var'] = running_var
+
+     # Cache for backward pass
+     cache = {
+         'x': x,
+         'gamma': gamma,
+         'beta': beta,
+         'eps': eps,
+         'x_normalized': (x - running_mean) / np.sqrt(running_var + eps),
+         'mean': running_mean,
+         'var': running_var
+     }
+
+     return out, cache
+
+
+ # ============================================================================
+ # LAYER NORMALIZATION
+ # ============================================================================
+
+ def layer_normalization(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     epsilon: float = 1e-5
+ ) -> np.ndarray:
+     """
+     Layer Normalization
+
+     Normalizes activations across the feature dimension. Independent of batch size,
+     making it suitable for RNNs and Transformers.
+
+     Formula: y = gamma * (x - mean) / sqrt(var + epsilon) + beta
+
+     Args:
+         x: Input tensor of shape (N, D) or (N, L, D)
+         gamma: Scale parameter of shape (D,)
+         beta: Shift parameter of shape (D,)
+         epsilon: Small constant for numerical stability (default: 1e-5)
+
+     Returns:
+         Normalized output of same shape as input
+
+     Example:
+         >>> x = np.random.randn(32, 512)  # Batch of 32, 512 features
+         >>> gamma = np.ones(512)
+         >>> beta = np.zeros(512)
+         >>> out = layer_normalization(x, gamma, beta)
+         >>> print(out.shape)  # (32, 512)
+
+         >>> # For sequences (Transformers)
+         >>> x = np.random.randn(32, 10, 512)  # Batch 32, seq len 10, dim 512
+         >>> out = layer_normalization(x, gamma, beta)
+         >>> print(out.shape)  # (32, 10, 512)
+     """
+     # Compute mean and variance across feature dimension
+     if x.ndim == 2:
+         # (N, D) - Fully connected
+         mean = np.mean(x, axis=1, keepdims=True)
+         var = np.var(x, axis=1, keepdims=True)
+     elif x.ndim == 3:
+         # (N, L, D) - Sequence data (Transformers)
+         mean = np.mean(x, axis=2, keepdims=True)
+         var = np.var(x, axis=2, keepdims=True)
+     else:
+         raise ValueError(f"Unsupported input shape: {x.shape}")
+
+     # Normalize
+     x_normalized = (x - mean) / np.sqrt(var + epsilon)
+
+     # Scale and shift
+     out = gamma * x_normalized + beta
+
+     return out
+
+
+ def layer_norm_forward(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     epsilon: float = 1e-5
+ ) -> Tuple[np.ndarray, dict]:
+     """
+     Forward pass for layer normalization with cache
+
+     Args:
+         x: Input data
+         gamma: Scale parameter
+         beta: Shift parameter
+         epsilon: Epsilon for numerical stability
+
+     Returns:
+         Tuple of (output, cache)
+
+     Example:
+         >>> x = np.random.randn(32, 512)
+         >>> gamma = np.ones(512)
+         >>> beta = np.zeros(512)
+         >>> out, cache = layer_norm_forward(x, gamma, beta)
+     """
+     # Compute statistics
+     if x.ndim == 2:
+         mean = np.mean(x, axis=1, keepdims=True)
+         var = np.var(x, axis=1, keepdims=True)
+     elif x.ndim == 3:
+         mean = np.mean(x, axis=2, keepdims=True)
+         var = np.var(x, axis=2, keepdims=True)
+     else:
+         raise ValueError(f"Unsupported input shape: {x.shape}")
+
+     # Normalize
+     x_normalized = (x - mean) / np.sqrt(var + epsilon)
+
+     # Scale and shift
+     out = gamma * x_normalized + beta
+
+     # Cache for backward pass
+     cache = {
+         'x': x,
+         'gamma': gamma,
+         'beta': beta,
+         'eps': epsilon,
+         'x_normalized': x_normalized,
+         'mean': mean,
+         'var': var
+     }
+
+     return out, cache
+
+
+ # ============================================================================
+ # GROUP NORMALIZATION
+ # ============================================================================
+
+ def group_normalization(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     num_groups: int = 32,
+     epsilon: float = 1e-5
+ ) -> np.ndarray:
+     """
+     Group Normalization
+
+     Divides channels into groups and normalizes within each group.
+     Works well with small batch sizes.
+
+     Args:
+         x: Input tensor of shape (N, C, H, W)
+         gamma: Scale parameter of shape (C,)
+         beta: Shift parameter of shape (C,)
+         num_groups: Number of groups (default: 32)
+         epsilon: Small constant for numerical stability
+
+     Returns:
+         Normalized output of same shape as input
+
+     Example:
+         >>> x = np.random.randn(8, 64, 32, 32)  # Small batch
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> out = group_normalization(x, gamma, beta, num_groups=32)
+         >>> print(out.shape)  # (8, 64, 32, 32)
+     """
+     N, C, H, W = x.shape
+
+     if C % num_groups != 0:
+         raise ValueError(f"Number of channels ({C}) must be divisible by num_groups ({num_groups})")
+
+     # Reshape to (N, num_groups, C // num_groups, H, W)
+     x_grouped = x.reshape(N, num_groups, C // num_groups, H, W)
+
+     # Compute mean and variance per group
+     mean = np.mean(x_grouped, axis=(2, 3, 4), keepdims=True)
+     var = np.var(x_grouped, axis=(2, 3, 4), keepdims=True)
+
+     # Normalize
+     x_normalized = (x_grouped - mean) / np.sqrt(var + epsilon)
+
+     # Reshape back
+     x_normalized = x_normalized.reshape(N, C, H, W)
+
+     # Scale and shift
+     gamma_reshaped = gamma.reshape(1, -1, 1, 1)
+     beta_reshaped = beta.reshape(1, -1, 1, 1)
+     out = gamma_reshaped * x_normalized + beta_reshaped
+
+     return out
+
+
+ # ============================================================================
+ # INSTANCE NORMALIZATION
+ # ============================================================================
+
+ def instance_normalization(
+     x: np.ndarray,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     epsilon: float = 1e-5
+ ) -> np.ndarray:
+     """
+     Instance Normalization
+
+     Normalizes each sample independently. Used in style transfer and GANs.
+
+     Args:
+         x: Input tensor of shape (N, C, H, W)
+         gamma: Scale parameter of shape (C,)
+         beta: Shift parameter of shape (C,)
+         epsilon: Small constant for numerical stability
+
+     Returns:
+         Normalized output of same shape as input
+
+     Example:
+         >>> x = np.random.randn(8, 64, 32, 32)
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> out = instance_normalization(x, gamma, beta)
+         >>> print(out.shape)  # (8, 64, 32, 32)
+     """
+     N, C, H, W = x.shape
+
+     # Compute mean and variance per instance per channel
+     mean = np.mean(x, axis=(2, 3), keepdims=True)
+     var = np.var(x, axis=(2, 3), keepdims=True)
+
+     # Normalize
+     x_normalized = (x - mean) / np.sqrt(var + epsilon)
+
+     # Scale and shift
+     gamma_reshaped = gamma.reshape(1, -1, 1, 1)
+     beta_reshaped = beta.reshape(1, -1, 1, 1)
+     out = gamma_reshaped * x_normalized + beta_reshaped
+
+     return out
+
+
+ # ============================================================================
+ # WEIGHT NORMALIZATION
+ # ============================================================================
+
+ def weight_normalization(
+     w: np.ndarray,
+     g: Optional[np.ndarray] = None,
+     axis: int = 0
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """
+     Weight Normalization
+
+     Decouples the magnitude and direction of weight vectors.
+     Faster than batch normalization.
+
+     Formula: w = g * (v / ||v||)
+
+     Args:
+         w: Weight matrix
+         g: Magnitude parameter (if None, computed from w)
+         axis: Axis along which to normalize (default: 0)
+
+     Returns:
+         Tuple of (normalized_weights, magnitude)
+
+     Example:
+         >>> w = np.random.randn(512, 256)  # Weight matrix
+         >>> w_norm, g = weight_normalization(w)
+         >>> print(w_norm.shape)  # (512, 256)
+         >>> print(g.shape)  # (512,) or (256,) depending on axis
+     """
+     # Compute norm along specified axis
+     norm = np.linalg.norm(w, axis=axis, keepdims=True)
+
+     # Normalize direction
+     v = w / (norm + 1e-8)
+
+     # Compute or use provided magnitude
+     if g is None:
+         g = norm.squeeze()
+
+     # Reconstruct weights
+     if axis == 0:
+         w_normalized = g.reshape(-1, 1) * v
+     else:
+         w_normalized = g.reshape(1, -1) * v
+
+     return w_normalized, g
+
+
+ # ============================================================================
+ # UTILITY FUNCTIONS
+ # ============================================================================
+
+ def create_normalization_params(
+     num_features: int,
+     norm_type: str = 'batch'
+ ) -> dict:
+     """
+     Create parameters for normalization layers
+
+     Args:
+         num_features: Number of features/channels
+         norm_type: Type of normalization ('batch', 'layer', 'group', 'instance')
+
+     Returns:
+         Dictionary with initialized parameters
+
+     Example:
+         >>> params = create_normalization_params(64, 'batch')
+         >>> print(params.keys())
+         dict_keys(['gamma', 'beta', 'running_mean', 'running_var'])
+     """
+     params = {
+         'gamma': np.ones(num_features),
+         'beta': np.zeros(num_features)
+     }
+
+     if norm_type == 'batch':
+         params['running_mean'] = np.zeros(num_features)
+         params['running_var'] = np.ones(num_features)
+
+     return params
+
+
+ def apply_normalization(
+     x: np.ndarray,
+     norm_type: str,
+     gamma: np.ndarray,
+     beta: np.ndarray,
+     **kwargs
+ ) -> np.ndarray:
+     """
+     Apply normalization by type name
+
+     Args:
+         x: Input tensor
+         norm_type: Type of normalization
+         gamma: Scale parameter
+         beta: Shift parameter
+         **kwargs: Additional arguments for specific normalization
+
+     Returns:
+         Normalized output
+
+     Example:
+         >>> x = np.random.randn(32, 64)
+         >>> gamma = np.ones(64)
+         >>> beta = np.zeros(64)
+         >>> out = apply_normalization(x, 'layer', gamma, beta)
+     """
+     if norm_type == 'batch':
+         out, _, _ = batch_normalization(x, gamma, beta, **kwargs)
+         return out
+     elif norm_type == 'layer':
+         return layer_normalization(x, gamma, beta, **kwargs)
+     elif norm_type == 'group':
+         return group_normalization(x, gamma, beta, **kwargs)
+     elif norm_type == 'instance':
+         return instance_normalization(x, gamma, beta, **kwargs)
+     else:
+         raise ValueError(f"Unknown normalization type: {norm_type}")
+
+
+ # Aliases for convenience
+ batchnorm = batch_normalization
+ layernorm = layer_normalization
+ groupnorm = group_normalization
+ instancenorm = instance_normalization
+ weightnorm = weight_normalization
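For reference, a short sketch of how the apply_normalization dispatcher above can route between the layer types (again an illustration written against the signatures in this diff, not code from the package's documentation):

import numpy as np
from ilovetools.ml import normalization as norm

x2d = np.random.randn(16, 128)      # dense activations (N, D)
x4d = np.random.randn(4, 32, 8, 8)  # conv feature maps (N, C, H, W)

g128, b128 = np.ones(128), np.zeros(128)
g32, b32 = np.ones(32), np.zeros(32)

y_layer = norm.apply_normalization(x2d, 'layer', g128, b128)
y_group = norm.apply_normalization(x4d, 'group', g32, b32, num_groups=8)
y_inst = norm.apply_normalization(x4d, 'instance', g32, b32)
print(y_layer.shape, y_group.shape, y_inst.shape)  # (16, 128) (4, 32, 8, 8) (4, 32, 8, 8)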
{ilovetools-0.2.16 → ilovetools-0.2.17/ilovetools.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.16
+ Version: 0.2.17
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,gradient-descent,optimization,regularization,loss-functions,activation-functions,optimizers,adam,adamw,radam,lamb
+ Keywords: utilities,tools,ai,ml,data-processing,automation,batch-normalization,layer-normalization,group-normalization,instance-normalization,weight-normalization,deep-learning,transformers
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
{ilovetools-0.2.16 → ilovetools-0.2.17}/ilovetools.egg-info/SOURCES.txt
@@ -43,6 +43,7 @@ ilovetools/ml/interpretation.py
  ilovetools/ml/loss_functions.py
  ilovetools/ml/metrics.py
  ilovetools/ml/neural_network.py
+ ilovetools/ml/normalization.py
  ilovetools/ml/optimizers.py
  ilovetools/ml/pipeline.py
  ilovetools/ml/regularization.py
@@ -66,5 +67,6 @@ tests/test_activations.py
  tests/test_gradient_descent.py
  tests/test_loss_functions.py
  tests/test_neural_network.py
+ tests/test_normalization.py
  tests/test_optimizers.py
  tests/test_regularization.py
{ilovetools-0.2.16 → ilovetools-0.2.17}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
  [project]
  name = "ilovetools"
- version = "0.2.16"
+ version = "0.2.17"
  description = "A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -12,7 +12,7 @@ license = "MIT"
  authors = [
      {name = "Ali Mehdi", email = "ali.mehdi.dev579@gmail.com"}
  ]
- keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "gradient-descent", "optimization", "regularization", "loss-functions", "activation-functions", "optimizers", "adam", "adamw", "radam", "lamb"]
+ keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "batch-normalization", "layer-normalization", "group-normalization", "instance-normalization", "weight-normalization", "deep-learning", "transformers"]
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
{ilovetools-0.2.16 → ilovetools-0.2.17}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
  setup(
      name="ilovetools",
-     version="0.2.16",
+     version="0.2.17",
      author="Ali Mehdi",
      author_email="ali.mehdi.dev579@gmail.com",
      description="A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs",
@@ -57,7 +57,7 @@ setup(
              "soundfile>=0.12.0",
          ],
      },
-     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, gradient-descent, optimization, regularization, loss-functions, activation-functions, optimizers, adam, adamw, radam, lamb",
+     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, batch-normalization, layer-normalization, group-normalization, instance-normalization, weight-normalization, deep-learning, transformers",
      project_urls={
          "Bug Reports": "https://github.com/AliMehdi512/ilovetools/issues",
          "Source": "https://github.com/AliMehdi512/ilovetools",
ilovetools-0.2.17/tests/test_normalization.py
@@ -0,0 +1,439 @@
+ """
+ Tests for normalization techniques module
+ """
+
+ import numpy as np
+ import sys
+ import os
+
+ # Add parent directory to path
+ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+ from ilovetools.ml.normalization import (
+     # Main functions
+     batch_normalization,
+     layer_normalization,
+     group_normalization,
+     instance_normalization,
+     weight_normalization,
+     # Forward passes with cache
+     batch_norm_forward,
+     layer_norm_forward,
+     # Utilities
+     create_normalization_params,
+     apply_normalization,
+     # Aliases
+     batchnorm,
+     layernorm,
+     groupnorm,
+     instancenorm,
+     weightnorm,
+ )
+
+
+ def test_batch_normalization():
+     """Test Batch Normalization"""
+     print("Testing batch_normalization...")
+
+     # Test 2D input (fully connected)
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     out, running_mean, running_var = batch_normalization(
+         x, gamma, beta, training=True
+     )
+
+     assert out.shape == x.shape, "Output shape should match input"
+     assert running_mean.shape == (64,), "Running mean shape incorrect"
+     assert running_var.shape == (64,), "Running var shape incorrect"
+
+     # Check normalization (mean ≈ 0, var ≈ 1)
+     assert np.abs(np.mean(out)) < 0.1, "Mean should be close to 0"
+     assert np.abs(np.var(out) - 1.0) < 0.2, "Variance should be close to 1"
+
+     print("✓ batch_normalization passed")
+
+
+ def test_batch_normalization_4d():
+     """Test Batch Normalization with 4D input (CNN)"""
+     print("Testing batch_normalization with 4D input...")
+
+     # Test 4D input (convolutional)
+     x = np.random.randn(8, 32, 16, 16)  # (N, C, H, W)
+     gamma = np.ones(32)
+     beta = np.zeros(32)
+
+     out, running_mean, running_var = batch_normalization(
+         x, gamma, beta, training=True
+     )
+
+     assert out.shape == x.shape, "Output shape should match input"
+     assert running_mean.shape == (32,), "Running mean shape incorrect"
+
+     print("✓ batch_normalization 4D passed")
+
+
+ def test_batch_normalization_inference():
+     """Test Batch Normalization in inference mode"""
+     print("Testing batch_normalization inference mode...")
+
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+     running_mean = np.random.randn(64)
+     running_var = np.abs(np.random.randn(64))
+
+     out, _, _ = batch_normalization(
+         x, gamma, beta, running_mean, running_var, training=False
+     )
+
+     assert out.shape == x.shape, "Output shape should match input"
+     print("✓ batch_normalization inference passed")
+
+
+ def test_layer_normalization():
+     """Test Layer Normalization"""
+     print("Testing layer_normalization...")
+
+     # Test 2D input
+     x = np.random.randn(32, 512)
+     gamma = np.ones(512)
+     beta = np.zeros(512)
+
+     out = layer_normalization(x, gamma, beta)
+
+     assert out.shape == x.shape, "Output shape should match input"
+
+     # Check normalization per sample
+     for i in range(32):
+         sample_mean = np.mean(out[i])
+         sample_var = np.var(out[i])
+         assert np.abs(sample_mean) < 0.1, f"Sample {i} mean should be close to 0"
+         assert np.abs(sample_var - 1.0) < 0.2, f"Sample {i} variance should be close to 1"
+
+     print("✓ layer_normalization passed")
+
+
+ def test_layer_normalization_3d():
+     """Test Layer Normalization with 3D input (sequences)"""
+     print("Testing layer_normalization with 3D input...")
+
+     # Test 3D input (sequences for Transformers)
+     x = np.random.randn(32, 10, 512)  # (N, L, D)
+     gamma = np.ones(512)
+     beta = np.zeros(512)
+
+     out = layer_normalization(x, gamma, beta)
+
+     assert out.shape == x.shape, "Output shape should match input"
+
+     # Check normalization per sample per timestep
+     for i in range(32):
+         for t in range(10):
+             token_mean = np.mean(out[i, t])
+             token_var = np.var(out[i, t])
+             assert np.abs(token_mean) < 0.1, "Token mean should be close to 0"
+             assert np.abs(token_var - 1.0) < 0.2, "Token variance should be close to 1"
+
+     print("✓ layer_normalization 3D passed")
+
+
+ def test_group_normalization():
+     """Test Group Normalization"""
+     print("Testing group_normalization...")
+
+     x = np.random.randn(8, 64, 32, 32)  # (N, C, H, W)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     out = group_normalization(x, gamma, beta, num_groups=32)
+
+     assert out.shape == x.shape, "Output shape should match input"
+     print("✓ group_normalization passed")
+
+
+ def test_group_normalization_invalid_groups():
+     """Test Group Normalization with invalid number of groups"""
+     print("Testing group_normalization with invalid groups...")
+
+     x = np.random.randn(8, 64, 32, 32)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     try:
+         out = group_normalization(x, gamma, beta, num_groups=30)
+         assert False, "Should raise ValueError"
+     except ValueError as e:
+         assert "divisible" in str(e).lower()
+
+     print("✓ group_normalization invalid groups passed")
+
+
+ def test_instance_normalization():
+     """Test Instance Normalization"""
+     print("Testing instance_normalization...")
+
+     x = np.random.randn(8, 64, 32, 32)  # (N, C, H, W)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     out = instance_normalization(x, gamma, beta)
+
+     assert out.shape == x.shape, "Output shape should match input"
+
+     # Check normalization per instance per channel
+     for n in range(8):
+         for c in range(64):
+             channel_mean = np.mean(out[n, c])
+             channel_var = np.var(out[n, c])
+             assert np.abs(channel_mean) < 0.1, "Channel mean should be close to 0"
+             assert np.abs(channel_var - 1.0) < 0.2, "Channel variance should be close to 1"
+
+     print("✓ instance_normalization passed")
+
+
+ def test_weight_normalization():
+     """Test Weight Normalization"""
+     print("Testing weight_normalization...")
+
+     w = np.random.randn(512, 256)
+
+     w_norm, g = weight_normalization(w, axis=0)
+
+     assert w_norm.shape == w.shape, "Output shape should match input"
+     assert g.shape == (512,), "Magnitude shape incorrect"
+
+     # Check that each row has unit norm
+     for i in range(512):
+         row_norm = np.linalg.norm(w_norm[i])
+         expected_norm = g[i]
+         assert np.abs(row_norm - expected_norm) < 1e-5, "Row norm should match magnitude"
+
+     print("✓ weight_normalization passed")
+
+
+ def test_weight_normalization_axis1():
+     """Test Weight Normalization with axis=1"""
+     print("Testing weight_normalization with axis=1...")
+
+     w = np.random.randn(512, 256)
+
+     w_norm, g = weight_normalization(w, axis=1)
+
+     assert w_norm.shape == w.shape, "Output shape should match input"
+     assert g.shape == (256,), "Magnitude shape incorrect"
+
+     print("✓ weight_normalization axis=1 passed")
+
+
+ def test_batch_norm_forward():
+     """Test Batch Normalization forward with cache"""
+     print("Testing batch_norm_forward...")
+
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     bn_params = {
+         'mode': 'train',
+         'eps': 1e-5,
+         'momentum': 0.9,
+         'running_mean': np.zeros(64),
+         'running_var': np.ones(64)
+     }
+
+     out, cache = batch_norm_forward(x, gamma, beta, bn_params)
+
+     assert out.shape == x.shape, "Output shape should match input"
+     assert 'x' in cache, "Cache should contain input"
+     assert 'gamma' in cache, "Cache should contain gamma"
+     assert 'x_normalized' in cache, "Cache should contain normalized input"
+
+     print("✓ batch_norm_forward passed")
+
+
+ def test_layer_norm_forward():
+     """Test Layer Normalization forward with cache"""
+     print("Testing layer_norm_forward...")
+
+     x = np.random.randn(32, 512)
+     gamma = np.ones(512)
+     beta = np.zeros(512)
+
+     out, cache = layer_norm_forward(x, gamma, beta)
+
+     assert out.shape == x.shape, "Output shape should match input"
+     assert 'x' in cache, "Cache should contain input"
+     assert 'gamma' in cache, "Cache should contain gamma"
+     assert 'x_normalized' in cache, "Cache should contain normalized input"
+
+     print("✓ layer_norm_forward passed")
+
+
+ def test_create_normalization_params():
+     """Test create_normalization_params utility"""
+     print("Testing create_normalization_params...")
+
+     # Test batch norm params
+     params = create_normalization_params(64, 'batch')
+     assert 'gamma' in params, "Should have gamma"
+     assert 'beta' in params, "Should have beta"
+     assert 'running_mean' in params, "Batch norm should have running_mean"
+     assert 'running_var' in params, "Batch norm should have running_var"
+     assert params['gamma'].shape == (64,), "Gamma shape incorrect"
+
+     # Test layer norm params
+     params = create_normalization_params(512, 'layer')
+     assert 'gamma' in params, "Should have gamma"
+     assert 'beta' in params, "Should have beta"
+     assert 'running_mean' not in params, "Layer norm should not have running_mean"
+
+     print("✓ create_normalization_params passed")
+
+
+ def test_apply_normalization():
+     """Test apply_normalization utility"""
+     print("Testing apply_normalization...")
+
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     # Test batch norm
+     out = apply_normalization(x, 'batch', gamma, beta, training=True)
+     assert out.shape == x.shape, "Batch norm output shape incorrect"
+
+     # Test layer norm
+     out = apply_normalization(x, 'layer', gamma, beta)
+     assert out.shape == x.shape, "Layer norm output shape incorrect"
+
+     print("✓ apply_normalization passed")
+
+
+ def test_apply_normalization_invalid():
+     """Test apply_normalization with invalid type"""
+     print("Testing apply_normalization with invalid type...")
+
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     try:
+         out = apply_normalization(x, 'invalid', gamma, beta)
+         assert False, "Should raise ValueError"
+     except ValueError as e:
+         assert "unknown" in str(e).lower()
+
+     print("✓ apply_normalization invalid type passed")
+
+
+ def test_aliases():
+     """Test function aliases"""
+     print("Testing aliases...")
+
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     # Test batchnorm alias
+     out1, _, _ = batchnorm(x, gamma, beta, training=True)
+     out2, _, _ = batch_normalization(x, gamma, beta, training=True)
+     assert np.allclose(out1, out2), "Batchnorm alias should work"
+
+     # Test layernorm alias
+     out1 = layernorm(x, gamma, beta)
+     out2 = layer_normalization(x, gamma, beta)
+     assert np.allclose(out1, out2), "Layernorm alias should work"
+
+     print("✓ aliases passed")
+
+
+ def test_batch_vs_layer_norm():
+     """Test difference between batch and layer normalization"""
+     print("Testing batch vs layer normalization...")
+
+     x = np.random.randn(32, 64)
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     # Batch norm normalizes across batch
+     out_batch, _, _ = batch_normalization(x, gamma, beta, training=True)
+
+     # Layer norm normalizes across features
+     out_layer = layer_normalization(x, gamma, beta)
+
+     # They should produce different results
+     assert not np.allclose(out_batch, out_layer), "Batch and layer norm should differ"
+
+     print("✓ batch vs layer norm comparison passed")
+
+
+ def test_normalization_stability():
+     """Test numerical stability with extreme values"""
+     print("Testing normalization stability...")
+
+     # Test with very large values
+     x = np.random.randn(32, 64) * 1000
+     gamma = np.ones(64)
+     beta = np.zeros(64)
+
+     out, _, _ = batch_normalization(x, gamma, beta, training=True)
+     assert not np.any(np.isnan(out)), "Should not produce NaN"
+     assert not np.any(np.isinf(out)), "Should not produce Inf"
+
+     # Test with very small values
+     x = np.random.randn(32, 64) * 1e-10
+     out, _, _ = batch_normalization(x, gamma, beta, training=True)
+     assert not np.any(np.isnan(out)), "Should not produce NaN with small values"
+
+     print("✓ normalization stability passed")
+
+
+ def run_all_tests():
+     """Run all tests"""
+     print("\n" + "="*60)
+     print("NORMALIZATION TECHNIQUES MODULE TESTS")
+     print("="*60 + "\n")
+
+     # Batch Normalization
+     test_batch_normalization()
+     test_batch_normalization_4d()
+     test_batch_normalization_inference()
+     test_batch_norm_forward()
+
+     # Layer Normalization
+     test_layer_normalization()
+     test_layer_normalization_3d()
+     test_layer_norm_forward()
+
+     # Group Normalization
+     test_group_normalization()
+     test_group_normalization_invalid_groups()
+
+     # Instance Normalization
+     test_instance_normalization()
+
+     # Weight Normalization
+     test_weight_normalization()
+     test_weight_normalization_axis1()
+
+     # Utilities
+     test_create_normalization_params()
+     test_apply_normalization()
+     test_apply_normalization_invalid()
+
+     # Aliases
+     test_aliases()
+
+     # Comparisons
+     test_batch_vs_layer_norm()
+     test_normalization_stability()
+
+     print("\n" + "="*60)
+     print("ALL TESTS PASSED! ✓")
+     print("="*60 + "\n")
+
+
+ if __name__ == "__main__":
+     run_all_tests()
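The test module is self-running: run_all_tests() is invoked under if __name__ == "__main__", so after unpacking the sdist the suite can presumably be exercised as a script (python tests/test_normalization.py) or programmatically; a small sketch, assuming the project root is on sys.path:

# Hypothetical driver; tests/__init__.py exists in the sdist, so this package import should resolve.
from tests.test_normalization import run_all_tests

run_all_tests()  # prints a per-test status line and a summary banner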