ilovetools 0.2.28__tar.gz → 0.2.29__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. {ilovetools-0.2.28/ilovetools.egg-info → ilovetools-0.2.29}/PKG-INFO +2 -2
  2. ilovetools-0.2.29/ilovetools/ml/schedulers.py +588 -0
  3. {ilovetools-0.2.28 → ilovetools-0.2.29/ilovetools.egg-info}/PKG-INFO +2 -2
  4. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools.egg-info/SOURCES.txt +2 -0
  5. {ilovetools-0.2.28 → ilovetools-0.2.29}/pyproject.toml +2 -2
  6. {ilovetools-0.2.28 → ilovetools-0.2.29}/setup.py +2 -2
  7. ilovetools-0.2.29/tests/test_schedulers.py +397 -0
  8. {ilovetools-0.2.28 → ilovetools-0.2.29}/LICENSE +0 -0
  9. {ilovetools-0.2.28 → ilovetools-0.2.29}/MANIFEST.in +0 -0
  10. {ilovetools-0.2.28 → ilovetools-0.2.29}/README.md +0 -0
  11. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/__init__.py +0 -0
  12. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ai/__init__.py +0 -0
  13. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ai/embeddings.py +0 -0
  14. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ai/inference.py +0 -0
  15. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ai/llm_helpers.py +0 -0
  16. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/audio/__init__.py +0 -0
  17. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/automation/__init__.py +0 -0
  18. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/automation/file_organizer.py +0 -0
  19. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/conversion/__init__.py +0 -0
  20. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/conversion/config_converter.py +0 -0
  21. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
  22. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/data/__init__.py +0 -0
  23. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/data/feature_engineering.py +0 -0
  24. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/data/preprocessing.py +0 -0
  25. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/database/__init__.py +0 -0
  26. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/datetime/__init__.py +0 -0
  27. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/email/__init__.py +0 -0
  28. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/email/template_engine.py +0 -0
  29. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/files/__init__.py +0 -0
  30. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/image/__init__.py +0 -0
  31. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/__init__.py +0 -0
  32. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/activations.py +0 -0
  33. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/anomaly_detection.py +0 -0
  34. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/attention.py +0 -0
  35. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/augmentation.py +0 -0
  36. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/clustering.py +0 -0
  37. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/cnn.py +0 -0
  38. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/cross_validation.py +0 -0
  39. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/dimensionality.py +0 -0
  40. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/dropout.py +0 -0
  41. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/ensemble.py +0 -0
  42. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/feature_selection.py +0 -0
  43. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/gradient_descent.py +0 -0
  44. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/imbalanced.py +0 -0
  45. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/interpretation.py +0 -0
  46. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/loss_functions.py +0 -0
  47. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/losses.py +0 -0
  48. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/lr_schedulers.py +0 -0
  49. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/metrics.py +0 -0
  50. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/neural_network.py +0 -0
  51. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/normalization.py +0 -0
  52. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/normalization_advanced.py +0 -0
  53. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/optimizers.py +0 -0
  54. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/pipeline.py +0 -0
  55. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/positional_encoding.py +0 -0
  56. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/regularization.py +0 -0
  57. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/rnn.py +0 -0
  58. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/timeseries.py +0 -0
  59. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/tuning.py +0 -0
  60. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/ml/weight_init.py +0 -0
  61. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/security/__init__.py +0 -0
  62. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/security/password_checker.py +0 -0
  63. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/text/__init__.py +0 -0
  64. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/utils/__init__.py +0 -0
  65. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/utils/cache_system.py +0 -0
  66. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/utils/logger.py +0 -0
  67. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/utils/rate_limiter.py +0 -0
  68. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/utils/retry.py +0 -0
  69. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/validation/__init__.py +0 -0
  70. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/validation/data_validator.py +0 -0
  71. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/web/__init__.py +0 -0
  72. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/web/scraper.py +0 -0
  73. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools/web/url_shortener.py +0 -0
  74. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools.egg-info/dependency_links.txt +0 -0
  75. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools.egg-info/requires.txt +0 -0
  76. {ilovetools-0.2.28 → ilovetools-0.2.29}/ilovetools.egg-info/top_level.txt +0 -0
  77. {ilovetools-0.2.28 → ilovetools-0.2.29}/requirements.txt +0 -0
  78. {ilovetools-0.2.28 → ilovetools-0.2.29}/setup.cfg +0 -0
  79. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/__init__.py +0 -0
  80. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_activations.py +0 -0
  81. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_attention.py +0 -0
  82. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_augmentation.py +0 -0
  83. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_cnn.py +0 -0
  84. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_dropout.py +0 -0
  85. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_gradient_descent.py +0 -0
  86. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_loss_functions.py +0 -0
  87. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_losses.py +0 -0
  88. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_lr_schedulers.py +0 -0
  89. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_neural_network.py +0 -0
  90. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_normalization.py +0 -0
  91. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_normalization_advanced.py +0 -0
  92. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_optimizers.py +0 -0
  93. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_positional_encoding.py +0 -0
  94. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_pypi_installation.py +0 -0
  95. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_regularization.py +0 -0
  96. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_rnn.py +0 -0
  97. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/test_weight_init.py +0 -0
  98. {ilovetools-0.2.28 → ilovetools-0.2.29}/tests/verify_positional_encoding.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.28
+ Version: 0.2.29
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,data-augmentation,image-augmentation,text-augmentation,mixup,cutout,random-erasing,autoaugment,randaugment,albumentations,computer-vision,nlp,deep-learning,neural-networks,training,generalization,overfitting
+ Keywords: utilities,tools,ai,ml,data-processing,automation,learning-rate,schedulers,step-lr,exponential-lr,cosine-annealing,one-cycle,cyclic-lr,sgdr,warm-restarts,reduce-on-plateau,polynomial-lr,warmup,multi-step-lr,optimization,training,deep-learning,neural-networks,pytorch,tensorflow,keras,convergence,super-convergence,adaptive-learning-rate
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -0,0 +1,588 @@
+ """
+ Learning Rate Schedulers Suite
+
+ This module implements various learning rate scheduling strategies for training neural networks.
+ Learning rate schedulers dynamically adjust the learning rate during training to improve
+ convergence speed, stability, and final model performance.
+
+ Implemented Schedulers:
+ 1. StepLR - Step decay at fixed intervals
+ 2. ExponentialLR - Exponential decay
+ 3. CosineAnnealingLR - Cosine annealing schedule
+ 4. CosineAnnealingWarmRestarts - Cosine annealing with warm restarts (SGDR)
+ 5. OneCycleLR - One cycle learning rate policy
+ 6. CyclicLR - Cyclic learning rate
+ 7. ReduceLROnPlateau - Reduce LR when metric plateaus
+ 8. PolynomialLR - Polynomial decay
+ 9. WarmupLR - Linear warmup scheduler
+ 10. MultiStepLR - Multi-step decay
+
+ References:
+ - Step Decay: Standard practice in deep learning
+ - Cosine Annealing: "SGDR: Stochastic Gradient Descent with Warm Restarts" (Loshchilov & Hutter, 2016)
+ - One Cycle: "Super-Convergence" (Smith, 2018)
+ - Cyclic LR: "Cyclical Learning Rates for Training Neural Networks" (Smith, 2017)
+
+ Author: Ali Mehdi
+ Date: January 17, 2026
+ """
+
+ import numpy as np
+ from typing import Optional, Callable
+
+
+ class StepLR:
+     """
+     Step Learning Rate Scheduler.
+
+     Decays the learning rate by gamma every step_size epochs.
+
+     Formula:
+         lr = initial_lr * gamma^(epoch // step_size)
+
+     Args:
+         initial_lr: Initial learning rate
+         step_size: Period of learning rate decay (in epochs)
+         gamma: Multiplicative factor of learning rate decay (default: 0.1)
+
+     Example:
+         >>> scheduler = StepLR(initial_lr=0.1, step_size=30, gamma=0.1)
+         >>> for epoch in range(100):
+         ...     lr = scheduler.step(epoch)
+         ...     print(f"Epoch {epoch}: LR = {lr}")
+
+     Reference:
+         Standard practice in deep learning
+     """
+
+     def __init__(self, initial_lr: float, step_size: int, gamma: float = 0.1):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if step_size <= 0:
+             raise ValueError(f"step_size must be positive, got {step_size}")
+         if gamma <= 0 or gamma >= 1:
+             raise ValueError(f"gamma must be in (0, 1), got {gamma}")
+
+         self.initial_lr = initial_lr
+         self.step_size = step_size
+         self.gamma = gamma
+         self.current_lr = initial_lr
+
+     def step(self, epoch: int) -> float:
+         """
+         Update learning rate for given epoch.
+
+         Args:
+             epoch: Current epoch number
+
+         Returns:
+             Updated learning rate
+         """
+         self.current_lr = self.initial_lr * (self.gamma ** (epoch // self.step_size))
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class ExponentialLR:
+     """
+     Exponential Learning Rate Scheduler.
+
+     Decays the learning rate exponentially every epoch.
+
+     Formula:
+         lr = initial_lr * gamma^epoch
+
+     Args:
+         initial_lr: Initial learning rate
+         gamma: Multiplicative factor of learning rate decay (default: 0.95)
+
+     Example:
+         >>> scheduler = ExponentialLR(initial_lr=0.1, gamma=0.95)
+         >>> for epoch in range(100):
+         ...     lr = scheduler.step(epoch)
+
+     Reference:
+         Standard exponential decay
+     """
+
+     def __init__(self, initial_lr: float, gamma: float = 0.95):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if gamma <= 0 or gamma >= 1:
+             raise ValueError(f"gamma must be in (0, 1), got {gamma}")
+
+         self.initial_lr = initial_lr
+         self.gamma = gamma
+         self.current_lr = initial_lr
+
+     def step(self, epoch: int) -> float:
+         """Update learning rate for given epoch."""
+         self.current_lr = self.initial_lr * (self.gamma ** epoch)
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class CosineAnnealingLR:
+     """
+     Cosine Annealing Learning Rate Scheduler.
+
+     Sets the learning rate using a cosine annealing schedule.
+
+     Formula:
+         lr = eta_min + (initial_lr - eta_min) * (1 + cos(π * epoch / T_max)) / 2
+
+     Args:
+         initial_lr: Initial learning rate
+         T_max: Maximum number of iterations
+         eta_min: Minimum learning rate (default: 0)
+
+     Example:
+         >>> scheduler = CosineAnnealingLR(initial_lr=0.1, T_max=100)
+         >>> for epoch in range(100):
+         ...     lr = scheduler.step(epoch)
+
+     Reference:
+         Loshchilov & Hutter, "SGDR: Stochastic Gradient Descent with Warm Restarts", 2016
+     """
+
+     def __init__(self, initial_lr: float, T_max: int, eta_min: float = 0):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if T_max <= 0:
+             raise ValueError(f"T_max must be positive, got {T_max}")
+         if eta_min < 0:
+             raise ValueError(f"eta_min must be non-negative, got {eta_min}")
+
+         self.initial_lr = initial_lr
+         self.T_max = T_max
+         self.eta_min = eta_min
+         self.current_lr = initial_lr
+
+     def step(self, epoch: int) -> float:
+         """Update learning rate for given epoch."""
+         self.current_lr = self.eta_min + (self.initial_lr - self.eta_min) * \
+             (1 + np.cos(np.pi * epoch / self.T_max)) / 2
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class CosineAnnealingWarmRestarts:
+     """
+     Cosine Annealing with Warm Restarts (SGDR).
+
+     Periodically restarts the learning rate schedule.
+
+     Formula:
+         lr = eta_min + (initial_lr - eta_min) * (1 + cos(π * T_cur / T_i)) / 2
+         where T_i is the current restart period
+
+     Args:
+         initial_lr: Initial learning rate
+         T_0: Number of iterations for the first restart
+         T_mult: Factor to increase T_i after each restart (default: 1)
+         eta_min: Minimum learning rate (default: 0)
+
+     Example:
+         >>> scheduler = CosineAnnealingWarmRestarts(initial_lr=0.1, T_0=10, T_mult=2)
+         >>> for epoch in range(100):
+         ...     lr = scheduler.step(epoch)
+
+     Reference:
+         Loshchilov & Hutter, "SGDR: Stochastic Gradient Descent with Warm Restarts", 2016
+     """
+
+     def __init__(self, initial_lr: float, T_0: int, T_mult: int = 1, eta_min: float = 0):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if T_0 <= 0:
+             raise ValueError(f"T_0 must be positive, got {T_0}")
+         if T_mult < 1:
+             raise ValueError(f"T_mult must be >= 1, got {T_mult}")
+
+         self.initial_lr = initial_lr
+         self.T_0 = T_0
+         self.T_mult = T_mult
+         self.eta_min = eta_min
+         self.current_lr = initial_lr
+         self.T_i = T_0
+         self.T_cur = 0
+
+     def step(self, epoch: int) -> float:
+         """Update learning rate for given epoch."""
+         if self.T_cur >= self.T_i:
+             self.T_cur = 0
+             self.T_i = self.T_i * self.T_mult
+
+         self.current_lr = self.eta_min + (self.initial_lr - self.eta_min) * \
+             (1 + np.cos(np.pi * self.T_cur / self.T_i)) / 2
+         self.T_cur += 1
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class OneCycleLR:
+     """
+     One Cycle Learning Rate Policy.
+
+     Increases learning rate from initial to max, then decreases to min.
+
+     Phases:
+         1. Warmup: initial_lr → max_lr (pct_start of total steps)
+         2. Annealing: max_lr → final_lr (remaining steps)
+
+     Args:
+         initial_lr: Initial learning rate
+         max_lr: Maximum learning rate
+         total_steps: Total number of training steps
+         pct_start: Percentage of cycle spent increasing LR (default: 0.3)
+         final_lr: Final learning rate (default: initial_lr / 1000)
+
+     Example:
+         >>> scheduler = OneCycleLR(initial_lr=0.001, max_lr=0.1, total_steps=1000)
+         >>> for step in range(1000):
+         ...     lr = scheduler.step(step)
+
+     Reference:
+         Smith, "Super-Convergence: Very Fast Training of Neural Networks", 2018
+     """
+
+     def __init__(self, initial_lr: float, max_lr: float, total_steps: int,
+                  pct_start: float = 0.3, final_lr: Optional[float] = None):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if max_lr <= initial_lr:
+             raise ValueError(f"max_lr must be > initial_lr, got {max_lr}")
+         if total_steps <= 0:
+             raise ValueError(f"total_steps must be positive, got {total_steps}")
+         if not 0 < pct_start < 1:
+             raise ValueError(f"pct_start must be in (0, 1), got {pct_start}")
+
+         self.initial_lr = initial_lr
+         self.max_lr = max_lr
+         self.total_steps = total_steps
+         self.pct_start = pct_start
+         self.final_lr = final_lr if final_lr is not None else initial_lr / 1000
+         self.current_lr = initial_lr
+
+         self.step_up = int(total_steps * pct_start)
+         self.step_down = total_steps - self.step_up
+
+     def step(self, current_step: int) -> float:
+         """Update learning rate for given step."""
+         if current_step < self.step_up:
+             # Phase 1: Increase from initial_lr to max_lr
+             pct = current_step / self.step_up
+             self.current_lr = self.initial_lr + (self.max_lr - self.initial_lr) * pct
+         else:
+             # Phase 2: Decrease from max_lr to final_lr
+             pct = (current_step - self.step_up) / self.step_down
+             self.current_lr = self.max_lr - (self.max_lr - self.final_lr) * pct
+
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class CyclicLR:
+     """
+     Cyclic Learning Rate Scheduler.
+
+     Cycles the learning rate between two boundaries with a constant frequency.
+
+     Args:
+         base_lr: Lower boundary of the learning rate
+         max_lr: Upper boundary of the learning rate
+         step_size: Number of iterations in half a cycle
+         mode: One of {'triangular', 'triangular2', 'exp_range'} (default: 'triangular')
+         gamma: Constant for 'exp_range' mode (default: 1.0)
+
+     Example:
+         >>> scheduler = CyclicLR(base_lr=0.001, max_lr=0.1, step_size=2000)
+         >>> for step in range(10000):
+         ...     lr = scheduler.step(step)
+
+     Reference:
+         Smith, "Cyclical Learning Rates for Training Neural Networks", 2017
+     """
+
+     def __init__(self, base_lr: float, max_lr: float, step_size: int,
+                  mode: str = 'triangular', gamma: float = 1.0):
+         if base_lr <= 0:
+             raise ValueError(f"base_lr must be positive, got {base_lr}")
+         if max_lr <= base_lr:
+             raise ValueError(f"max_lr must be > base_lr, got {max_lr}")
+         if step_size <= 0:
+             raise ValueError(f"step_size must be positive, got {step_size}")
+         if mode not in ['triangular', 'triangular2', 'exp_range']:
+             raise ValueError(f"mode must be one of ['triangular', 'triangular2', 'exp_range'], got {mode}")
+
+         self.base_lr = base_lr
+         self.max_lr = max_lr
+         self.step_size = step_size
+         self.mode = mode
+         self.gamma = gamma
+         self.current_lr = base_lr
+
+     def step(self, current_step: int) -> float:
+         """Update learning rate for given step."""
+         cycle = np.floor(1 + current_step / (2 * self.step_size))
+         x = np.abs(current_step / self.step_size - 2 * cycle + 1)
+
+         if self.mode == 'triangular':
+             scale_fn = 1.0
+         elif self.mode == 'triangular2':
+             scale_fn = 1 / (2 ** (cycle - 1))
+         else:  # exp_range
+             scale_fn = self.gamma ** current_step
+
+         self.current_lr = self.base_lr + (self.max_lr - self.base_lr) * \
+             max(0, (1 - x)) * scale_fn
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class ReduceLROnPlateau:
+     """
+     Reduce Learning Rate on Plateau.
+
+     Reduces learning rate when a metric has stopped improving.
+
+     Args:
+         initial_lr: Initial learning rate
+         mode: One of {'min', 'max'} (default: 'min')
+         factor: Factor by which to reduce LR (default: 0.1)
+         patience: Number of epochs with no improvement to wait (default: 10)
+         threshold: Threshold for measuring improvement (default: 1e-4)
+         min_lr: Minimum learning rate (default: 0)
+
+     Example:
+         >>> scheduler = ReduceLROnPlateau(initial_lr=0.1, patience=10)
+         >>> for epoch in range(100):
+         ...     val_loss = train_and_validate()
+         ...     lr = scheduler.step(val_loss)
+
+     Reference:
+         Standard adaptive learning rate reduction
+     """
+
+     def __init__(self, initial_lr: float, mode: str = 'min', factor: float = 0.1,
+                  patience: int = 10, threshold: float = 1e-4, min_lr: float = 0):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if mode not in ['min', 'max']:
+             raise ValueError(f"mode must be 'min' or 'max', got {mode}")
+         if factor <= 0 or factor >= 1:
+             raise ValueError(f"factor must be in (0, 1), got {factor}")
+         if patience <= 0:
+             raise ValueError(f"patience must be positive, got {patience}")
+
+         self.initial_lr = initial_lr
+         self.mode = mode
+         self.factor = factor
+         self.patience = patience
+         self.threshold = threshold
+         self.min_lr = min_lr
+         self.current_lr = initial_lr
+
+         self.best_metric = np.inf if mode == 'min' else -np.inf
+         self.num_bad_epochs = 0
+
+     def step(self, metric: float) -> float:
+         """
+         Update learning rate based on metric.
+
+         Args:
+             metric: Current metric value (e.g., validation loss)
+
+         Returns:
+             Updated learning rate
+         """
+         if self.mode == 'min':
+             is_better = metric < self.best_metric - self.threshold
+         else:
+             is_better = metric > self.best_metric + self.threshold
+
+         if is_better:
+             self.best_metric = metric
+             self.num_bad_epochs = 0
+         else:
+             self.num_bad_epochs += 1
+
+         if self.num_bad_epochs >= self.patience:
+             self.current_lr = max(self.current_lr * self.factor, self.min_lr)
+             self.num_bad_epochs = 0
+
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class PolynomialLR:
+     """
+     Polynomial Learning Rate Scheduler.
+
+     Decays the learning rate using a polynomial function.
+
+     Formula:
+         lr = (initial_lr - end_lr) * (1 - epoch / total_epochs)^power + end_lr
+
+     Args:
+         initial_lr: Initial learning rate
+         total_epochs: Total number of epochs
+         end_lr: Final learning rate (default: 0)
+         power: Polynomial power (default: 1.0)
+
+     Example:
+         >>> scheduler = PolynomialLR(initial_lr=0.1, total_epochs=100, power=2.0)
+         >>> for epoch in range(100):
+         ...     lr = scheduler.step(epoch)
+
+     Reference:
+         Polynomial decay scheduling
+     """
+
+     def __init__(self, initial_lr: float, total_epochs: int,
+                  end_lr: float = 0, power: float = 1.0):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if total_epochs <= 0:
+             raise ValueError(f"total_epochs must be positive, got {total_epochs}")
+         if end_lr < 0:
+             raise ValueError(f"end_lr must be non-negative, got {end_lr}")
+         if power <= 0:
+             raise ValueError(f"power must be positive, got {power}")
+
+         self.initial_lr = initial_lr
+         self.total_epochs = total_epochs
+         self.end_lr = end_lr
+         self.power = power
+         self.current_lr = initial_lr
+
+     def step(self, epoch: int) -> float:
+         """Update learning rate for given epoch."""
+         self.current_lr = (self.initial_lr - self.end_lr) * \
+             (1 - epoch / self.total_epochs) ** self.power + self.end_lr
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class WarmupLR:
+     """
+     Linear Warmup Learning Rate Scheduler.
+
+     Linearly increases learning rate from 0 to target over warmup steps.
+
+     Args:
+         target_lr: Target learning rate after warmup
+         warmup_steps: Number of warmup steps
+
+     Example:
+         >>> scheduler = WarmupLR(target_lr=0.1, warmup_steps=1000)
+         >>> for step in range(1000):
+         ...     lr = scheduler.step(step)
+
+     Reference:
+         Standard warmup practice
+     """
+
+     def __init__(self, target_lr: float, warmup_steps: int):
+         if target_lr <= 0:
+             raise ValueError(f"target_lr must be positive, got {target_lr}")
+         if warmup_steps <= 0:
+             raise ValueError(f"warmup_steps must be positive, got {warmup_steps}")
+
+         self.target_lr = target_lr
+         self.warmup_steps = warmup_steps
+         self.current_lr = 0
+
+     def step(self, current_step: int) -> float:
+         """Update learning rate for given step."""
+         if current_step < self.warmup_steps:
+             self.current_lr = self.target_lr * (current_step / self.warmup_steps)
+         else:
+             self.current_lr = self.target_lr
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ class MultiStepLR:
+     """
+     Multi-Step Learning Rate Scheduler.
+
+     Decays the learning rate by gamma at specified milestones.
+
+     Args:
+         initial_lr: Initial learning rate
+         milestones: List of epoch indices for LR decay
+         gamma: Multiplicative factor of learning rate decay (default: 0.1)
+
+     Example:
+         >>> scheduler = MultiStepLR(initial_lr=0.1, milestones=[30, 60, 90])
+         >>> for epoch in range(100):
+         ...     lr = scheduler.step(epoch)
+
+     Reference:
+         Standard multi-step decay
+     """
+
+     def __init__(self, initial_lr: float, milestones: list, gamma: float = 0.1):
+         if initial_lr <= 0:
+             raise ValueError(f"initial_lr must be positive, got {initial_lr}")
+         if not milestones:
+             raise ValueError("milestones cannot be empty")
+         if gamma <= 0 or gamma >= 1:
+             raise ValueError(f"gamma must be in (0, 1), got {gamma}")
+
+         self.initial_lr = initial_lr
+         self.milestones = sorted(milestones)
+         self.gamma = gamma
+         self.current_lr = initial_lr
+
+     def step(self, epoch: int) -> float:
+         """Update learning rate for given epoch."""
+         num_decays = sum(1 for m in self.milestones if epoch >= m)
+         self.current_lr = self.initial_lr * (self.gamma ** num_decays)
+         return self.current_lr
+
+     def get_lr(self) -> float:
+         """Get current learning rate."""
+         return self.current_lr
+
+
+ __all__ = [
+     'StepLR',
+     'ExponentialLR',
+     'CosineAnnealingLR',
+     'CosineAnnealingWarmRestarts',
+     'OneCycleLR',
+     'CyclicLR',
+     'ReduceLROnPlateau',
+     'PolynomialLR',
+     'WarmupLR',
+     'MultiStepLR',
+ ]
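For orientation, a minimal usage sketch (not part of the released package or its diff) showing how the new schedulers could drive a plain training loop; the import path follows the module location listed above, and train_one_epoch/evaluate are hypothetical placeholders:

import numpy as np
from ilovetools.ml.schedulers import CosineAnnealingLR, ReduceLROnPlateau, WarmupLR

# Hypothetical stand-ins for real training and validation steps.
def train_one_epoch(lr: float) -> None: ...
def evaluate() -> float: return float(np.random.rand())

warmup = WarmupLR(target_lr=0.1, warmup_steps=5)          # linear ramp for the first epochs
cosine = CosineAnnealingLR(initial_lr=0.1, T_max=95)      # then anneal toward eta_min
plateau = ReduceLROnPlateau(initial_lr=0.1, patience=10)  # metric-driven alternative

for epoch in range(100):
    # use the warmup value until it saturates, then follow the cosine curve
    lr = warmup.step(epoch) if epoch < 5 else cosine.step(epoch - 5)
    train_one_epoch(lr)
    val_loss = evaluate()
    # a metric-driven schedule would instead be stepped with the validation loss:
    # lr = plateau.step(val_loss)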
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.28
+ Version: 0.2.29
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,data-augmentation,image-augmentation,text-augmentation,mixup,cutout,random-erasing,autoaugment,randaugment,albumentations,computer-vision,nlp,deep-learning,neural-networks,training,generalization,overfitting
+ Keywords: utilities,tools,ai,ml,data-processing,automation,learning-rate,schedulers,step-lr,exponential-lr,cosine-annealing,one-cycle,cyclic-lr,sgdr,warm-restarts,reduce-on-plateau,polynomial-lr,warmup,multi-step-lr,optimization,training,deep-learning,neural-networks,pytorch,tensorflow,keras,convergence,super-convergence,adaptive-learning-rate
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -56,6 +56,7 @@ ilovetools/ml/pipeline.py
  ilovetools/ml/positional_encoding.py
  ilovetools/ml/regularization.py
  ilovetools/ml/rnn.py
+ ilovetools/ml/schedulers.py
  ilovetools/ml/timeseries.py
  ilovetools/ml/tuning.py
  ilovetools/ml/weight_init.py
@@ -90,5 +91,6 @@ tests/test_positional_encoding.py
  tests/test_pypi_installation.py
  tests/test_regularization.py
  tests/test_rnn.py
+ tests/test_schedulers.py
  tests/test_weight_init.py
  tests/verify_positional_encoding.py
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
  [project]
  name = "ilovetools"
- version = "0.2.28"
+ version = "0.2.29"
  description = "A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs"
  readme = "README.md"
  requires-python = ">=3.8"
@@ -12,7 +12,7 @@ license = "MIT"
  authors = [
      {name = "Ali Mehdi", email = "ali.mehdi.dev579@gmail.com"}
  ]
- keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "data-augmentation", "image-augmentation", "text-augmentation", "mixup", "cutout", "random-erasing", "autoaugment", "randaugment", "albumentations", "computer-vision", "nlp", "deep-learning", "neural-networks", "training", "generalization", "overfitting"]
+ keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "learning-rate", "schedulers", "step-lr", "exponential-lr", "cosine-annealing", "one-cycle", "cyclic-lr", "sgdr", "warm-restarts", "reduce-on-plateau", "polynomial-lr", "warmup", "multi-step-lr", "optimization", "training", "deep-learning", "neural-networks", "pytorch", "tensorflow", "keras", "convergence", "super-convergence", "adaptive-learning-rate"]
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
  setup(
      name="ilovetools",
-     version="0.2.28",
+     version="0.2.29",
      author="Ali Mehdi",
      author_email="ali.mehdi.dev579@gmail.com",
      description="A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs",
@@ -58,7 +58,7 @@ setup(
          "soundfile>=0.12.0",
      ],
  },
-     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, data-augmentation, image-augmentation, text-augmentation, mixup, cutout, random-erasing, computer-vision, nlp, deep-learning",
+     keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, learning-rate, schedulers, step-lr, cosine-annealing, one-cycle, cyclic-lr, sgdr, warmup, optimization, training, deep-learning, pytorch, tensorflow",
      project_urls={
          "Bug Reports": "https://github.com/AliMehdi512/ilovetools/issues",
          "Source": "https://github.com/AliMehdi512/ilovetools",