gradboard 0.1.21.tar.gz → 1.0.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gradboard might be problematic.
- {gradboard-0.1.21 → gradboard-1.0.0}/PKG-INFO +1 -1
- {gradboard-0.1.21 → gradboard-1.0.0}/gradboard/optimiser.py +2 -3
- {gradboard-0.1.21 → gradboard-1.0.0}/gradboard/scheduler.py +4 -5
- {gradboard-0.1.21 → gradboard-1.0.0}/pyproject.toml +1 -1
- {gradboard-0.1.21 → gradboard-1.0.0}/LICENSE +0 -0
- {gradboard-0.1.21 → gradboard-1.0.0}/README.md +0 -0
- {gradboard-0.1.21 → gradboard-1.0.0}/gradboard/__init__.py +0 -0
- {gradboard-0.1.21 → gradboard-1.0.0}/gradboard/cycles.py +0 -0
{gradboard-0.1.21 → gradboard-1.0.0}/gradboard/optimiser.py

@@ -25,9 +25,7 @@ class AdamS(Optimizer):
         weight_decay (float, optional): weight decay coefficient (default: 1e-4)
     """
 
-    def __init__(
-        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4
-    ):
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.1):
         if not 0.0 <= lr:
             raise ValueError("Invalid learning rate: {}".format(lr))
         if not 0.0 <= eps:

@@ -38,6 +36,7 @@ class AdamS(Optimizer):
             raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
         if not 0.0 <= weight_decay:
             raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        weight_decay *= lr
         defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay}
         super().__init__(params, defaults)
 
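Reading the AdamS change: 1.0.0 folds the learning rate into the stored weight-decay coefficient (weight_decay *= lr) and raises the default from 1e-4 to 0.1, while the docstring above was left saying 1e-4. A minimal sketch, not from the package and assuming step() applies the stored coefficient directly to each parameter, of why the two defaults coincide at the default learning rate:

lr = 1e-3                      # default learning rate in both versions

old_decay = 1e-4               # 0.1.21: coefficient stored as given
new_decay = 0.1 * lr           # 1.0.0: default 0.1, rescaled by lr in __init__

# Both give an effective per-step coefficient of about 1e-4 at the default lr;
# at other learning rates the 1.0.0 decay now scales with lr.
print(old_decay, new_decay)    # both ≈ 1e-4

At any non-default learning rate the behaviour diverges: the 1.0.0 decay grows and shrinks with lr, which is what makes the higher default of 0.1 reasonable.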
{gradboard-0.1.21 → gradboard-1.0.0}/gradboard/scheduler.py

@@ -7,7 +7,6 @@ import copy
 import math
 
 from scipy.ndimage import gaussian_filter1d
-import scipy.constants as sc
 
 from torch.amp import GradScaler
 

@@ -164,15 +163,15 @@ class PASS:
         points_left_of_min = [p for p in range_test_results if p[0] < minimum[0]]
         highest_point_left_of_min = max(points_left_of_min, key=lambda x: x[1])
         loss_difference = highest_point_left_of_min[1] - minimum[1]
-        cool_point_loss = min_loss + 0.
-        max_lr_loss = min_loss + 0.
+        cool_point_loss = min_loss + 0.8 * loss_difference
+        max_lr_loss = min_loss + 0.2 * loss_difference
         for r in range_test_results:
             if r[1] < cool_point_loss:
-                self.cool_point = r[0]
+                self.cool_point = r[0]
                 break
         for r in range_test_results:
             if r[1] <= max_lr_loss:
-                self.max_lr = r[0]
+                self.max_lr = r[0]
                 break
         print("High LR", self.max_lr)
         print("Cool point", self.cool_point)
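The PASS hunk sets its two thresholds as fractions of the loss range measured to the left of the loss minimum: cool_point is the first learning rate whose loss drops below min_loss + 0.8 * loss_difference, and max_lr the first at or below min_loss + 0.2 * loss_difference. A self-contained sketch of that selection on made-up range-test data; the (lr, loss) list and the variable names here mirror the diff but are assumptions, not the package's API:

# Hypothetical range-test results: (learning_rate, smoothed_loss) pairs,
# sorted by increasing learning rate.
range_test_results = [(1e-5, 2.0), (1e-4, 1.6), (1e-3, 0.9), (1e-2, 0.5), (1e-1, 3.0)]

minimum = min(range_test_results, key=lambda p: p[1])          # (0.01, 0.5)
min_loss = minimum[1]

points_left_of_min = [p for p in range_test_results if p[0] < minimum[0]]
highest_point_left_of_min = max(points_left_of_min, key=lambda p: p[1])
loss_difference = highest_point_left_of_min[1] - minimum[1]    # 2.0 - 0.5 = 1.5

cool_point_loss = min_loss + 0.8 * loss_difference             # 1.7
max_lr_loss = min_loss + 0.2 * loss_difference                 # 0.8

# First learning rate below each threshold, scanning from the smallest lr.
cool_point = next(lr for lr, loss in range_test_results if loss < cool_point_loss)
max_lr = next(lr for lr, loss in range_test_results if loss <= max_lr_loss)
print(cool_point, max_lr)                                      # 0.0001 0.01

Because the 0.8 threshold is looser than the 0.2 one, cool_point is always reached at a learning rate no higher than max_lr, matching the "High LR" / "Cool point" pair the hunk prints.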