gradboard 0.2.0.tar.gz → 1.0.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gradboard might be problematic.
- {gradboard-0.2.0 → gradboard-1.0.1}/PKG-INFO +1 -1
- {gradboard-0.2.0 → gradboard-1.0.1}/gradboard/optimiser.py +3 -4
- {gradboard-0.2.0 → gradboard-1.0.1}/pyproject.toml +1 -1
- {gradboard-0.2.0 → gradboard-1.0.1}/LICENSE +0 -0
- {gradboard-0.2.0 → gradboard-1.0.1}/README.md +0 -0
- {gradboard-0.2.0 → gradboard-1.0.1}/gradboard/__init__.py +0 -0
- {gradboard-0.2.0 → gradboard-1.0.1}/gradboard/cycles.py +0 -0
- {gradboard-0.2.0 → gradboard-1.0.1}/gradboard/scheduler.py +0 -0
gradboard/optimiser.py

@@ -25,9 +25,7 @@ class AdamS(Optimizer):
         weight_decay (float, optional): weight decay coefficient (default: 1e-4)
     """
 
-    def __init__(
-        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4
-    ):
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.1):
         if not 0.0 <= lr:
             raise ValueError("Invalid learning rate: {}".format(lr))
         if not 0.0 <= eps:
@@ -38,6 +36,7 @@ class AdamS(Optimizer):
             raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
         if not 0.0 <= weight_decay:
             raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        weight_decay *= lr
         defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay}
         super().__init__(params, defaults)
 
@@ -130,7 +129,7 @@ class AdamS(Optimizer):
         return loss
 
 
-def get_optimiser(model, optimiser=AdamW, lr=
+def get_optimiser(model, optimiser=AdamW, lr=1e-3, weight_decay=1e-2):
     """
     Defaults are from one of the presets from the accompanying repo to Hassani
     et al. (2023) "Escaping the Big Data Paradigm with Compact Transformers",