scalar-autograd 0.1.6 → 0.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Optimizers.d.ts +4 -0
- package/dist/Optimizers.js +10 -0
- package/package.json +1 -1
package/dist/Optimizers.d.ts
CHANGED
@@ -16,6 +16,7 @@ export declare abstract class Optimizer {
      * Performs a parameter update step.
      */
     abstract step(): void;
+    abstract resetStateFor(trainable: Value): void;
     /**
      * Sets grads of all trainables to zero.
      */
@@ -53,6 +54,7 @@ export declare class SGD extends Optimizer {
      * Performs a parameter update using standard SGD.
      */
     step(): void;
+    resetStateFor(trainable: Value): void;
 }
 /**
  * Adam and AdamW optimizer parameters.
@@ -88,6 +90,7 @@ export declare class Adam extends Optimizer {
      * Performs a parameter update using Adam optimization.
      */
     step(): void;
+    resetStateFor(trainable: Value): void;
 }
 /**
  * AdamW optimizer, supports decoupled weight decay and gradient clipping (same options as Adam).
@@ -111,4 +114,5 @@ export declare class AdamW extends Optimizer {
      * Performs a parameter update using AdamW optimization (decoupled weight decay).
      */
     step(): void;
+    resetStateFor(trainable: Value): void;
 }
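The declaration changes add one method to the public optimizer API: the abstract base class Optimizer now requires resetStateFor(trainable: Value), and SGD, Adam, and AdamW each declare a concrete override. Below is a minimal usage sketch of that surface; the Value and Adam constructor calls are illustrative assumptions, and only the step()/resetStateFor() signatures come from the typings above.

import { Value, Adam } from "scalar-autograd";     // assumed entry-point exports

const w = new Value(0.5);                          // trainable parameter (constructor shape assumed)
const opt = new Adam([w], { learningRate: 1e-2 }); // options shape assumed

// ...training steps populate Adam's per-parameter moment estimates...

// If w is re-initialized mid-training, clear its stale optimizer state so
// old first/second moments do not bias the next update.
w.data = 0;
opt.resetStateFor(w);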
package/dist/Optimizers.js
CHANGED
@@ -64,6 +64,8 @@ class SGD extends Optimizer {
             v.data -= this.learningRate * v.grad;
         }
     }
+    resetStateFor(trainable) {
+    }
 }
 exports.SGD = SGD;
 /**
@@ -119,6 +121,10 @@ class Adam extends Optimizer {
             this.v.set(v, vVal);
         }
     }
+    resetStateFor(trainable) {
+        this.m.set(trainable, 0);
+        this.v.set(trainable, 0);
+    }
 }
 exports.Adam = Adam;
 /**
@@ -173,5 +179,9 @@ class AdamW extends Optimizer {
             this.v.set(v, vVal);
         }
     }
+    resetStateFor(trainable) {
+        this.m.set(trainable, 0);
+        this.v.set(trainable, 0);
+    }
 }
 exports.AdamW = AdamW;
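In the implementations, SGD's resetStateFor is a no-op because plain SGD keeps no per-parameter state, while Adam and AdamW zero the stored first-moment (m) and second-moment (v) entries for that trainable. Any custom optimizer extending Optimizer now has to satisfy the same contract; the sketch below shows one way that could look. MomentumSGD, its constructor, and how it tracks its parameters are illustrative assumptions, not part of the package.

import { Optimizer, Value } from "scalar-autograd"; // assumed exports

// Hypothetical momentum-SGD subclass, written only to illustrate the new
// abstract resetStateFor(trainable) requirement introduced in this release.
class MomentumSGD extends Optimizer {
  private velocity = new Map<Value, number>();

  constructor(private params: Value[], private lr = 0.01, private mu = 0.9) {
    super(); // base-class constructor arguments are not shown in the diff; assumed empty
  }

  step(): void {
    for (const p of this.params) {
      // v <- mu * v - lr * grad;  p.data <- p.data + v
      const v = this.mu * (this.velocity.get(p) ?? 0) - this.lr * p.grad;
      this.velocity.set(p, v);
      p.data += v;
    }
  }

  // Mirrors the intent of Adam/AdamW above: discard accumulated state for a
  // single parameter so its next update starts fresh.
  resetStateFor(trainable: Value): void {
    this.velocity.delete(trainable);
  }
}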