scalar-autograd 0.1.5 → 0.1.6
This diff shows the changes between publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- package/dist/Losses.d.ts +51 -0
- package/dist/Losses.spec.d.ts +1 -0
- package/dist/Optimizers.d.ts +114 -0
- package/dist/Optimizers.edge-cases.spec.d.ts +1 -0
- package/dist/Optimizers.spec.d.ts +1 -0
- package/dist/V.d.ts +0 -0
- package/dist/Value.d.ts +260 -0
- package/dist/Value.edge-cases.spec.d.ts +1 -0
- package/dist/Value.grad-flow.spec.d.ts +1 -0
- package/dist/Value.losses-edge-cases.spec.d.ts +1 -0
- package/dist/Value.memory.spec.d.ts +1 -0
- package/dist/Value.nn.spec.d.ts +1 -0
- package/dist/Value.spec.d.ts +1 -0
- package/dist/ValueActivation.d.ts +7 -0
- package/dist/ValueArithmetic.d.ts +26 -0
- package/dist/ValueComparison.d.ts +10 -0
- package/dist/ValueTrig.d.ts +9 -0
- package/package.json +1 -1
package/dist/Losses.d.ts
ADDED
@@ -0,0 +1,51 @@
+import { Value } from "./Value";
+export declare class Losses {
+    /**
+     * Computes mean squared error (MSE) loss between outputs and targets.
+     * @param outputs Array of Value predictions.
+     * @param targets Array of Value targets.
+     * @returns Mean squared error as a Value.
+     */
+    static mse(outputs: Value[], targets: Value[]): Value;
+    /**
+     * Computes mean absolute error (MAE) loss between outputs and targets.
+     * @param outputs Array of Value predictions.
+     * @param targets Array of Value targets.
+     * @returns Mean absolute error as a Value.
+     */
+    static mae(outputs: Value[], targets: Value[]): Value;
+    static EPS: number;
+    /**
+     * Computes binary cross-entropy loss between predicted outputs and targets (after sigmoid).
+     * @param outputs Array of Value predictions (expected in (0,1)).
+     * @param targets Array of Value targets (typically 0 or 1).
+     * @returns Binary cross-entropy loss as a Value.
+     */
+    static binaryCrossEntropy(outputs: Value[], targets: Value[]): Value;
+    /**
+     * Computes categorical cross-entropy loss between outputs (logits) and integer target classes.
+     * @param outputs Array of Value logits for each class.
+     * @param targets Array of integer class indices (0-based, one per sample).
+     * @returns Categorical cross-entropy loss as a Value.
+     */
+    static categoricalCrossEntropy(outputs: Value[], targets: number[]): Value;
+    /**
+     * Computes Huber loss between outputs and targets.
+     * Combines quadratic loss for small residuals and linear loss for large residuals.
+     * @param outputs Array of Value predictions.
+     * @param targets Array of Value targets.
+     * @param delta Threshold at which to switch from quadratic to linear (default: 1.0).
+     * @returns Huber loss as a Value.
+     */
+    static huber(outputs: Value[], targets: Value[], delta?: number): Value;
+    /**
+     * Computes Tukey loss between outputs and targets.
+     * This robust loss function saturates for large residuals.
+     *
+     * @param outputs Array of Value predictions.
+     * @param targets Array of Value targets.
+     * @param c Threshold constant (typically 4.685).
+     * @returns Tukey loss as a Value.
+     */
+    static tukey(outputs: Value[], targets: Value[], c?: number): Value;
+}
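
For orientation, a minimal usage sketch (illustrative, not from the package docs): it assumes the package entry point re-exports Value and Losses, as the re-exports in Value.d.ts below suggest, and the numbers are arbitrary.

    import { Value, Losses } from "scalar-autograd";

    // Wrap predictions and targets as autograd Values.
    const outputs = [new Value(0.9), new Value(0.2)];
    const targets = [new Value(1.0), new Value(0.0)];

    // The loss is itself a Value, so gradients flow back to the outputs.
    const loss = Losses.mse(outputs, targets);
    loss.backward();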
package/dist/Losses.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Optimizers.d.ts
ADDED
@@ -0,0 +1,114 @@
+import { Value } from "./Value";
+/**
+ * Abstract base class for all optimizers.
+ * Ensures that only parameters with requiresGrad are optimized.
+ */
+export declare abstract class Optimizer {
+    protected trainables: Value[];
+    learningRate: number;
+    /**
+     * Constructs an Optimizer.
+     * @param trainables Array of Value parameters to optimize.
+     * @param learningRate Learning rate for updates.
+     */
+    constructor(trainables: Value[], learningRate: number);
+    /**
+     * Performs a parameter update step.
+     */
+    abstract step(): void;
+    /**
+     * Sets the grads of all trainables to zero.
+     */
+    zeroGrad(): void;
+    /**
+     * Clips the global norm of the gradients as a form of regularization.
+     * @param maxNorm Maximum allowed norm for gradients.
+     */
+    clipGradients(maxNorm: number): void;
+}
+/**
+ * Optional arguments for basic optimizers.
+ * @property learningRate: Overrides the step size for parameter updates (default varies by optimizer).
+ * @property weightDecay: L2 regularization multiplier (default 0). Ignored for plain SGD.
+ * @property gradientClip: Maximum absolute value for gradient updates (default 0: no clipping).
+ */
+export interface OptimizerOptions {
+    learningRate?: number;
+    weightDecay?: number;
+    gradientClip?: number;
+}
+/**
+ * Stochastic Gradient Descent (SGD) optimizer. Accepts weightDecay and gradientClip for API consistency (ignored).
+ */
+export declare class SGD extends Optimizer {
+    private weightDecay;
+    private gradientClip;
+    /**
+     * Constructs an SGD optimizer.
+     * @param trainables Array of Value parameters to optimize.
+     * @param opts Optional parameters (learningRate, weightDecay, gradientClip).
+     */
+    constructor(trainables: Value[], opts?: OptimizerOptions);
+    /**
+     * Performs a parameter update using standard SGD.
+     */
+    step(): void;
+}
+/**
+ * Adam and AdamW optimizer parameters.
+ * Extends OptimizerOptions.
+ * @property beta1: Exponential decay rate for the 1st moment (default 0.9).
+ * @property beta2: Exponential decay rate for the 2nd moment (default 0.999).
+ * @property epsilon: Numerical stability fudge factor (default 1e-8).
+ */
+export interface AdamOptions extends OptimizerOptions {
+    beta1?: number;
+    beta2?: number;
+    epsilon?: number;
+}
+/**
+ * Adam optimizer; supports decoupled weight decay and gradient clipping.
+ */
+export declare class Adam extends Optimizer {
+    private beta1;
+    private beta2;
+    private epsilon;
+    private weightDecay;
+    private gradientClip;
+    private m;
+    private v;
+    private stepCount;
+    /**
+     * Constructs an Adam optimizer.
+     * @param trainables Array of Value parameters to optimize.
+     * @param opts Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
+     */
+    constructor(trainables: Value[], opts?: AdamOptions);
+    /**
+     * Performs a parameter update using Adam optimization.
+     */
+    step(): void;
+}
+/**
+ * AdamW optimizer; supports decoupled weight decay and gradient clipping (same options as Adam).
+ */
+export declare class AdamW extends Optimizer {
+    private beta1;
+    private beta2;
+    private epsilon;
+    private weightDecay;
+    private gradientClip;
+    private m;
+    private v;
+    private stepCount;
+    /**
+     * Constructs an AdamW optimizer.
+     * @param trainables Array of Value parameters to optimize.
+     * @param opts Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
+     */
+    constructor(trainables: Value[], opts?: AdamOptions);
+    /**
+     * Performs a parameter update using AdamW optimization (decoupled weight decay).
+     */
+    step(): void;
+}
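
A minimal training-step sketch against these declarations (illustrative; the entry-point re-exports are taken from Value.d.ts below, and the target value is arbitrary):

    import { Value, SGD } from "scalar-autograd";

    // Trainable parameters: the constructor is (data, label?, requiresGrad?).
    const w = new Value(0.5, "w", true);
    const b = new Value(0.0, "b", true);
    const opt = new SGD([w, b], { learningRate: 0.01 });

    for (let i = 0; i < 100; i++) {
      const pred = w.mul(2.0).add(b);       // forward pass: y = 2w + b
      const loss = pred.sub(1.0).square();  // squared error against target 1.0
      opt.zeroGrad();                       // clear grads from the previous step
      loss.backward();                      // reverse-mode autodiff
      opt.step();                           // SGD parameter update
    }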
package/dist/Optimizers.edge-cases.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Optimizers.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/V.d.ts
ADDED
Binary file
package/dist/Value.d.ts
ADDED
@@ -0,0 +1,260 @@
+export type BackwardFn = () => void;
+export { V } from './V';
+export { Optimizer, SGD, Adam, AdamW } from './Optimizers';
+export type { OptimizerOptions, AdamOptions } from './Optimizers';
+export { Losses } from './Losses';
+export declare class Value {
+    static no_grad_mode: boolean;
+    data: number;
+    grad: number;
+    requiresGrad: boolean;
+    private backwardFn;
+    private prev;
+    label: string;
+    constructor(data: number, label?: string, requiresGrad?: boolean);
+    private static ensureValue;
+    /**
+     * Returns sin(this).
+     * @returns New Value with sin.
+     */
+    sin(): Value;
+    /**
+     * Returns cos(this).
+     * @returns New Value with cos.
+     */
+    cos(): Value;
+    /**
+     * Returns tan(this).
+     * @returns New Value with tan.
+     */
+    tan(): Value;
+    /**
+     * Returns asin(this).
+     * @returns New Value with asin.
+     */
+    asin(): Value;
+    /**
+     * Returns acos(this).
+     * @returns New Value with acos.
+     */
+    acos(): Value;
+    /**
+     * Returns atan(this).
+     * @returns New Value with atan.
+     */
+    atan(): Value;
+    /**
+     * Returns relu(this).
+     * @returns New Value with relu.
+     */
+    relu(): Value;
+    /**
+     * Returns abs(this).
+     * @returns New Value with abs.
+     */
+    abs(): Value;
+    /**
+     * Returns exp(this).
+     * @returns New Value with exp.
+     */
+    exp(): Value;
+    /**
+     * Returns log(this).
+     * @returns New Value with log.
+     */
+    log(): Value;
+    /**
+     * Returns min(this, other).
+     * @param other Value to compare
+     * @returns New Value with min.
+     */
+    min(other: Value): Value;
+    /**
+     * Returns max(this, other).
+     * @param other Value to compare
+     * @returns New Value with max.
+     */
+    max(other: Value): Value;
+    /**
+     * Adds this and other.
+     * @param other Value or number to add
+     * @returns New Value with sum.
+     */
+    add(other: Value | number): Value;
+    /**
+     * Multiplies this and other.
+     * @param other Value or number to multiply
+     * @returns New Value with product.
+     */
+    mul(other: Value | number): Value;
+    /**
+     * Subtracts other from this.
+     * @param other Value or number to subtract
+     * @returns New Value with difference.
+     */
+    sub(other: Value | number): Value;
+    /**
+     * Divides this by other.
+     * @param other Value or number divisor
+     * @returns New Value with quotient.
+     */
+    div(other: Value | number): Value;
+    /**
+     * Raises this to the power exp.
+     * @param exp Exponent
+     * @returns New Value with pow(this, exp)
+     */
+    pow(exp: number): Value;
+    /**
+     * Raises this to a dynamic Value (other).
+     * @param other Exponent Value or number
+     * @returns New Value with pow(this, other)
+     */
+    powValue(other: Value | number): Value;
+    /**
+     * Returns this modulo other.
+     * @param other Divisor Value
+     * @returns New Value with modulo.
+     */
+    mod(other: Value): Value;
+    /**
+     * Returns a Value indicating whether this equals other.
+     * @param other Value to compare
+     * @returns New Value (1 if equal, else 0)
+     */
+    eq(other: Value): Value;
+    /**
+     * Returns a Value indicating whether this does not equal other.
+     * @param other Value to compare
+     * @returns New Value (1 if not equal, else 0)
+     */
+    neq(other: Value): Value;
+    /**
+     * Returns a Value indicating whether this is greater than other.
+     * @param other Value to compare
+     * @returns New Value (1 if true, else 0)
+     */
+    gt(other: Value): Value;
+    /**
+     * Returns a Value indicating whether this is less than other.
+     * @param other Value to compare
+     * @returns New Value (1 if true, else 0)
+     */
+    lt(other: Value): Value;
+    /**
+     * Returns a Value indicating whether this is greater than or equal to other.
+     * @param other Value to compare
+     * @returns New Value (1 if true, else 0)
+     */
+    gte(other: Value): Value;
+    /**
+     * Returns a Value indicating whether this is less than or equal to other.
+     * @param other Value to compare
+     * @returns New Value (1 if true, else 0)
+     */
+    lte(other: Value): Value;
+    /**
+     * Returns softplus(this).
+     * @returns New Value with softplus.
+     */
+    softplus(): Value;
+    /**
+     * Returns the floor of this Value.
+     * @returns New Value with floor(data).
+     */
+    floor(): Value;
+    /**
+     * Returns the ceiling of this Value.
+     * @returns New Value with ceil(data).
+     */
+    ceil(): Value;
+    /**
+     * Returns the rounded value of this Value.
+     * @returns New Value with rounded data.
+     */
+    round(): Value;
+    /**
+     * Returns the square of this Value.
+     * @returns New Value with squared data.
+     */
+    square(): Value;
+    /**
+     * Returns the cube of this Value.
+     * @returns New Value with cubed data.
+     */
+    cube(): Value;
+    /**
+     * Returns the reciprocal (1/x) of this Value.
+     * @returns New Value with reciprocal.
+     */
+    reciprocal(): Value;
+    /**
+     * Clamps this between min and max.
+     * @param min Minimum value
+     * @param max Maximum value
+     * @returns New clamped Value
+     */
+    clamp(min: number, max: number): Value;
+    /**
+     * Returns the negation (-this) Value.
+     * @returns New Value which is the negation.
+     */
+    neg(): Value;
+    /**
+     * Returns the sum of the given Values.
+     * @param vals Array of Value objects
+     * @returns New Value holding their sum.
+     */
+    static sum(vals: Value[]): Value;
+    /**
+     * Returns the mean of the given Values.
+     * @param vals Array of Value objects
+     * @returns New Value holding their mean.
+     */
+    static mean(vals: Value[]): Value;
+    /**
+     * Returns tanh(this).
+     * @returns New Value with tanh.
+     */
+    tanh(): Value;
+    /**
+     * Returns sigmoid(this).
+     * @returns New Value with sigmoid.
+     */
+    sigmoid(): Value;
+    /**
+     * Performs a reverse-mode autodiff backward pass from this Value.
+     * @param zeroGrad If true, zeroes all grads in the graph before backward
+     */
+    backward(zeroGrad?: boolean): void;
+    /**
+     * Sets all grad fields in the computation tree (from root) to 0.
+     * @param root Value to zero tree from
+     */
+    static zeroGradTree(root: Value): void;
+    /**
+     * Sets all grad fields in all supplied trees to 0.
+     * @param vals Values whose trees to zero
+     */
+    static zeroGradAll(vals: Value[]): void;
+    /**
+     * Internal helper to construct a Value with correct backward fn and grads.
+     * @param data Output value data
+     * @param left Left operand Value
+     * @param right Right operand Value or null
+     * @param backwardFnBuilder Function to create backward closure
+     * @param label Node label for debugging
+     * @returns New Value node
+     */
+    static make(data: number, left: Value, right: Value | null, backwardFnBuilder: (out: Value) => BackwardFn, label: string): Value;
+    /**
+     * Returns string representation for debugging.
+     * @returns String summary of Value
+     */
+    toString(): string;
+    /**
+     * Temporarily disables gradient tracking within the callback scope, like torch.no_grad().
+     * Restores the previous state after running fn.
+     */
+    static withNoGrad<T>(fn: () => T): T;
+}
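
A short sketch of the core autograd flow these declarations imply (illustrative; the derivative shown is standard calculus, not something asserted by the package):

    import { Value } from "scalar-autograd";

    const x = new Value(2.0, "x", true);
    const y = x.sin().mul(x);   // builds the graph for y = x * sin(x)
    y.backward();               // accumulates dy/dx = sin(x) + x * cos(x) into x.grad

    // Evaluate without gradient tracking, analogous to torch.no_grad():
    const frozen = Value.withNoGrad(() => x.mul(3).add(1));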
package/dist/Value.edge-cases.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Value.grad-flow.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Value.losses-edge-cases.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Value.memory.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Value.nn.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/Value.spec.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/ValueArithmetic.d.ts
ADDED
@@ -0,0 +1,26 @@
+import { Value } from './Value';
+export declare class ValueArithmetic {
+    static add(a: Value, b: Value): Value;
+    static sqrt(a: Value): Value;
+    static mul(a: Value, b: Value): Value;
+    static sub(a: Value, b: Value): Value;
+    static div(a: Value, b: Value, eps?: number): Value;
+    static pow(a: Value, exp: number): Value;
+    static powValue(a: Value, b: Value, eps?: number): Value;
+    static mod(a: Value, b: Value): Value;
+    static abs(a: Value): Value;
+    static exp(a: Value): Value;
+    static log(a: Value, eps?: number): Value;
+    static min(a: Value, b: Value): Value;
+    static max(a: Value, b: Value): Value;
+    static floor(a: Value): Value;
+    static ceil(a: Value): Value;
+    static round(a: Value): Value;
+    static square(a: Value): Value;
+    static cube(a: Value): Value;
+    static reciprocal(a: Value, eps?: number): Value;
+    static clamp(a: Value, min: number, max: number): Value;
+    static sum(vals: Value[]): Value;
+    static mean(vals: Value[]): Value;
+    static neg(a: Value): Value;
+}
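
The eps? parameters on div, powValue, log, and reciprocal suggest guarded versions of the numerically risky operations. A sketch of direct use (the deep dist/ import path and the eps semantics are assumptions, not documented in this diff):

    import { Value } from "scalar-autograd";
    // Path assumed from the dist/ layout shown in this diff:
    import { ValueArithmetic } from "scalar-autograd/dist/ValueArithmetic";

    const a = new Value(1.0);
    const b = new Value(0.0);
    // eps presumably keeps the quotient finite when b.data is 0.
    const q = ValueArithmetic.div(a, b, 1e-12);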
package/dist/ValueComparison.d.ts
ADDED
@@ -0,0 +1,10 @@
+import { Value } from './Value';
+export declare class ValueComparison {
+    static eq(a: Value, b: Value): Value;
+    static ifThenElse(cond: Value, thenVal: Value, elseVal: Value): Value;
+    static neq(a: Value, b: Value): Value;
+    static gt(a: Value, b: Value): Value;
+    static lt(a: Value, b: Value): Value;
+    static gte(a: Value, b: Value): Value;
+    static lte(a: Value, b: Value): Value;
+}
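
Since the comparisons return 0/1 Values, ifThenElse can act as a select inside the graph. A sketch (the deep import path is an assumption, as above):

    import { Value } from "scalar-autograd";
    import { ValueComparison } from "scalar-autograd/dist/ValueComparison"; // path assumed

    // |x| built from a select: cond is 1 when x > 0, else 0.
    const x = new Value(-3.0);
    const cond = x.gt(new Value(0));
    const absX = ValueComparison.ifThenElse(cond, x, x.neg());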