scalar-autograd 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/README.md +127 -2
  2. package/dist/CompiledFunctions.d.ts +111 -0
  3. package/dist/CompiledFunctions.js +268 -0
  4. package/dist/CompiledResiduals.d.ts +74 -0
  5. package/dist/CompiledResiduals.js +94 -0
  6. package/dist/EigenvalueHelpers.d.ts +14 -0
  7. package/dist/EigenvalueHelpers.js +93 -0
  8. package/dist/Geometry.d.ts +131 -0
  9. package/dist/Geometry.js +213 -0
  10. package/dist/GraphBuilder.d.ts +64 -0
  11. package/dist/GraphBuilder.js +237 -0
  12. package/dist/GraphCanonicalizerNoSort.d.ts +20 -0
  13. package/dist/GraphCanonicalizerNoSort.js +190 -0
  14. package/dist/GraphHashCanonicalizer.d.ts +46 -0
  15. package/dist/GraphHashCanonicalizer.js +220 -0
  16. package/dist/GraphSignature.d.ts +7 -0
  17. package/dist/GraphSignature.js +7 -0
  18. package/dist/KernelPool.d.ts +55 -0
  19. package/dist/KernelPool.js +124 -0
  20. package/dist/LBFGS.d.ts +84 -0
  21. package/dist/LBFGS.js +313 -0
  22. package/dist/LinearSolver.d.ts +69 -0
  23. package/dist/LinearSolver.js +213 -0
  24. package/dist/Losses.d.ts +9 -0
  25. package/dist/Losses.js +42 -37
  26. package/dist/Matrix3x3.d.ts +50 -0
  27. package/dist/Matrix3x3.js +146 -0
  28. package/dist/NonlinearLeastSquares.d.ts +33 -0
  29. package/dist/NonlinearLeastSquares.js +252 -0
  30. package/dist/Optimizers.d.ts +70 -14
  31. package/dist/Optimizers.js +42 -19
  32. package/dist/V.d.ts +0 -0
  33. package/dist/V.js +0 -0
  34. package/dist/Value.d.ts +84 -2
  35. package/dist/Value.js +296 -58
  36. package/dist/ValueActivation.js +10 -14
  37. package/dist/ValueArithmetic.d.ts +1 -0
  38. package/dist/ValueArithmetic.js +58 -50
  39. package/dist/ValueComparison.js +9 -13
  40. package/dist/ValueRegistry.d.ts +38 -0
  41. package/dist/ValueRegistry.js +88 -0
  42. package/dist/ValueTrig.js +14 -18
  43. package/dist/Vec2.d.ts +45 -0
  44. package/dist/Vec2.js +93 -0
  45. package/dist/Vec3.d.ts +78 -0
  46. package/dist/Vec3.js +169 -0
  47. package/dist/Vec4.d.ts +45 -0
  48. package/dist/Vec4.js +126 -0
  49. package/dist/__tests__/duplicate-inputs.test.js +33 -0
  50. package/dist/cli/gradient-gen.d.ts +19 -0
  51. package/dist/cli/gradient-gen.js +264 -0
  52. package/dist/compileIndirectKernel.d.ts +24 -0
  53. package/dist/compileIndirectKernel.js +148 -0
  54. package/dist/index.d.ts +20 -0
  55. package/dist/index.js +20 -0
  56. package/dist/scalar-autograd.d.ts +1157 -0
  57. package/dist/symbolic/AST.d.ts +113 -0
  58. package/dist/symbolic/AST.js +128 -0
  59. package/dist/symbolic/CodeGen.d.ts +35 -0
  60. package/dist/symbolic/CodeGen.js +280 -0
  61. package/dist/symbolic/Parser.d.ts +64 -0
  62. package/dist/symbolic/Parser.js +329 -0
  63. package/dist/symbolic/Simplify.d.ts +10 -0
  64. package/dist/symbolic/Simplify.js +244 -0
  65. package/dist/symbolic/SymbolicDiff.d.ts +35 -0
  66. package/dist/symbolic/SymbolicDiff.js +339 -0
  67. package/dist/tsdoc-metadata.json +11 -0
  68. package/package.json +29 -5
  69. package/dist/Losses.spec.js +0 -54
  70. package/dist/Optimizers.edge-cases.spec.d.ts +0 -1
  71. package/dist/Optimizers.edge-cases.spec.js +0 -29
  72. package/dist/Optimizers.spec.d.ts +0 -1
  73. package/dist/Optimizers.spec.js +0 -56
  74. package/dist/Value.edge-cases.spec.d.ts +0 -1
  75. package/dist/Value.edge-cases.spec.js +0 -54
  76. package/dist/Value.grad-flow.spec.d.ts +0 -1
  77. package/dist/Value.grad-flow.spec.js +0 -24
  78. package/dist/Value.losses-edge-cases.spec.d.ts +0 -1
  79. package/dist/Value.losses-edge-cases.spec.js +0 -30
  80. package/dist/Value.memory.spec.d.ts +0 -1
  81. package/dist/Value.memory.spec.js +0 -23
  82. package/dist/Value.nn.spec.d.ts +0 -1
  83. package/dist/Value.nn.spec.js +0 -111
  84. package/dist/Value.spec.d.ts +0 -1
  85. package/dist/Value.spec.js +0 -245
  86. package/dist/{Losses.spec.d.ts → __tests__/duplicate-inputs.test.d.ts} +0 -0
package/dist/Optimizers.d.ts CHANGED
@@ -2,20 +2,35 @@ import { Value } from "./Value";
 /**
  * Abstract base class for all optimizers.
  * Ensures only requiresGrad parameters are optimized.
+ * @public
  */
 export declare abstract class Optimizer {
+    /**
+     * Array of trainable Value parameters filtered to only those requiring gradients.
+     * @public
+     */
     protected trainables: Value[];
+    /**
+     * Learning rate for parameter updates.
+     * @public
+     */
     learningRate: number;
     /**
      * Constructs an Optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param learningRate Learning rate for updates.
+     * @param trainables - Array of Value parameters to optimize.
+     * @param learningRate - Learning rate for updates.
      */
     constructor(trainables: Value[], learningRate: number);
     /**
      * Performs a parameter update step.
+     * @public
      */
     abstract step(): void;
+    /**
+     * Resets optimizer state for a specific trainable parameter.
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     abstract resetStateFor(trainable: Value): void;
     /**
      * Sets grads of all trainables to zero.
@@ -29,47 +44,75 @@ export declare abstract class Optimizer {
 }
 /**
  * Optional arguments for basic optimizers.
- * @property learningRate: Overrides the step size for parameter updates (default varies by optimizer).
- * @property weightDecay: L2 regularization multiplier (default 0). Ignored for plain SGD.
- * @property gradientClip: Maximum absolute value for gradient updates (default 0: no clipping).
+ * @public
  */
 export interface OptimizerOptions {
+    /**
+     * Overrides the step size for parameter updates (default varies by optimizer).
+     * @public
+     */
     learningRate?: number;
+    /**
+     * L2 regularization multiplier (default 0). Ignored for plain SGD.
+     * @public
+     */
     weightDecay?: number;
+    /**
+     * Maximum absolute value for gradient updates (default 0: no clipping).
+     * @public
+     */
     gradientClip?: number;
 }
 /**
  * Stochastic Gradient Descent (SGD) optimizer. Accepts weightDecay and gradientClip for API consistency (ignored).
+ * @public
  */
 export declare class SGD extends Optimizer {
     private weightDecay;
     private gradientClip;
     /**
      * Constructs an SGD optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param opts Optional parameters (learningRate, weightDecay, gradientClip).
+     * @param trainables - Array of Value parameters to optimize.
+     * @param opts - Optional parameters (learningRate, weightDecay, gradientClip).
      */
     constructor(trainables: Value[], opts?: OptimizerOptions);
     /**
      * Performs a parameter update using standard SGD.
+     * @public
      */
     step(): void;
+    /**
+     * Resets optimizer state for a trainable (no-op for SGD).
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     resetStateFor(trainable: Value): void;
 }
 /**
  * Adam and AdamW optimizer parameters.
  * Extends OptimizerOptions.
- * @property beta1: Exponential decay rate for 1st moment (default 0.9).
- * @property beta2: Exponential decay rate for 2nd moment (default 0.999).
- * @property epsilon: Numerical stability fudge factor (default 1e-8).
+ * @public
  */
 export interface AdamOptions extends OptimizerOptions {
+    /**
+     * Exponential decay rate for 1st moment (default 0.9).
+     * @public
+     */
     beta1?: number;
+    /**
+     * Exponential decay rate for 2nd moment (default 0.999).
+     * @public
+     */
     beta2?: number;
+    /**
+     * Numerical stability fudge factor (default 1e-8).
+     * @public
+     */
     epsilon?: number;
 }
 /**
  * Adam optimizer, supports decoupled weight decay and gradient clipping.
+ * @public
  */
 export declare class Adam extends Optimizer {
     private beta1;
@@ -82,18 +125,25 @@ export declare class Adam extends Optimizer {
     private stepCount;
     /**
      * Constructs an Adam optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param opts Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
+     * @param trainables - Array of Value parameters to optimize.
+     * @param opts - Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
      */
     constructor(trainables: Value[], opts?: AdamOptions);
     /**
      * Performs a parameter update using Adam optimization.
+     * @public
      */
     step(): void;
+    /**
+     * Resets optimizer state (momentum and velocity) for a specific trainable.
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     resetStateFor(trainable: Value): void;
 }
 /**
  * AdamW optimizer, supports decoupled weight decay and gradient clipping (same options as Adam).
+ * @public
  */
 export declare class AdamW extends Optimizer {
     private beta1;
@@ -106,13 +156,19 @@ export declare class AdamW extends Optimizer {
     private stepCount;
     /**
      * Constructs an AdamW optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param opts Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
+     * @param trainables - Array of Value parameters to optimize.
+     * @param opts - Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
      */
     constructor(trainables: Value[], opts?: AdamOptions);
     /**
      * Performs a parameter update using AdamW optimization (decoupled weight decay).
+     * @public
      */
     step(): void;
+    /**
+     * Resets optimizer state (momentum and velocity) for a specific trainable.
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     resetStateFor(trainable: Value): void;
 }
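
For orientation, a minimal usage sketch of the API declared above. This is a sketch only: it assumes these classes are re-exported from the package entry point, and it sets gradients by hand instead of running a backward pass, purely to show step() and resetStateFor().

    import { Value, Adam } from "scalar-autograd";

    // Two trainable parameters (requiresGrad = true).
    const w = new Value(0.5, "w", true);
    const b = new Value(0.0, "b", true);

    // AdamOptions extends OptimizerOptions, so learningRate/weightDecay/gradientClip
    // and beta1/beta2/epsilon can all be passed together.
    const opt = new Adam([w, b], { learningRate: 0.01, beta1: 0.9, beta2: 0.999 });

    w.grad = 0.2;          // gradients would normally come from backpropagation
    b.grad = -0.1;
    opt.step();            // Adam update of w.data and b.data

    opt.resetStateFor(w);  // clears the stored moment estimates for w only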
package/dist/Optimizers.js CHANGED
@@ -1,18 +1,24 @@
-"use strict";
 // Optimizers.ts
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.AdamW = exports.Adam = exports.SGD = exports.Optimizer = void 0;
 /**
  * Abstract base class for all optimizers.
  * Ensures only requiresGrad parameters are optimized.
+ * @public
  */
-class Optimizer {
+export class Optimizer {
+    /**
+     * Array of trainable Value parameters filtered to only those requiring gradients.
+     * @public
+     */
     trainables;
+    /**
+     * Learning rate for parameter updates.
+     * @public
+     */
     learningRate;
     /**
      * Constructs an Optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param learningRate Learning rate for updates.
+     * @param trainables - Array of Value parameters to optimize.
+     * @param learningRate - Learning rate for updates.
      */
     constructor(trainables, learningRate) {
         this.trainables = trainables.filter(v => v.requiresGrad);
@@ -38,17 +44,17 @@ class Optimizer {
         }
     }
 }
-exports.Optimizer = Optimizer;
 /**
  * Stochastic Gradient Descent (SGD) optimizer. Accepts weightDecay and gradientClip for API consistency (ignored).
+ * @public
  */
-class SGD extends Optimizer {
+export class SGD extends Optimizer {
     weightDecay;
     gradientClip;
     /**
      * Constructs an SGD optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param opts Optional parameters (learningRate, weightDecay, gradientClip).
+     * @param trainables - Array of Value parameters to optimize.
+     * @param opts - Optional parameters (learningRate, weightDecay, gradientClip).
      */
     constructor(trainables, opts = {}) {
         super(trainables, opts.learningRate ?? 1e-2);
@@ -57,6 +63,7 @@ class SGD extends Optimizer {
     }
     /**
      * Performs a parameter update using standard SGD.
+     * @public
      */
     step() {
         // Intentionally ignoring weightDecay/gradientClip for SGD
@@ -64,14 +71,19 @@ class SGD extends Optimizer {
             v.data -= this.learningRate * v.grad;
         }
     }
+    /**
+     * Resets optimizer state for a trainable (no-op for SGD).
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     resetStateFor(trainable) {
     }
 }
-exports.SGD = SGD;
 /**
  * Adam optimizer, supports decoupled weight decay and gradient clipping.
+ * @public
  */
-class Adam extends Optimizer {
+export class Adam extends Optimizer {
     beta1;
     beta2;
     epsilon;
@@ -82,8 +94,8 @@ class Adam extends Optimizer {
     stepCount = 0;
     /**
      * Constructs an Adam optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param opts Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
+     * @param trainables - Array of Value parameters to optimize.
+     * @param opts - Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
      */
     constructor(trainables, opts = {}) {
         super(trainables, opts.learningRate ?? 0.001);
@@ -99,6 +111,7 @@ class Adam extends Optimizer {
     }
     /**
      * Performs a parameter update using Adam optimization.
+     * @public
      */
     step() {
         this.stepCount++;
@@ -121,16 +134,21 @@ class Adam extends Optimizer {
             this.v.set(v, vVal);
         }
     }
+    /**
+     * Resets optimizer state (momentum and velocity) for a specific trainable.
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     resetStateFor(trainable) {
         this.m.set(trainable, 0);
         this.v.set(trainable, 0);
     }
 }
-exports.Adam = Adam;
 /**
  * AdamW optimizer, supports decoupled weight decay and gradient clipping (same options as Adam).
+ * @public
  */
-class AdamW extends Optimizer {
+export class AdamW extends Optimizer {
     beta1;
     beta2;
     epsilon;
@@ -141,8 +159,8 @@ class AdamW extends Optimizer {
     stepCount = 0;
     /**
      * Constructs an AdamW optimizer.
-     * @param trainables Array of Value parameters to optimize.
-     * @param opts Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
+     * @param trainables - Array of Value parameters to optimize.
+     * @param opts - Optional parameters (learningRate, weightDecay, gradientClip, beta1, beta2, epsilon).
      */
     constructor(trainables, opts = {}) {
         super(trainables, opts.learningRate ?? 0.001);
@@ -158,6 +176,7 @@ class AdamW extends Optimizer {
     }
    /**
     * Performs a parameter update using AdamW optimization (decoupled weight decay).
+     * @public
     */
     step() {
         this.stepCount++;
@@ -179,9 +198,13 @@ class AdamW extends Optimizer {
             this.v.set(v, vVal);
         }
     }
+    /**
+     * Resets optimizer state (momentum and velocity) for a specific trainable.
+     * @param trainable - The Value parameter to reset state for.
+     * @public
+     */
     resetStateFor(trainable) {
         this.m.set(trainable, 0);
         this.v.set(trainable, 0);
     }
 }
-exports.AdamW = AdamW;
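
The build output above also switches from CommonJS (exports.X = X assignments) to native ES module syntax (export class X). As a sketch of what that means for consumers, assuming the published package.json is configured for ES module resolution:

    // 0.1.7 consumed the CommonJS build:
    const { Adam } = require("scalar-autograd");

    // 0.1.9 ships ES module output:
    import { Adam } from "scalar-autograd";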
package/dist/V.d.ts CHANGED
Binary file
package/dist/V.js CHANGED
Binary file
package/dist/Value.d.ts CHANGED
@@ -1,16 +1,83 @@
+/**
+ * Function type for backward pass computations in automatic differentiation.
+ * @public
+ */
 export type BackwardFn = () => void;
 export { V } from './V';
 export { Optimizer, SGD, Adam, AdamW } from './Optimizers';
 export type { OptimizerOptions, AdamOptions } from './Optimizers';
 export { Losses } from './Losses';
+export type { NonlinearLeastSquaresOptions, NonlinearLeastSquaresResult } from './NonlinearLeastSquares';
+export { Vec2 } from './Vec2';
+export { Vec3 } from './Vec3';
+/**
+ * Represents a scalar value in the computational graph for automatic differentiation.
+ * Supports forward computation and reverse-mode autodiff (backpropagation).
+ * @public
+ */
 export declare class Value {
+    /**
+     * Global flag to disable gradient tracking. Use Value.withNoGrad() instead of setting directly.
+     * @public
+     */
     static no_grad_mode: boolean;
+    /**
+     * Current graph builder for incremental hash tracking (null when not building).
+     * @internal
+     */
+    static currentBuilder: any;
+    /**
+     * Global counter for unique Value IDs.
+     * @internal
+     */
+    private static nextId;
+    /**
+     * Unique ID for this Value instance (for hashing intermediate nodes).
+     * @internal
+     */
+    _id: number;
+    /**
+     * The numeric value stored in this node.
+     * @public
+     */
     data: number;
+    /**
+     * The gradient of the output with respect to this value.
+     * @public
+     */
     grad: number;
+    /**
+     * Whether this value participates in gradient computation.
+     * @public
+     */
     requiresGrad: boolean;
     private backwardFn;
-    private prev;
+    /** @internal */ prev: Value[];
+    /**
+     * Optional label for debugging and visualization.
+     * @public
+     */
     label: string;
+    /**
+     * Operation type for JIT compilation (e.g., '+', 'exp', 'sin').
+     * @internal
+     */
+    _op?: string;
+    /**
+     * Parameter name for JIT compilation inputs.
+     * @internal
+     */
+    paramName?: string;
+    /**
+     * Registry ID for kernel reuse system.
+     * @internal
+     */
+    _registryId?: number;
+    /**
+     * Operation constants (e.g., min/max for clamp, exponent for pow).
+     * @internal
+     */
+    _opConstants?: number[];
     constructor(data: number, label?: string, requiresGrad?: boolean);
     private static ensureValue;
     /**
@@ -200,6 +267,11 @@ export declare class Value {
      * @returns New Value which is the negation.
      */
     neg(): Value;
+    /**
+     * Returns sign(this).
+     * @returns New Value with sign.
+     */
+    sign(): Value;
     /**
      * Returns the sum of the given Values.
      * @param vals Array of Value objects
@@ -244,9 +316,17 @@ export declare class Value {
      * @param right Right operand Value or null
      * @param backwardFnBuilder Function to create backward closure
      * @param label Node label for debugging
+     * @param op Operation name for JIT compilation
      * @returns New Value node
      */
-    static make(data: number, left: Value, right: Value | null, backwardFnBuilder: (out: Value) => BackwardFn, label: string): Value;
+    static make(data: number, left: Value, right: Value | null, backwardFnBuilder: (out: Value) => BackwardFn, label: string, op?: string): Value;
+    /**
+     * N-ary operation helper for operations with multiple inputs
+     *
+     * TODO: Move code generation logic into makeNary instead of centralized switches.
+     * This would co-locate runtime and codegen logic at operation definition sites.
+     */
+    static makeNary(data: number, inputs: Value[], backwardFnBuilder: (out: Value) => BackwardFn, label: string, op?: string): Value;
     /**
      * Returns string representation for debugging.
      * @returns String summary of Value
@@ -257,4 +337,6 @@ export declare class Value {
      * Restores the previous state after running fn.
      */
     static withNoGrad<T>(fn: () => T): T;
+    getForwardCode(childCodes: string[]): string;
+    getBackwardCode(gradVar: string, childGrads: string[], childVars: string[]): string;
 }
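
A short sketch of the newly declared Value surface above (an assumption-laden example: it assumes Value is re-exported from the package entry point, and the numeric result shown follows the sign(this) doc comment rather than observed output):

    import { Value } from "scalar-autograd";

    const x = new Value(-2.5, "x", true);

    // New in this release: sign() as a graph operation.
    const s = x.sign();
    console.log(s.data);   // -1 for a negative input

    // Run a computation with gradient tracking temporarily disabled.
    const frozen = Value.withNoGrad(() => x.sign());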