scalar-autograd 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/README.md +127 -2
  2. package/dist/CompiledFunctions.d.ts +111 -0
  3. package/dist/CompiledFunctions.js +268 -0
  4. package/dist/CompiledResiduals.d.ts +74 -0
  5. package/dist/CompiledResiduals.js +94 -0
  6. package/dist/EigenvalueHelpers.d.ts +14 -0
  7. package/dist/EigenvalueHelpers.js +93 -0
  8. package/dist/Geometry.d.ts +131 -0
  9. package/dist/Geometry.js +213 -0
  10. package/dist/GraphBuilder.d.ts +64 -0
  11. package/dist/GraphBuilder.js +237 -0
  12. package/dist/GraphCanonicalizerNoSort.d.ts +20 -0
  13. package/dist/GraphCanonicalizerNoSort.js +190 -0
  14. package/dist/GraphHashCanonicalizer.d.ts +46 -0
  15. package/dist/GraphHashCanonicalizer.js +220 -0
  16. package/dist/GraphSignature.d.ts +7 -0
  17. package/dist/GraphSignature.js +7 -0
  18. package/dist/KernelPool.d.ts +55 -0
  19. package/dist/KernelPool.js +124 -0
  20. package/dist/LBFGS.d.ts +84 -0
  21. package/dist/LBFGS.js +313 -0
  22. package/dist/LinearSolver.d.ts +69 -0
  23. package/dist/LinearSolver.js +213 -0
  24. package/dist/Losses.d.ts +9 -0
  25. package/dist/Losses.js +42 -37
  26. package/dist/Matrix3x3.d.ts +50 -0
  27. package/dist/Matrix3x3.js +146 -0
  28. package/dist/NonlinearLeastSquares.d.ts +33 -0
  29. package/dist/NonlinearLeastSquares.js +252 -0
  30. package/dist/Optimizers.d.ts +70 -14
  31. package/dist/Optimizers.js +42 -19
  32. package/dist/V.d.ts +0 -0
  33. package/dist/V.js +0 -0
  34. package/dist/Value.d.ts +84 -2
  35. package/dist/Value.js +296 -58
  36. package/dist/ValueActivation.js +10 -14
  37. package/dist/ValueArithmetic.d.ts +1 -0
  38. package/dist/ValueArithmetic.js +58 -50
  39. package/dist/ValueComparison.js +9 -13
  40. package/dist/ValueRegistry.d.ts +38 -0
  41. package/dist/ValueRegistry.js +88 -0
  42. package/dist/ValueTrig.js +14 -18
  43. package/dist/Vec2.d.ts +45 -0
  44. package/dist/Vec2.js +93 -0
  45. package/dist/Vec3.d.ts +78 -0
  46. package/dist/Vec3.js +169 -0
  47. package/dist/Vec4.d.ts +45 -0
  48. package/dist/Vec4.js +126 -0
  49. package/dist/__tests__/duplicate-inputs.test.js +33 -0
  50. package/dist/cli/gradient-gen.d.ts +19 -0
  51. package/dist/cli/gradient-gen.js +264 -0
  52. package/dist/compileIndirectKernel.d.ts +24 -0
  53. package/dist/compileIndirectKernel.js +148 -0
  54. package/dist/index.d.ts +20 -0
  55. package/dist/index.js +20 -0
  56. package/dist/scalar-autograd.d.ts +1157 -0
  57. package/dist/symbolic/AST.d.ts +113 -0
  58. package/dist/symbolic/AST.js +128 -0
  59. package/dist/symbolic/CodeGen.d.ts +35 -0
  60. package/dist/symbolic/CodeGen.js +280 -0
  61. package/dist/symbolic/Parser.d.ts +64 -0
  62. package/dist/symbolic/Parser.js +329 -0
  63. package/dist/symbolic/Simplify.d.ts +10 -0
  64. package/dist/symbolic/Simplify.js +244 -0
  65. package/dist/symbolic/SymbolicDiff.d.ts +35 -0
  66. package/dist/symbolic/SymbolicDiff.js +339 -0
  67. package/dist/tsdoc-metadata.json +11 -0
  68. package/package.json +29 -5
  69. package/dist/Losses.spec.js +0 -54
  70. package/dist/Optimizers.edge-cases.spec.d.ts +0 -1
  71. package/dist/Optimizers.edge-cases.spec.js +0 -29
  72. package/dist/Optimizers.spec.d.ts +0 -1
  73. package/dist/Optimizers.spec.js +0 -56
  74. package/dist/Value.edge-cases.spec.d.ts +0 -1
  75. package/dist/Value.edge-cases.spec.js +0 -54
  76. package/dist/Value.grad-flow.spec.d.ts +0 -1
  77. package/dist/Value.grad-flow.spec.js +0 -24
  78. package/dist/Value.losses-edge-cases.spec.d.ts +0 -1
  79. package/dist/Value.losses-edge-cases.spec.js +0 -30
  80. package/dist/Value.memory.spec.d.ts +0 -1
  81. package/dist/Value.memory.spec.js +0 -23
  82. package/dist/Value.nn.spec.d.ts +0 -1
  83. package/dist/Value.nn.spec.js +0 -111
  84. package/dist/Value.spec.d.ts +0 -1
  85. package/dist/Value.spec.js +0 -245
  86. package/dist/{Losses.spec.d.ts → __tests__/duplicate-inputs.test.d.ts} +0 -0
package/README.md CHANGED
@@ -81,6 +81,126 @@ console.log('Fitted b:', b.data); // ~3
 
  This pattern—forward pass, backward for gradients, and calling `optimizer.step()`—applies to more complex optimization tasks and neural networks as well!
 
+ ## Example: Nonlinear Least Squares Solver
+
+ For problems where you need to minimize the sum of squared residuals, the built-in Levenberg-Marquardt solver is much faster than gradient descent:
+
+ ```typescript
+ import { V } from './V';
+
+ // Fit a circle to noisy points
+ const params = [V.W(0), V.W(0), V.W(5)]; // cx, cy, radius
+
+ // Generate noisy circle data
+ const points = Array.from({ length: 50 }, (_, i) => {
+   const angle = (i / 50) * 2 * Math.PI;
+   return {
+     x: 10 * Math.cos(angle) + (Math.random() - 0.5) * 0.5,
+     y: 10 * Math.sin(angle) + (Math.random() - 0.5) * 0.5,
+   };
+ });
+
+ const result = V.nonlinearLeastSquares(
+   params,
+   ([cx, cy, r]) => {
+     // Compute residual for each point (distance from circle)
+     return points.map(p => {
+       const dx = V.sub(p.x, cx);
+       const dy = V.sub(p.y, cy);
+       const dist = V.sqrt(V.add(V.square(dx), V.square(dy)));
+       return V.sub(dist, r);
+     });
+   },
+   {
+     maxIterations: 100,
+     costTolerance: 1e-6,
+     verbose: true,
+   }
+ );
+
+ console.log('Circle fitted in', result.iterations, 'iterations');
+ console.log('Center:', params[0].data, params[1].data);
+ console.log('Radius:', params[2].data);
+ ```
+
+ The Levenberg-Marquardt algorithm typically converges 100-1000x faster than gradient descent for least squares problems.
+
+ ## Choosing the Right Optimizer
+
+ ScalarAutograd provides three categories of optimizers, each suited for different problem types:
+
+ ### 1. Gradient Descent Optimizers (SGD, Adam, AdamW)
+ **Best for:** Training neural networks, iterative refinement, online learning
+
+ ```typescript
+ const opt = new Adam([w, b], { learningRate: 0.01 });
+ for (let epoch = 0; epoch < 1000; epoch++) {
+   const loss = computeLoss();
+   opt.zeroGrad();
+   loss.backward();
+   opt.step();
+ }
+ ```
+
+ **Pros:** Simple, works on any differentiable objective, good for streaming data
+ **Cons:** Slow convergence, requires tuning learning rate and iterations
+
+ ### 2. Levenberg-Marquardt (`V.nonlinearLeastSquares`)
+ **Best for:** Nonlinear least squares: minimizing Σ rᵢ(x)²
+
+ ```typescript
+ const result = V.nonlinearLeastSquares(
+   params,
+   (p) => points.map(pt => residualFunction(p, pt)), // Returns array of residuals
+   { maxIterations: 100 }
+ );
+ ```
+
+ **Use when:**
+ - Problem is naturally formulated as sum of squared residuals
+ - You have overdetermined systems (more equations than unknowns)
+ - Examples: curve fitting, calibration, parameter estimation, circle/sphere fitting
+
+ **Pros:** 10-100x faster than gradient descent, exploits Jacobian structure
+ **Cons:** Only works for least squares problems, requires residual formulation
+
+ ### 3. L-BFGS (`lbfgs`)
+ **Best for:** General unconstrained optimization
+
+ ```typescript
+ import { lbfgs } from 'scalar-autograd';
+
+ const result = lbfgs(
+   params,
+   (p) => computeObjective(p), // Returns single Value (the cost)
+   { maxIterations: 100 }
+ );
+ ```
+
+ **Use when:**
+ - Objective has no special structure (not sum-of-squares)
+ - High-dimensional problems (100s-1000s of parameters)
+ - Memory constrained (stores only ~10 recent gradient pairs)
+ - Examples: energy minimization, ML losses, geometric optimization, developable surfaces
+
+ **Pros:** Memory efficient, handles non-quadratic objectives well, faster than gradient descent
+ **Cons:** Not as fast as LM for least squares, requires smooth objectives
+
+ ### Quick Decision Guide
+
+ ```
+ Can you write your objective as f(x) = Σ rᵢ(x)² ?
+ ├─ YES → Use V.nonlinearLeastSquares() (Levenberg-Marquardt)
+ │        Fastest for curve fitting, calibration, parameter estimation
+
+ └─ NO → Is it a general smooth objective?
+    ├─ YES → Use lbfgs() for large-scale or L-BFGS for efficiency
+    │        Good for energy minimization, geometric optimization
+
+    └─ NO → Use Adam/AdamW for training neural networks
+             Good for online learning, streaming data
+ ```
+
  ## API Overview
  - **Core Value construction:**
    - `V.C(data, label?)` — constant (non-differentiable), e.g. for data/inputs.
@@ -95,9 +215,14 @@ This pattern—forward pass, backward for gradients, and calling `optimizer.step
    - `.backward()` — trigger automatic differentiation from this node.
    - `.grad` — access the computed gradient after backward pass.
  - **Optimizers:**
-   - E.g. `const opt = new SGD([w, b], {learningRate: 0.01})`
+   - `SGD`, `Adam`, `AdamW` - E.g. `const opt = new SGD([w, b], {learningRate: 0.01})`
  - **Losses:**
-   - Import from `Losses.ts` (e.g. `import { mse } from './Losses'`)
+   - `Losses.mse()`, `Losses.mae()`, `Losses.binaryCrossEntropy()`, `Losses.categoricalCrossEntropy()`, `Losses.huber()`, `Losses.tukey()`
+ - **Advanced Optimization:**
+   - `V.nonlinearLeastSquares(params, residualFn, options)` — Levenberg-Marquardt solver for nonlinear least squares problems (minimizing Σ rᵢ²)
+   - `lbfgs(params, objectiveFn, options)` — L-BFGS optimizer for general unconstrained optimization
+ - **Vector utilities:**
+   - `Vec2`, `Vec3` — Differentiable 2D/3D vectors with dot, cross, normalize operations
 
  All API operations work with both `Value` and raw number inputs (numbers are automatically wrapped as non-grad constants).
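To make the README's new decision guide concrete, here is a minimal sketch of the same circle fit expressed as a single scalar objective for `lbfgs` rather than as residuals for `V.nonlinearLeastSquares`. It assumes `V` is exported from the package root alongside `lbfgs` (the README itself imports `V` from `./V`), and it folds the squared residuals with `V.add`/`V.square`, since no sum helper appears in this diff.

```typescript
import { V, lbfgs } from 'scalar-autograd'; // root export of V is assumed here

// Same circle parameters as the README example: cx, cy, radius
const params = [V.W(0), V.W(0), V.W(5)];
const points = Array.from({ length: 50 }, (_, i) => {
  const angle = (i / 50) * 2 * Math.PI;
  return { x: 10 * Math.cos(angle), y: 10 * Math.sin(angle) };
});

const result = lbfgs(
  params,
  ([cx, cy, r]) => {
    // Fold per-point squared residuals into one Value (the cost)
    let cost = V.C(0);
    for (const p of points) {
      const dx = V.sub(p.x, cx);
      const dy = V.sub(p.y, cy);
      const dist = V.sqrt(V.add(V.square(dx), V.square(dy)));
      cost = V.add(cost, V.square(V.sub(dist, r)));
    }
    return cost; // single Value, as lbfgs expects
  },
  { maxIterations: 100 }
);

console.log('L-BFGS center:', params[0].data, params[1].data, 'radius:', params[2].data);
```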
package/dist/CompiledFunctions.d.ts ADDED
@@ -0,0 +1,111 @@
+ import { Value } from "./Value";
+ /**
+  * Pre-compiled scalar functions with automatic differentiation and kernel reuse.
+  *
+  * Uses kernel reuse: topologically identical functions share the same compiled kernel.
+  *
+  * Compile once, reuse many times - ideal for optimization, IK, animation, or any scenario
+  * where you repeatedly evaluate the same structure with different parameter values.
+  *
+  * @example
+  * ```typescript
+  * // Single objective (L-BFGS, Adam, SGD)
+  * const compiled = CompiledFunctions.compile(params, (p) => [loss(p)]);
+  * const { value, gradient } = compiled.evaluateGradient(params);
+  *
+  * // Multiple residuals (Levenberg-Marquardt)
+  * const compiled = CompiledFunctions.compile(params, (p) => residuals(p));
+  * const { values, jacobian } = compiled.evaluateJacobian(params);
+  * ```
+  */
+ export declare class CompiledFunctions {
+     private registry;
+     private kernelPool;
+     private functionDescriptors;
+     private numParams;
+     private constructor();
+     /**
+      * Compile scalar functions for reuse with kernel sharing.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param functionsFn - Function that builds scalar outputs from params
+      * @returns Compiled functions ready for optimization
+      */
+     static compile(params: Value[], functionsFn: (params: Value[]) => Value[]): CompiledFunctions;
+     /**
+      * Count nodes in a computation graph.
+      * @internal
+      */
+     private static countGraphNodes;
+     /**
+      * Compile scalar functions for reuse with kernel sharing (async version).
+      * Yields to browser between chunks to prevent UI freezing on large problems.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param functionsFn - Function that builds scalar outputs from params
+      * @param chunkSize - Number of functions to process per chunk (default: 50)
+      * @param onProgress - Optional callback for progress updates (current, total, percent)
+      * @returns Compiled functions ready for optimization
+      */
+     static compileAsync(params: Value[], functionsFn: (params: Value[]) => Value[], chunkSize?: number, onProgress?: (current: number, total: number, percent: number) => void): Promise<CompiledFunctions>;
+     /**
+      * Evaluate gradient of first function (for single objective optimization).
+      *
+      * @param params - Current parameter values
+      * @returns Function value and gradient vector
+      */
+     evaluateGradient(params: Value[]): {
+         value: number;
+         gradient: number[];
+     };
+     /**
+      * Evaluate sum of all functions with accumulated gradient (for L-BFGS, Adam, etc).
+      *
+      * This is the key method for kernel reuse with gradient-based optimizers.
+      * When you have N structurally identical residuals, this will:
+      * - Compile ~1 kernel instead of N
+      * - Evaluate all N residuals, accumulating their gradients
+      * - Return total loss and accumulated gradient
+      *
+      * @param params - Current parameter values
+      * @returns Sum of all function values and accumulated gradient
+      */
+     evaluateSumWithGradient(params: Value[]): {
+         value: number;
+         gradient: number[];
+     };
+     /**
+      * Evaluate all functions and their Jacobian matrix.
+      *
+      * @param params - Current parameter values
+      * @returns Function values and Jacobian matrix
+      */
+     evaluateJacobian(params: Value[]): {
+         values: number[];
+         jacobian: number[][];
+     };
+     /**
+      * Backward compatibility: evaluate as least-squares residuals.
+      *
+      * @deprecated Use evaluateJacobian() instead and compute cost yourself
+      * @param params - Current parameter values
+      * @returns Residuals, Jacobian, and sum of squared residuals
+      */
+     evaluate(params: Value[]): {
+         residuals: number[];
+         J: number[][];
+         cost: number;
+     };
+     /** Get number of compiled functions */
+     get numFunctions(): number;
+     /** @deprecated Use numFunctions instead */
+     get numResiduals(): number;
+     /** Get number of unique kernels (for metrics) */
+     get kernelCount(): number;
+     /** Get kernel reuse factor */
+     get kernelReuseFactor(): number;
+ }
+ /**
+  * @deprecated Use CompiledFunctions instead
+  */
+ export declare const CompiledResiduals: typeof CompiledFunctions;
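The `evaluateSumWithGradient` declaration above is the path gradient-based optimizers take through the kernel pool. A minimal sketch of a hand-rolled descent loop over it, assuming `CompiledFunctions` and `V` are exported from the package root, and using only the `V` operations that appear elsewhere in this diff (the residuals are deliberately trivial):

```typescript
import { V, CompiledFunctions } from 'scalar-autograd'; // root exports assumed

// Many structurally identical residuals, so they should share one compiled kernel
const params = [V.W(0), V.W(0)];
const targets = [1, 2, 3, 4, 5];
const compiled = CompiledFunctions.compile(params, ([a, b]) =>
  targets.map(t => V.square(V.sub(V.add(a, b), t))) // residual_i = (a + b - t_i)^2
);

// Plain gradient descent on the summed objective (illustrative, not a package optimizer)
const lr = 0.05;
for (let iter = 0; iter < 100; iter++) {
  const { value, gradient } = compiled.evaluateSumWithGradient(params);
  params.forEach((p, i) => { p.data -= lr * gradient[i]; });
  if (iter % 25 === 0) console.log(`iter ${iter}: loss = ${value.toFixed(4)}`);
}
console.log('kernels:', compiled.kernelCount, 'reuse factor:', compiled.kernelReuseFactor);
```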
package/dist/CompiledFunctions.js ADDED
@@ -0,0 +1,268 @@
+ import { ValueRegistry } from "./ValueRegistry";
+ import { KernelPool } from "./KernelPool";
+ import { extractInputIndices } from "./compileIndirectKernel";
+ /**
+  * Pre-compiled scalar functions with automatic differentiation and kernel reuse.
+  *
+  * Uses kernel reuse: topologically identical functions share the same compiled kernel.
+  *
+  * Compile once, reuse many times - ideal for optimization, IK, animation, or any scenario
+  * where you repeatedly evaluate the same structure with different parameter values.
+  *
+  * @example
+  * ```typescript
+  * // Single objective (L-BFGS, Adam, SGD)
+  * const compiled = CompiledFunctions.compile(params, (p) => [loss(p)]);
+  * const { value, gradient } = compiled.evaluateGradient(params);
+  *
+  * // Multiple residuals (Levenberg-Marquardt)
+  * const compiled = CompiledFunctions.compile(params, (p) => residuals(p));
+  * const { values, jacobian } = compiled.evaluateJacobian(params);
+  * ```
+  */
+ export class CompiledFunctions {
+     registry;
+     kernelPool;
+     functionDescriptors;
+     numParams;
+     constructor(registry, kernelPool, functionDescriptors, numParams) {
+         this.registry = registry;
+         this.kernelPool = kernelPool;
+         this.functionDescriptors = functionDescriptors;
+         this.numParams = numParams;
+     }
+     /**
+      * Compile scalar functions for reuse with kernel sharing.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param functionsFn - Function that builds scalar outputs from params
+      * @returns Compiled functions ready for optimization
+      */
+     static compile(params, functionsFn) {
+         // Ensure params have names for compilation
+         params.forEach((p, i) => {
+             if (!p.paramName) {
+                 p.paramName = `p${i}`;
+             }
+         });
+         const registry = new ValueRegistry();
+         const kernelPool = new KernelPool();
+         // Register all params first
+         params.forEach(p => registry.register(p));
+         // Build function graphs
+         const functionValues = functionsFn(params);
+         // Build param index map for gradient mapping
+         const paramIndexMap = new Map(params.map((p, i) => [registry.getId(p), i]));
+         // Compile kernels with reuse
+         console.log(`[CompiledFunctions] Compiling ${functionValues.length} functions...`);
+         const functionDescriptors = functionValues.map((f, idx) => {
+             if (idx % 100 === 0) {
+                 console.log(`[CompiledFunctions] Processing function ${idx}/${functionValues.length}`);
+             }
+             // Get or compile kernel for this graph structure
+             const descriptor = kernelPool.getOrCompile(f, params, registry);
+             // Extract input indices for this specific function
+             const inputIndices = extractInputIndices(f, registry);
+             // Build gradient indices: maps kernel local inputs → global gradient positions
+             const gradientIndices = inputIndices.map(regId => {
+                 // Check if this input is a param (needs gradient)
+                 if (paramIndexMap.has(regId)) {
+                     return paramIndexMap.get(regId);
+                 }
+                 // Not a param (constant) - no gradient
+                 return -1;
+             });
+             return {
+                 kernelHash: descriptor.canonicalString,
+                 inputIndices,
+                 gradientIndices
+             };
+         });
+         return new CompiledFunctions(registry, kernelPool, functionDescriptors, params.length);
+     }
+     /**
+      * Count nodes in a computation graph.
+      * @internal
+      */
+     static countGraphNodes(output) {
+         const visited = new Set();
+         function traverse(node) {
+             if (visited.has(node))
+                 return;
+             visited.add(node);
+             const prev = node.prev;
+             for (const child of prev) {
+                 traverse(child);
+             }
+         }
+         traverse(output);
+         return visited.size;
+     }
+     /**
+      * Compile scalar functions for reuse with kernel sharing (async version).
+      * Yields to browser between chunks to prevent UI freezing on large problems.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param functionsFn - Function that builds scalar outputs from params
+      * @param chunkSize - Number of functions to process per chunk (default: 50)
+      * @param onProgress - Optional callback for progress updates (current, total, percent)
+      * @returns Compiled functions ready for optimization
+      */
+     static async compileAsync(params, functionsFn, chunkSize = 50, onProgress) {
+         // Ensure params have names for compilation
+         params.forEach((p, i) => {
+             if (!p.paramName) {
+                 p.paramName = `p${i}`;
+             }
+         });
+         const registry = new ValueRegistry();
+         const kernelPool = new KernelPool();
+         // Register all params first
+         params.forEach(p => registry.register(p));
+         // Build function graphs
+         const functionValues = functionsFn(params);
+         // Build param index map for gradient mapping
+         const paramIndexMap = new Map(params.map((p, i) => [registry.getId(p), i]));
+         console.log(`[CompiledFunctions] Compiling ${functionValues.length} functions...`);
+         const functionDescriptors = [];
+         const totalChunks = Math.ceil(functionValues.length / chunkSize);
+         let lastLoggedPercent = 0;
+         let totalGraphSize = 0;
+         for (let i = 0; i < functionValues.length; i += chunkSize) {
+             const chunkEnd = Math.min(i + chunkSize, functionValues.length);
+             // Only log every 25% progress to reduce clutter
+             const percentComplete = Math.floor((chunkEnd / functionValues.length) * 100);
+             if (percentComplete >= lastLoggedPercent + 25) {
+                 console.log(`[CompiledFunctions] Processing ${chunkEnd}/${functionValues.length} (${percentComplete}%)`);
+                 lastLoggedPercent = percentComplete;
+             }
+             // Call progress callback
+             if (onProgress) {
+                 onProgress(chunkEnd, functionValues.length, percentComplete);
+             }
+             for (let j = i; j < chunkEnd; j++) {
+                 const f = functionValues[j];
+                 // Count graph size (number of nodes)
+                 const graphSize = this.countGraphNodes(f);
+                 totalGraphSize += graphSize;
+                 const descriptor = kernelPool.getOrCompile(f, params, registry);
+                 const inputIndices = extractInputIndices(f, registry);
+                 const gradientIndices = inputIndices.map(regId => paramIndexMap.has(regId) ? paramIndexMap.get(regId) : -1);
+                 functionDescriptors.push({
+                     kernelHash: descriptor.canonicalString,
+                     inputIndices,
+                     gradientIndices
+                 });
+             }
+             // Yield to browser
+             await new Promise(resolve => setTimeout(resolve, 0));
+         }
+         const avgGraphSize = Math.round(totalGraphSize / functionValues.length);
+         console.log(`[CompiledFunctions] Complete: ${kernelPool.kernels.size} unique kernels, ${(functionValues.length / kernelPool.kernels.size).toFixed(1)}x reuse (${kernelPool.canonMode}, avg ${avgGraphSize} nodes)`);
+         return new CompiledFunctions(registry, kernelPool, functionDescriptors, params.length);
+     }
+     /**
+      * Evaluate gradient of first function (for single objective optimization).
+      *
+      * @param params - Current parameter values
+      * @returns Function value and gradient vector
+      */
+     evaluateGradient(params) {
+         if (this.functionDescriptors.length === 0) {
+             throw new Error('No functions compiled');
+         }
+         // Update registry with current param values
+         const allValues = this.registry.getDataArray();
+         params.forEach(p => {
+             const id = this.registry.getId(p);
+             allValues[id] = p.data;
+         });
+         const gradient = new Array(this.numParams).fill(0);
+         const desc = this.functionDescriptors[0];
+         const kernelDesc = this.kernelPool.kernels.get(desc.kernelHash);
+         const value = kernelDesc.kernel(allValues, desc.inputIndices, desc.gradientIndices, gradient);
+         return { value, gradient };
+     }
+     /**
+      * Evaluate sum of all functions with accumulated gradient (for L-BFGS, Adam, etc).
+      *
+      * This is the key method for kernel reuse with gradient-based optimizers.
+      * When you have N structurally identical residuals, this will:
+      * - Compile ~1 kernel instead of N
+      * - Evaluate all N residuals, accumulating their gradients
+      * - Return total loss and accumulated gradient
+      *
+      * @param params - Current parameter values
+      * @returns Sum of all function values and accumulated gradient
+      */
+     evaluateSumWithGradient(params) {
+         // Update registry with current param values
+         const allValues = this.registry.getDataArray();
+         params.forEach(p => {
+             const id = this.registry.getId(p);
+             allValues[id] = p.data;
+         });
+         const gradient = new Array(this.numParams).fill(0);
+         let totalValue = 0;
+         for (const desc of this.functionDescriptors) {
+             const kernelDesc = this.kernelPool.kernels.get(desc.kernelHash);
+             totalValue += kernelDesc.kernel(allValues, desc.inputIndices, desc.gradientIndices, gradient);
+         }
+         return { value: totalValue, gradient };
+     }
+     /**
+      * Evaluate all functions and their Jacobian matrix.
+      *
+      * @param params - Current parameter values
+      * @returns Function values and Jacobian matrix
+      */
+     evaluateJacobian(params) {
+         // Update registry with current param values
+         const allValues = this.registry.getDataArray();
+         params.forEach(p => {
+             const id = this.registry.getId(p);
+             allValues[id] = p.data;
+         });
+         const numFunctions = this.functionDescriptors.length;
+         const values = new Array(numFunctions);
+         const jacobian = Array(numFunctions).fill(0).map(() => new Array(this.numParams).fill(0));
+         for (let i = 0; i < numFunctions; i++) {
+             const desc = this.functionDescriptors[i];
+             const kernelDesc = this.kernelPool.kernels.get(desc.kernelHash);
+             values[i] = kernelDesc.kernel(allValues, desc.inputIndices, desc.gradientIndices, jacobian[i]);
+         }
+         return { values, jacobian };
+     }
+     /**
+      * Backward compatibility: evaluate as least-squares residuals.
+      *
+      * @deprecated Use evaluateJacobian() instead and compute cost yourself
+      * @param params - Current parameter values
+      * @returns Residuals, Jacobian, and sum of squared residuals
+      */
+     evaluate(params) {
+         const { values, jacobian } = this.evaluateJacobian(params);
+         const cost = values.reduce((sum, v) => sum + v * v, 0);
+         return { residuals: values, J: jacobian, cost };
+     }
+     /** Get number of compiled functions */
+     get numFunctions() {
+         return this.functionDescriptors.length;
+     }
+     /** @deprecated Use numFunctions instead */
+     get numResiduals() {
+         return this.numFunctions;
+     }
+     /** Get number of unique kernels (for metrics) */
+     get kernelCount() {
+         return this.kernelPool.size;
+     }
+     /** Get kernel reuse factor */
+     get kernelReuseFactor() {
+         return this.numFunctions / this.kernelPool.size;
+     }
+ }
+ /**
+  * @deprecated Use CompiledFunctions instead
+  */
+ export const CompiledResiduals = CompiledFunctions;
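`KernelPool` and `compileIndirectKernel` are not part of this diff view, so the kernel bodies themselves are not shown. The call sites above, `kernelDesc.kernel(allValues, desc.inputIndices, desc.gradientIndices, gradient)`, imply the calling convention sketched below; this is an illustrative, hand-written example of that convention under those assumptions, not the generated code.

```typescript
// Illustrative sketch only: real kernels are generated by KernelPool (not in this diff).
type Kernel = (
  allValues: number[],       // registry data array, see registry.getDataArray() above
  inputIndices: number[],    // registry ids of this function's inputs
  gradientIndices: number[], // global parameter slot per input, or -1 for constants
  gradOut: number[],         // gradient accumulator of length numParams, shared across calls
) => number;                 // returns the function value

// Hypothetical hand-written kernel for f = (x - c)^2 with inputs [x, c]:
const squaredDiffKernel: Kernel = (allValues, inputIndices, gradientIndices, gradOut) => {
  const x = allValues[inputIndices[0]];
  const c = allValues[inputIndices[1]];
  const d = x - c;
  // df/dx = 2d, df/dc = -2d; inputs mapped to -1 (constants) receive no gradient
  if (gradientIndices[0] >= 0) gradOut[gradientIndices[0]] += 2 * d;
  if (gradientIndices[1] >= 0) gradOut[gradientIndices[1]] += -2 * d;
  return d * d;
};
```

The accumulation (`+=`) is inferred from `evaluateSumWithGradient`, which reuses one gradient array across every function descriptor.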
package/dist/CompiledResiduals.d.ts ADDED
@@ -0,0 +1,74 @@
+ import { Value } from "./Value";
+ /**
+  * Pre-compiled residual functions for efficient repeated optimization.
+  *
+  * Wrapper around CompiledFunctions that provides a simpler API for
+  * Levenberg-Marquardt optimization use cases.
+  *
+  * Compile once, reuse many times - ideal for IK, animation, or any scenario
+  * where you solve the same structure with different parameter values.
+  *
+  * @example
+  * ```typescript
+  * // Compile once
+  * const compiled = CompiledResiduals.compile(params, residualFn);
+  *
+  * // Solve many times with different initial values
+  * for (let i = 0; i < 100; i++) {
+  *   params.forEach(p => p.data = randomInitialValue());
+  *   V.nonlinearLeastSquares(params, compiled);
+  * }
+  * ```
+  */
+ export declare class CompiledResiduals {
+     private compiledFunctions;
+     private constructor();
+     /**
+      * Compile residual functions for reuse.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param residualFn - Function that builds residuals from params
+      * @returns Compiled residual functions ready for optimization
+      */
+     static compile(params: Value[], residualFn: (params: Value[]) => Value[]): CompiledResiduals;
+     /**
+      * Compile residual functions asynchronously with progress reporting.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param residualFn - Function that builds residuals from params
+      * @param chunkSize - Number of residuals to compile per chunk
+      * @param onProgress - Callback for progress updates
+      * @returns Compiled residual functions ready for optimization
+      */
+     static compileAsync(params: Value[], residualFn: (params: Value[]) => Value[], chunkSize?: number, onProgress?: (current: number, total: number, percent: number) => void): Promise<CompiledResiduals>;
+     /**
+      * Evaluate compiled residuals and Jacobian.
+      *
+      * @param params - Current parameter values
+      * @returns Residuals, Jacobian, and cost
+      */
+     evaluate(params: Value[]): {
+         residuals: number[];
+         J: number[][];
+         cost: number;
+     };
+     /** Get number of residuals */
+     get numResiduals(): number;
+     /** Get number of functions (alias for numResiduals) */
+     get numFunctions(): number;
+     /** Get number of unique kernels */
+     get kernelCount(): number;
+     /** Get kernel reuse factor */
+     get kernelReuseFactor(): number;
+     /**
+      * Evaluate sum of all residuals with accumulated gradient.
+      * Used by LBFGS for scalar optimization.
+      *
+      * @param params - Current parameter values
+      * @returns Sum of all residuals and accumulated gradient
+      */
+     evaluateSumWithGradient(params: Value[]): {
+         value: number;
+         gradient: number[];
+     };
+ }
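A minimal sketch of the async compile path declared above, wired to the progress callback; passing the compiled object to `V.nonlinearLeastSquares` follows the class's own `@example`. The root imports and the placeholder residuals are assumptions for illustration only.

```typescript
import { V, CompiledResiduals } from 'scalar-autograd'; // root exports assumed

async function solve() {
  const params = [V.W(0), V.W(0)];
  const targets = [1, 2, 3]; // placeholder data, just to have residuals to compile

  const compiled = await CompiledResiduals.compileAsync(
    params,
    (p) => targets.map(t => V.sub(V.add(p[0], p[1]), t)),
    50, // chunkSize: residuals processed before yielding back to the browser
    (current, total, percent) => console.log(`compiled ${current}/${total} (${percent}%)`)
  );

  const { residuals, J, cost } = compiled.evaluate(params);
  console.log('initial cost:', cost, 'residuals:', residuals.length, 'jacobian rows:', J.length);

  V.nonlinearLeastSquares(params, compiled); // as in the @example above
}
```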
package/dist/CompiledResiduals.js ADDED
@@ -0,0 +1,94 @@
+ import { CompiledFunctions } from "./CompiledFunctions";
+ /**
+  * Pre-compiled residual functions for efficient repeated optimization.
+  *
+  * Wrapper around CompiledFunctions that provides a simpler API for
+  * Levenberg-Marquardt optimization use cases.
+  *
+  * Compile once, reuse many times - ideal for IK, animation, or any scenario
+  * where you solve the same structure with different parameter values.
+  *
+  * @example
+  * ```typescript
+  * // Compile once
+  * const compiled = CompiledResiduals.compile(params, residualFn);
+  *
+  * // Solve many times with different initial values
+  * for (let i = 0; i < 100; i++) {
+  *   params.forEach(p => p.data = randomInitialValue());
+  *   V.nonlinearLeastSquares(params, compiled);
+  * }
+  * ```
+  */
+ export class CompiledResiduals {
+     compiledFunctions;
+     constructor(compiledFunctions) {
+         this.compiledFunctions = compiledFunctions;
+     }
+     /**
+      * Compile residual functions for reuse.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param residualFn - Function that builds residuals from params
+      * @returns Compiled residual functions ready for optimization
+      */
+     static compile(params, residualFn) {
+         const compiledFunctions = CompiledFunctions.compile(params, residualFn);
+         return new CompiledResiduals(compiledFunctions);
+     }
+     /**
+      * Compile residual functions asynchronously with progress reporting.
+      *
+      * @param params - Parameter Values (must have .paramName set)
+      * @param residualFn - Function that builds residuals from params
+      * @param chunkSize - Number of residuals to compile per chunk
+      * @param onProgress - Callback for progress updates
+      * @returns Compiled residual functions ready for optimization
+      */
+     static async compileAsync(params, residualFn, chunkSize = 50, onProgress) {
+         const compiledFunctions = await CompiledFunctions.compileAsync(params, residualFn, chunkSize, onProgress);
+         return new CompiledResiduals(compiledFunctions);
+     }
+     /**
+      * Evaluate compiled residuals and Jacobian.
+      *
+      * @param params - Current parameter values
+      * @returns Residuals, Jacobian, and cost
+      */
+     evaluate(params) {
+         const { values, jacobian } = this.compiledFunctions.evaluateJacobian(params);
+         const residuals = values;
+         const J = jacobian;
+         let cost = 0;
+         for (const r of residuals) {
+             cost += r * r;
+         }
+         return { residuals, J, cost };
+     }
+     /** Get number of residuals */
+     get numResiduals() {
+         return this.compiledFunctions.numFunctions;
+     }
+     /** Get number of functions (alias for numResiduals) */
+     get numFunctions() {
+         return this.compiledFunctions.numFunctions;
+     }
+     /** Get number of unique kernels */
+     get kernelCount() {
+         return this.compiledFunctions.kernelCount;
+     }
+     /** Get kernel reuse factor */
+     get kernelReuseFactor() {
+         return this.compiledFunctions.kernelReuseFactor;
+     }
+     /**
+      * Evaluate sum of all residuals with accumulated gradient.
+      * Used by LBFGS for scalar optimization.
+      *
+      * @param params - Current parameter values
+      * @returns Sum of all residuals and accumulated gradient
+      */
+     evaluateSumWithGradient(params) {
+         return this.compiledFunctions.evaluateSumWithGradient(params);
+     }
+ }
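`NonlinearLeastSquares.js` appears in the file list above but its body is not shown in this diff. Purely as a sketch of how the `{ residuals, J, cost }` shape returned by `evaluate()` is typically consumed, a Levenberg-Marquardt step forms the damped normal equations (JᵀJ + λI) δ = -Jᵀr; the package's actual solver may differ in detail.

```typescript
// Sketch only: shows how evaluate()'s { residuals, J, cost } is typically consumed.
// One Levenberg-Marquardt step solves the damped normal equations (JᵀJ + λI) δ = -Jᵀr.
function normalEquations(residuals: number[], J: number[][], lambda: number) {
  const n = J[0].length; // number of parameters
  const JtJ = Array.from({ length: n }, () => new Array(n).fill(0));
  const negJtr = new Array(n).fill(0);
  for (let i = 0; i < residuals.length; i++) {
    for (let a = 0; a < n; a++) {
      negJtr[a] -= J[i][a] * residuals[i];
      for (let b = 0; b < n; b++) {
        JtJ[a][b] += J[i][a] * J[i][b];
      }
    }
  }
  for (let a = 0; a < n; a++) JtJ[a][a] += lambda; // damping term
  return { JtJ, negJtr }; // solve JtJ · δ = negJtr for the parameter update δ
}
```

Solving `JtJ · δ = negJtr` (the file list includes a `LinearSolver` module) yields the step `δ`, and the `cost` value is typically what an LM loop compares before and after the step when adjusting λ.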