numopt-js 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CODING_RULES.md +161 -0
- package/LICENSE +22 -0
- package/README.md +807 -0
- package/dist/core/adjointGradientDescent.d.ts +61 -0
- package/dist/core/adjointGradientDescent.d.ts.map +1 -0
- package/dist/core/adjointGradientDescent.js +764 -0
- package/dist/core/adjointGradientDescent.js.map +1 -0
- package/dist/core/constrainedGaussNewton.d.ts +44 -0
- package/dist/core/constrainedGaussNewton.d.ts.map +1 -0
- package/dist/core/constrainedGaussNewton.js +314 -0
- package/dist/core/constrainedGaussNewton.js.map +1 -0
- package/dist/core/constrainedLevenbergMarquardt.d.ts +46 -0
- package/dist/core/constrainedLevenbergMarquardt.d.ts.map +1 -0
- package/dist/core/constrainedLevenbergMarquardt.js +469 -0
- package/dist/core/constrainedLevenbergMarquardt.js.map +1 -0
- package/dist/core/constrainedUtils.d.ts +92 -0
- package/dist/core/constrainedUtils.d.ts.map +1 -0
- package/dist/core/constrainedUtils.js +364 -0
- package/dist/core/constrainedUtils.js.map +1 -0
- package/dist/core/convergence.d.ts +35 -0
- package/dist/core/convergence.d.ts.map +1 -0
- package/dist/core/convergence.js +51 -0
- package/dist/core/convergence.js.map +1 -0
- package/dist/core/createGradientFunction.d.ts +85 -0
- package/dist/core/createGradientFunction.d.ts.map +1 -0
- package/dist/core/createGradientFunction.js +93 -0
- package/dist/core/createGradientFunction.js.map +1 -0
- package/dist/core/effectiveJacobian.d.ts +90 -0
- package/dist/core/effectiveJacobian.d.ts.map +1 -0
- package/dist/core/effectiveJacobian.js +128 -0
- package/dist/core/effectiveJacobian.js.map +1 -0
- package/dist/core/finiteDiff.d.ts +171 -0
- package/dist/core/finiteDiff.d.ts.map +1 -0
- package/dist/core/finiteDiff.js +363 -0
- package/dist/core/finiteDiff.js.map +1 -0
- package/dist/core/gaussNewton.d.ts +29 -0
- package/dist/core/gaussNewton.d.ts.map +1 -0
- package/dist/core/gaussNewton.js +151 -0
- package/dist/core/gaussNewton.js.map +1 -0
- package/dist/core/gradientDescent.d.ts +35 -0
- package/dist/core/gradientDescent.d.ts.map +1 -0
- package/dist/core/gradientDescent.js +204 -0
- package/dist/core/gradientDescent.js.map +1 -0
- package/dist/core/jacobianComputation.d.ts +24 -0
- package/dist/core/jacobianComputation.d.ts.map +1 -0
- package/dist/core/jacobianComputation.js +38 -0
- package/dist/core/jacobianComputation.js.map +1 -0
- package/dist/core/levenbergMarquardt.d.ts +36 -0
- package/dist/core/levenbergMarquardt.d.ts.map +1 -0
- package/dist/core/levenbergMarquardt.js +286 -0
- package/dist/core/levenbergMarquardt.js.map +1 -0
- package/dist/core/lineSearch.d.ts +42 -0
- package/dist/core/lineSearch.d.ts.map +1 -0
- package/dist/core/lineSearch.js +106 -0
- package/dist/core/lineSearch.js.map +1 -0
- package/dist/core/logger.d.ts +77 -0
- package/dist/core/logger.d.ts.map +1 -0
- package/dist/core/logger.js +162 -0
- package/dist/core/logger.js.map +1 -0
- package/dist/core/types.d.ts +427 -0
- package/dist/core/types.d.ts.map +1 -0
- package/dist/core/types.js +15 -0
- package/dist/core/types.js.map +1 -0
- package/dist/index.d.ts +26 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +29 -0
- package/dist/index.js.map +1 -0
- package/dist/utils/formatting.d.ts +27 -0
- package/dist/utils/formatting.d.ts.map +1 -0
- package/dist/utils/formatting.js +54 -0
- package/dist/utils/formatting.js.map +1 -0
- package/dist/utils/matrix.d.ts +63 -0
- package/dist/utils/matrix.d.ts.map +1 -0
- package/dist/utils/matrix.js +129 -0
- package/dist/utils/matrix.js.map +1 -0
- package/dist/utils/resultFormatter.d.ts +122 -0
- package/dist/utils/resultFormatter.d.ts.map +1 -0
- package/dist/utils/resultFormatter.js +342 -0
- package/dist/utils/resultFormatter.js.map +1 -0
- package/package.json +74 -0
|
@@ -0,0 +1,363 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file implements numerical differentiation methods for computing
|
|
3
|
+
* gradients and Jacobian matrices when analytical derivatives are not available.
|
|
4
|
+
*
|
|
5
|
+
* Role in system:
|
|
6
|
+
* - Provides automatic gradient/Jacobian computation via finite differences
|
|
7
|
+
* - Used when users don't provide analytical derivatives
|
|
8
|
+
* - Critical for algorithms that require gradient information
|
|
9
|
+
*
|
|
10
|
+
* For first-time readers:
|
|
11
|
+
* - Start with finiteDiffGradient for general optimization
|
|
12
|
+
* - finiteDiffJacobian is for nonlinear least squares problems
|
|
13
|
+
* - Central difference is used for better accuracy than forward difference
|
|
14
|
+
*/
|
|
15
|
+
import { Matrix } from 'ml-matrix';
|
|
16
|
+
// Absolute finite-difference step h used when callers do not supply options.stepSize.
// NOTE(review): this is an absolute step; for parameters with very large or very
// small magnitudes a relative step may be more accurate — confirm intended usage.
const DEFAULT_STEP_SIZE = 1e-6;
const CENTRAL_DIFFERENCE_DENOMINATOR = 2.0; // Denominator for central difference formula: (f(x+h) - f(x-h)) / (2h)
|
|
18
|
+
/**
|
|
19
|
+
* Computes the gradient vector using central difference method.
|
|
20
|
+
*
|
|
21
|
+
* Central difference formula: f'(x) ≈ (f(x+h) - f(x-h)) / (2h)
|
|
22
|
+
*
|
|
23
|
+
* This is more accurate than forward difference but requires two function
|
|
24
|
+
* evaluations per parameter. The trade-off is worth it for better convergence.
|
|
25
|
+
*
|
|
26
|
+
* @param parameters - The point at which to evaluate the gradient
|
|
27
|
+
* @param costFunction - The cost function to differentiate
|
|
28
|
+
* @param options - Optional numerical differentiation settings
|
|
29
|
+
* @returns The gradient vector at the given parameters
|
|
30
|
+
*
|
|
31
|
+
* @example
|
|
32
|
+
* ```typescript
|
|
33
|
+
* // Standalone usage - compute gradient at a specific point
|
|
34
|
+
* const costFn = (params) => params[0] ** 2 + params[1] ** 2;
|
|
35
|
+
* const params = new Float64Array([1.0, 2.0]);
|
|
36
|
+
* const gradient = finiteDiffGradient(params, costFn);
|
|
37
|
+
* // gradient ≈ [2.0, 4.0]
|
|
38
|
+
* ```
|
|
39
|
+
*
|
|
40
|
+
* @example
|
|
41
|
+
* ```typescript
|
|
42
|
+
* // Usage with gradientDescent - note the parameter order!
|
|
43
|
+
* import { gradientDescent, finiteDiffGradient } from 'numopt-js';
|
|
44
|
+
*
|
|
45
|
+
* const costFn = (params) => Math.pow(params[0] - 3, 2) + Math.pow(params[1] - 2, 2);
|
|
46
|
+
*
|
|
47
|
+
* const result = gradientDescent(
|
|
48
|
+
* new Float64Array([0, 0]),
|
|
49
|
+
* costFn,
|
|
50
|
+
* (params) => finiteDiffGradient(params, costFn), // ✅ Correct: params first!
|
|
51
|
+
* { maxIterations: 100, tolerance: 1e-6 }
|
|
52
|
+
* );
|
|
53
|
+
* ```
|
|
54
|
+
*
|
|
55
|
+
* @example
|
|
56
|
+
* ```typescript
|
|
57
|
+
* // For easier usage with optimizers, consider using createFiniteDiffGradient:
|
|
58
|
+
* import { gradientDescent, createFiniteDiffGradient } from 'numopt-js';
|
|
59
|
+
*
|
|
60
|
+
* const costFn = (params) => Math.pow(params[0] - 3, 2) + Math.pow(params[1] - 2, 2);
|
|
61
|
+
* const gradientFn = createFiniteDiffGradient(costFn); // No parameter order confusion!
|
|
62
|
+
*
|
|
63
|
+
* const result = gradientDescent(
|
|
64
|
+
* new Float64Array([0, 0]),
|
|
65
|
+
* costFn,
|
|
66
|
+
* gradientFn,
|
|
67
|
+
* { maxIterations: 100, tolerance: 1e-6 }
|
|
68
|
+
* );
|
|
69
|
+
* ```
|
|
70
|
+
*
|
|
71
|
+
* @remarks
|
|
72
|
+
* **Important:** When using with optimization algorithms, note the parameter order:
|
|
73
|
+
* - ✅ Correct: `(params) => finiteDiffGradient(params, costFn)`
|
|
74
|
+
* - ❌ Wrong: `(params) => finiteDiffGradient(costFn, params)`
|
|
75
|
+
*
|
|
76
|
+
* Consider using {@link createFiniteDiffGradient} for a more intuitive API.
|
|
77
|
+
*/
|
|
78
|
+
export function finiteDiffGradient(parameters, costFunction, options = {}) {
|
|
79
|
+
const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
|
|
80
|
+
const parameterCount = parameters.length;
|
|
81
|
+
const gradient = new Float64Array(parameterCount);
|
|
82
|
+
for (let i = 0; i < parameterCount; i++) {
|
|
83
|
+
// Forward point: x + h
|
|
84
|
+
const forwardParams = new Float64Array(parameters);
|
|
85
|
+
forwardParams[i] += stepSize;
|
|
86
|
+
const forwardCost = costFunction(forwardParams);
|
|
87
|
+
// Backward point: x - h
|
|
88
|
+
const backwardParams = new Float64Array(parameters);
|
|
89
|
+
backwardParams[i] -= stepSize;
|
|
90
|
+
const backwardCost = costFunction(backwardParams);
|
|
91
|
+
// Central difference: (f(x+h) - f(x-h)) / (2h)
|
|
92
|
+
gradient[i] = (forwardCost - backwardCost) / (CENTRAL_DIFFERENCE_DENOMINATOR * stepSize);
|
|
93
|
+
}
|
|
94
|
+
return gradient;
|
|
95
|
+
}
|
|
96
|
+
/**
 * Builds the Jacobian of a residual function by central differences.
 *
 * The result J has shape (residualCount × parameterCount) where
 * J[i][j] = ∂r_i / ∂p_j, each entry computed with a symmetric quotient.
 * Every parameter column costs two residual evaluations.
 *
 * Note the argument order (residualFunction first) differs from the other
 * helpers in this file, which take the parameter vector first.
 *
 * @param residualFunction - Vector-valued residual function r(p)
 * @param parameters - Point at which the Jacobian is evaluated
 * @param options - Optional settings; options.stepSize overrides the default h
 * @returns Matrix of partial derivatives ∂r/∂p
 */
export function finiteDiffJacobian(residualFunction, parameters, options = {}) {
    const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
    const columnCount = parameters.length;
    // One evaluation at the base point just to learn the residual dimension.
    const rowCount = residualFunction(parameters).length;
    const entries = Array.from({ length: rowCount }, () => new Array(columnCount).fill(0));
    for (let col = 0; col < columnCount; col++) {
        // r(p + h·e_col)
        const shiftedUp = Float64Array.from(parameters);
        shiftedUp[col] += stepSize;
        const residualUp = residualFunction(shiftedUp);
        // r(p - h·e_col)
        const shiftedDown = Float64Array.from(parameters);
        shiftedDown[col] -= stepSize;
        const residualDown = residualFunction(shiftedDown);
        for (let row = 0; row < rowCount; row++) {
            // Symmetric difference quotient for r_row with respect to p_col.
            entries[row][col] = (residualUp[row] - residualDown[row]) / (2 * stepSize);
        }
    }
    return new Matrix(entries);
}
|
|
135
|
+
/**
|
|
136
|
+
* Computes the partial derivative of a constrained cost function with respect to parameters.
|
|
137
|
+
* Uses central difference method while keeping states fixed.
|
|
138
|
+
*
|
|
139
|
+
* Formula: ∂f/∂p_i ≈ (f(p+h·e_i, x) - f(p-h·e_i, x)) / (2h)
|
|
140
|
+
*
|
|
141
|
+
* @param parameters - Parameter vector p
|
|
142
|
+
* @param states - State vector x (kept fixed)
|
|
143
|
+
* @param costFunction - Constrained cost function f(p, x)
|
|
144
|
+
* @param options - Optional numerical differentiation settings
|
|
145
|
+
* @returns Gradient vector ∂f/∂p
|
|
146
|
+
*/
|
|
147
|
+
export function finiteDiffPartialP(parameters, states, costFunction, options = {}) {
|
|
148
|
+
const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
|
|
149
|
+
const parameterCount = parameters.length;
|
|
150
|
+
const gradient = new Float64Array(parameterCount);
|
|
151
|
+
for (let i = 0; i < parameterCount; i++) {
|
|
152
|
+
// Forward point: p + h·e_i
|
|
153
|
+
const forwardParams = new Float64Array(parameters);
|
|
154
|
+
forwardParams[i] += stepSize;
|
|
155
|
+
const forwardCost = costFunction(forwardParams, states);
|
|
156
|
+
// Backward point: p - h·e_i
|
|
157
|
+
const backwardParams = new Float64Array(parameters);
|
|
158
|
+
backwardParams[i] -= stepSize;
|
|
159
|
+
const backwardCost = costFunction(backwardParams, states);
|
|
160
|
+
// Central difference: (f(p+h·e_i, x) - f(p-h·e_i, x)) / (2h)
|
|
161
|
+
gradient[i] = (forwardCost - backwardCost) / (CENTRAL_DIFFERENCE_DENOMINATOR * stepSize);
|
|
162
|
+
}
|
|
163
|
+
return gradient;
|
|
164
|
+
}
|
|
165
|
+
/**
|
|
166
|
+
* Computes the partial derivative of a constrained cost function with respect to states.
|
|
167
|
+
* Uses central difference method while keeping parameters fixed.
|
|
168
|
+
*
|
|
169
|
+
* Formula: ∂f/∂x_i ≈ (f(p, x+h·e_i) - f(p, x-h·e_i)) / (2h)
|
|
170
|
+
*
|
|
171
|
+
* @param parameters - Parameter vector p (kept fixed)
|
|
172
|
+
* @param states - State vector x
|
|
173
|
+
* @param costFunction - Constrained cost function f(p, x)
|
|
174
|
+
* @param options - Optional numerical differentiation settings
|
|
175
|
+
* @returns Gradient vector ∂f/∂x
|
|
176
|
+
*/
|
|
177
|
+
export function finiteDiffPartialX(parameters, states, costFunction, options = {}) {
|
|
178
|
+
const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
|
|
179
|
+
const stateCount = states.length;
|
|
180
|
+
const gradient = new Float64Array(stateCount);
|
|
181
|
+
for (let i = 0; i < stateCount; i++) {
|
|
182
|
+
// Forward point: x + h·e_i
|
|
183
|
+
const forwardStates = new Float64Array(states);
|
|
184
|
+
forwardStates[i] += stepSize;
|
|
185
|
+
const forwardCost = costFunction(parameters, forwardStates);
|
|
186
|
+
// Backward point: x - h·e_i
|
|
187
|
+
const backwardStates = new Float64Array(states);
|
|
188
|
+
backwardStates[i] -= stepSize;
|
|
189
|
+
const backwardCost = costFunction(parameters, backwardStates);
|
|
190
|
+
// Central difference: (f(p, x+h·e_i) - f(p, x-h·e_i)) / (2h)
|
|
191
|
+
gradient[i] = (forwardCost - backwardCost) / (CENTRAL_DIFFERENCE_DENOMINATOR * stepSize);
|
|
192
|
+
}
|
|
193
|
+
return gradient;
|
|
194
|
+
}
|
|
195
|
+
/**
 * Jacobian ∂c/∂p of a constraint function c(p, x), states held fixed.
 *
 * Result has shape (constraintCount × parameterCount) with
 * entry (i, j) ≈ (c_i(p + h·e_j, x) - c_i(p - h·e_j, x)) / (2h).
 *
 * @param parameters - Parameter vector p (differentiated)
 * @param states - State vector x (held constant)
 * @param constraintFunction - Vector-valued constraint c(p, x)
 * @param options - Optional settings; options.stepSize overrides the default h
 * @returns Matrix with the partial derivatives ∂c/∂p
 */
export function finiteDiffConstraintPartialP(parameters, states, constraintFunction, options = {}) {
    const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
    const columnCount = parameters.length;
    // Probe once at the base point to learn how many constraint components exist.
    const rowCount = constraintFunction(parameters, states).length;
    const entries = Array.from({ length: rowCount }, () => new Array(columnCount).fill(0));
    for (let col = 0; col < columnCount; col++) {
        // c(p + h·e_col, x)
        const shiftedUp = Float64Array.from(parameters);
        shiftedUp[col] += stepSize;
        const constraintUp = constraintFunction(shiftedUp, states);
        // c(p - h·e_col, x)
        const shiftedDown = Float64Array.from(parameters);
        shiftedDown[col] -= stepSize;
        const constraintDown = constraintFunction(shiftedDown, states);
        for (let row = 0; row < rowCount; row++) {
            // Symmetric difference quotient for c_row with respect to p_col.
            entries[row][col] = (constraintUp[row] - constraintDown[row]) / (2 * stepSize);
        }
    }
    return new Matrix(entries);
}
|
|
237
|
+
/**
 * Jacobian ∂c/∂x of a constraint function c(p, x), parameters held fixed.
 *
 * Result has shape (constraintCount × stateCount) with
 * entry (i, j) ≈ (c_i(p, x + h·e_j) - c_i(p, x - h·e_j)) / (2h).
 *
 * @param parameters - Parameter vector p (held constant)
 * @param states - State vector x (differentiated)
 * @param constraintFunction - Vector-valued constraint c(p, x)
 * @param options - Optional settings; options.stepSize overrides the default h
 * @returns Matrix with the partial derivatives ∂c/∂x
 */
export function finiteDiffConstraintPartialX(parameters, states, constraintFunction, options = {}) {
    const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
    const columnCount = states.length;
    // Probe once at the base point to learn how many constraint components exist.
    const rowCount = constraintFunction(parameters, states).length;
    const entries = Array.from({ length: rowCount }, () => new Array(columnCount).fill(0));
    for (let col = 0; col < columnCount; col++) {
        // c(p, x + h·e_col)
        const shiftedUp = Float64Array.from(states);
        shiftedUp[col] += stepSize;
        const constraintUp = constraintFunction(parameters, shiftedUp);
        // c(p, x - h·e_col)
        const shiftedDown = Float64Array.from(states);
        shiftedDown[col] -= stepSize;
        const constraintDown = constraintFunction(parameters, shiftedDown);
        for (let row = 0; row < rowCount; row++) {
            // Symmetric difference quotient for c_row with respect to x_col.
            entries[row][col] = (constraintUp[row] - constraintDown[row]) / (2 * stepSize);
        }
    }
    return new Matrix(entries);
}
|
|
279
|
+
/**
 * Jacobian ∂r/∂p of a constrained residual function r(p, x), states held fixed.
 *
 * Result has shape (residualCount × parameterCount) with
 * entry (i, j) ≈ (r_i(p + h·e_j, x) - r_i(p - h·e_j, x)) / (2h).
 *
 * @param parameters - Parameter vector p (differentiated)
 * @param states - State vector x (held constant)
 * @param residualFunction - Constrained vector-valued residual r(p, x)
 * @param options - Optional settings; options.stepSize overrides the default h
 * @returns Matrix with the partial derivatives ∂r/∂p
 */
export function finiteDiffResidualPartialP(parameters, states, residualFunction, options = {}) {
    const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
    const columnCount = parameters.length;
    // Probe once at the base point to learn how many residual components exist.
    const rowCount = residualFunction(parameters, states).length;
    const entries = Array.from({ length: rowCount }, () => new Array(columnCount).fill(0));
    for (let col = 0; col < columnCount; col++) {
        // r(p + h·e_col, x)
        const shiftedUp = Float64Array.from(parameters);
        shiftedUp[col] += stepSize;
        const residualUp = residualFunction(shiftedUp, states);
        // r(p - h·e_col, x)
        const shiftedDown = Float64Array.from(parameters);
        shiftedDown[col] -= stepSize;
        const residualDown = residualFunction(shiftedDown, states);
        for (let row = 0; row < rowCount; row++) {
            // Symmetric difference quotient for r_row with respect to p_col.
            entries[row][col] = (residualUp[row] - residualDown[row]) / (2 * stepSize);
        }
    }
    return new Matrix(entries);
}
|
|
321
|
+
/**
 * Jacobian ∂r/∂x of a constrained residual function r(p, x), parameters held fixed.
 *
 * Result has shape (residualCount × stateCount) with
 * entry (i, j) ≈ (r_i(p, x + h·e_j) - r_i(p, x - h·e_j)) / (2h).
 *
 * @param parameters - Parameter vector p (held constant)
 * @param states - State vector x (differentiated)
 * @param residualFunction - Constrained vector-valued residual r(p, x)
 * @param options - Optional settings; options.stepSize overrides the default h
 * @returns Matrix with the partial derivatives ∂r/∂x
 */
export function finiteDiffResidualPartialX(parameters, states, residualFunction, options = {}) {
    const stepSize = options.stepSize ?? DEFAULT_STEP_SIZE;
    const columnCount = states.length;
    // Probe once at the base point to learn how many residual components exist.
    const rowCount = residualFunction(parameters, states).length;
    const entries = Array.from({ length: rowCount }, () => new Array(columnCount).fill(0));
    for (let col = 0; col < columnCount; col++) {
        // r(p, x + h·e_col)
        const shiftedUp = Float64Array.from(states);
        shiftedUp[col] += stepSize;
        const residualUp = residualFunction(parameters, shiftedUp);
        // r(p, x - h·e_col)
        const shiftedDown = Float64Array.from(states);
        shiftedDown[col] -= stepSize;
        const residualDown = residualFunction(parameters, shiftedDown);
        for (let row = 0; row < rowCount; row++) {
            // Symmetric difference quotient for r_row with respect to x_col.
            entries[row][col] = (residualUp[row] - residualDown[row]) / (2 * stepSize);
        }
    }
    return new Matrix(entries);
}
|
|
363
|
+
//# sourceMappingURL=finiteDiff.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"finiteDiff.js","sourceRoot":"","sources":["../../src/core/finiteDiff.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;GAaG;AAEH,OAAO,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AAUnC,MAAM,iBAAiB,GAAG,IAAI,CAAC;AAC/B,MAAM,8BAA8B,GAAG,GAAG,CAAC,CAAC,uEAAuE;AAEnH;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2DG;AACH,MAAM,UAAU,kBAAkB,CAChC,UAAwB,EACxB,YAAoB,EACpB,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC;IACzC,MAAM,QAAQ,GAAG,IAAI,YAAY,CAAC,cAAc,CAAC,CAAC;IAElD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,cAAc,EAAE,CAAC,EAAE,EAAE,CAAC;QACxC,uBAAuB;QACvB,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACnD,aAAa,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;QAC7B,MAAM,WAAW,GAAG,YAAY,CAAC,aAAa,CAAC,CAAC;QAEhD,wBAAwB;QACxB,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACpD,cAAc,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;QAC9B,MAAM,YAAY,GAAG,YAAY,CAAC,cAAc,CAAC,CAAC;QAElD,+CAA+C;QAC/C,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,GAAG,YAAY,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;IAC3F,CAAC;IAED,OAAO,QAAQ,CAAC;AAClB,CAAC;AAED;;;;;;;;;GASG;AACH,MAAM,UAAU,kBAAkB,CAChC,gBAA4B,EAC5B,UAAwB,EACxB,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC;IAEzC,2DAA2D;IAC3D,MAAM,eAAe,GAAG,gBAAgB,CAAC,UAAU,CAAC,CAAC;IACrD,MAAM,aAAa,GAAG,eAAe,CAAC,MAAM,CAAC;IAE7C,8DAA8D;IAC9D,MAAM,YAAY,GAAe,EAAE,CAAC;IACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,EAAE,CAAC,EAAE,EAAE,CAAC;QACvC,YAAY,CAAC,IAAI,CAAC,IAAI,KAAK,CAAC,cAAc,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;IACvD,CAAC;IAED,yEAAyE;IACzE,KAAK,IAAI,UAAU,GAAG,CAAC,EAAE,UAAU,GAAG,cAAc,EAAE,UAAU,EAAE,EAAE,CAAC;QACnE,wDAAwD;QACxD,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACnD,aAAa,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACtC,MAAM,eAAe,GAAG,gBAAgB,CAAC,aAAa,CAAC,CAAC;QAExD,8BAA8B;QAC9B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACpD,cAAc,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACvC,MAAM,gBAAgB,GAAG,gBAAgB,CAAC,cAAc,CAAC,CAAC;QAE1D,iDAAiD;QACjD,KAAK,IAAI,aAAa,GAAG,CAAC,EAA
E,aAAa,GAAG,aAAa,EAAE,aAAa,EAAE,EAAE,CAAC;YAC3E,MAAM,UAAU,GAAG,CAAC,eAAe,CAAC,aAAa,CAAC,GAAG,gBAAgB,CAAC,aAAa,CAAC,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;YACpI,YAAY,CAAC,aAAa,CAAC,CAAC,UAAU,CAAC,GAAG,UAAU,CAAC;QACvD,CAAC;IACH,CAAC;IAED,OAAO,IAAI,MAAM,CAAC,YAAY,CAAC,CAAC;AAClC,CAAC;AAED;;;;;;;;;;;GAWG;AACH,MAAM,UAAU,kBAAkB,CAChC,UAAwB,EACxB,MAAoB,EACpB,YAA+B,EAC/B,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC;IACzC,MAAM,QAAQ,GAAG,IAAI,YAAY,CAAC,cAAc,CAAC,CAAC;IAElD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,cAAc,EAAE,CAAC,EAAE,EAAE,CAAC;QACxC,2BAA2B;QAC3B,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACnD,aAAa,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;QAC7B,MAAM,WAAW,GAAG,YAAY,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;QAExD,4BAA4B;QAC5B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACpD,cAAc,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;QAC9B,MAAM,YAAY,GAAG,YAAY,CAAC,cAAc,EAAE,MAAM,CAAC,CAAC;QAE1D,6DAA6D;QAC7D,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,GAAG,YAAY,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;IAC3F,CAAC;IAED,OAAO,QAAQ,CAAC;AAClB,CAAC;AAED;;;;;;;;;;;GAWG;AACH,MAAM,UAAU,kBAAkB,CAChC,UAAwB,EACxB,MAAoB,EACpB,YAA+B,EAC/B,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IACjC,MAAM,QAAQ,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;IAE9C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,UAAU,EAAE,CAAC,EAAE,EAAE,CAAC;QACpC,2BAA2B;QAC3B,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;QAC/C,aAAa,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;QAC7B,MAAM,WAAW,GAAG,YAAY,CAAC,UAAU,EAAE,aAAa,CAAC,CAAC;QAE5D,4BAA4B;QAC5B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;QAChD,cAAc,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;QAC9B,MAAM,YAAY,GAAG,YAAY,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;QAE9D,6DAA6D;QAC7D,QAAQ,CAAC,CAAC,CAAC,GAAG,CAAC,WAAW,GAAG,YAAY,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;IAC3F,CAAC;IAED,OAAO,QAAQ,CAAC;AAClB,CAAC;AAED;;;;;;;;;;;;GAYG;AACH,MAAM,UAAU,4BAA4B,CAC1C,UAAwB,EACxB,MAAoB,EACpB,kBAAgC,EAChC,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAi
B,CAAC;IACvD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC;IAEzC,6DAA6D;IAC7D,MAAM,iBAAiB,GAAG,kBAAkB,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC;IACjE,MAAM,eAAe,GAAG,iBAAiB,CAAC,MAAM,CAAC;IAEjD,gEAAgE;IAChE,MAAM,YAAY,GAAe,EAAE,CAAC;IACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,eAAe,EAAE,CAAC,EAAE,EAAE,CAAC;QACzC,YAAY,CAAC,IAAI,CAAC,IAAI,KAAK,CAAC,cAAc,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;IACvD,CAAC;IAED,yEAAyE;IACzE,KAAK,IAAI,UAAU,GAAG,CAAC,EAAE,UAAU,GAAG,cAAc,EAAE,UAAU,EAAE,EAAE,CAAC;QACnE,2BAA2B;QAC3B,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACnD,aAAa,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACtC,MAAM,iBAAiB,GAAG,kBAAkB,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;QAEpE,4BAA4B;QAC5B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACpD,cAAc,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACvC,MAAM,kBAAkB,GAAG,kBAAkB,CAAC,cAAc,EAAE,MAAM,CAAC,CAAC;QAEtE,mDAAmD;QACnD,KAAK,IAAI,eAAe,GAAG,CAAC,EAAE,eAAe,GAAG,eAAe,EAAE,eAAe,EAAE,EAAE,CAAC;YACnF,MAAM,UAAU,GAAG,CAAC,iBAAiB,CAAC,eAAe,CAAC,GAAG,kBAAkB,CAAC,eAAe,CAAC,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;YAC5I,YAAY,CAAC,eAAe,CAAC,CAAC,UAAU,CAAC,GAAG,UAAU,CAAC;QACzD,CAAC;IACH,CAAC;IAED,OAAO,IAAI,MAAM,CAAC,YAAY,CAAC,CAAC;AAClC,CAAC;AAED;;;;;;;;;;;;GAYG;AACH,MAAM,UAAU,4BAA4B,CAC1C,UAAwB,EACxB,MAAoB,EACpB,kBAAgC,EAChC,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IAEjC,6DAA6D;IAC7D,MAAM,iBAAiB,GAAG,kBAAkB,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC;IACjE,MAAM,eAAe,GAAG,iBAAiB,CAAC,MAAM,CAAC;IAEjD,4DAA4D;IAC5D,MAAM,YAAY,GAAe,EAAE,CAAC;IACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,eAAe,EAAE,CAAC,EAAE,EAAE,CAAC;QACzC,YAAY,CAAC,IAAI,CAAC,IAAI,KAAK,CAAC,UAAU,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;IACnD,CAAC;IAED,qEAAqE;IACrE,KAAK,IAAI,UAAU,GAAG,CAAC,EAAE,UAAU,GAAG,UAAU,EAAE,UAAU,EAAE,EAAE,CAAC;QAC/D,2BAA2B;QAC3B,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;QAC/C,aAAa,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACtC,MAAM,iBAAiB,GAAG,kBAAkB,CAAC,UAAU,EAAE,aAAa,CAAC,CAAC;QAExE,4BAA4B;QAC5B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;QAChD,cAAc,CAAC,UAAU,CAAC,
IAAI,QAAQ,CAAC;QACvC,MAAM,kBAAkB,GAAG,kBAAkB,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;QAE1E,mDAAmD;QACnD,KAAK,IAAI,eAAe,GAAG,CAAC,EAAE,eAAe,GAAG,eAAe,EAAE,eAAe,EAAE,EAAE,CAAC;YACnF,MAAM,UAAU,GAAG,CAAC,iBAAiB,CAAC,eAAe,CAAC,GAAG,kBAAkB,CAAC,eAAe,CAAC,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;YAC5I,YAAY,CAAC,eAAe,CAAC,CAAC,UAAU,CAAC,GAAG,UAAU,CAAC;QACzD,CAAC;IACH,CAAC;IAED,OAAO,IAAI,MAAM,CAAC,YAAY,CAAC,CAAC;AAClC,CAAC;AAED;;;;;;;;;;;;GAYG;AACH,MAAM,UAAU,0BAA0B,CACxC,UAAwB,EACxB,MAAoB,EACpB,gBAAuC,EACvC,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC;IAEzC,2DAA2D;IAC3D,MAAM,eAAe,GAAG,gBAAgB,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC;IAC7D,MAAM,aAAa,GAAG,eAAe,CAAC,MAAM,CAAC;IAE7C,8DAA8D;IAC9D,MAAM,YAAY,GAAe,EAAE,CAAC;IACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,EAAE,CAAC,EAAE,EAAE,CAAC;QACvC,YAAY,CAAC,IAAI,CAAC,IAAI,KAAK,CAAC,cAAc,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;IACvD,CAAC;IAED,yEAAyE;IACzE,KAAK,IAAI,UAAU,GAAG,CAAC,EAAE,UAAU,GAAG,cAAc,EAAE,UAAU,EAAE,EAAE,CAAC;QACnE,2BAA2B;QAC3B,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACnD,aAAa,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACtC,MAAM,eAAe,GAAG,gBAAgB,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;QAEhE,4BAA4B;QAC5B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,CAAC;QACpD,cAAc,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACvC,MAAM,gBAAgB,GAAG,gBAAgB,CAAC,cAAc,EAAE,MAAM,CAAC,CAAC;QAElE,iDAAiD;QACjD,KAAK,IAAI,aAAa,GAAG,CAAC,EAAE,aAAa,GAAG,aAAa,EAAE,aAAa,EAAE,EAAE,CAAC;YAC3E,MAAM,UAAU,GAAG,CAAC,eAAe,CAAC,aAAa,CAAC,GAAG,gBAAgB,CAAC,aAAa,CAAC,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;YACpI,YAAY,CAAC,aAAa,CAAC,CAAC,UAAU,CAAC,GAAG,UAAU,CAAC;QACvD,CAAC;IACH,CAAC;IAED,OAAO,IAAI,MAAM,CAAC,YAAY,CAAC,CAAC;AAClC,CAAC;AAED;;;;;;;;;;;;GAYG;AACH,MAAM,UAAU,0BAA0B,CACxC,UAAwB,EACxB,MAAoB,EACpB,gBAAuC,EACvC,UAA2C,EAAE;IAE7C,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,IAAI,iBAAiB,CAAC;IACvD,MAAM,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IAEjC,2DAA2D;IAC3D,MAAM,eAAe,GAAG,gBAAgB,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC;IAC7D,MAAM,aAAa,GAAG,eAAe,CAAC,MAAM,CAAC;IAE7C,0DAA0D;IAC1D,M
AAM,YAAY,GAAe,EAAE,CAAC;IACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,EAAE,CAAC,EAAE,EAAE,CAAC;QACvC,YAAY,CAAC,IAAI,CAAC,IAAI,KAAK,CAAC,UAAU,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;IACnD,CAAC;IAED,qEAAqE;IACrE,KAAK,IAAI,UAAU,GAAG,CAAC,EAAE,UAAU,GAAG,UAAU,EAAE,UAAU,EAAE,EAAE,CAAC;QAC/D,2BAA2B;QAC3B,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;QAC/C,aAAa,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACtC,MAAM,eAAe,GAAG,gBAAgB,CAAC,UAAU,EAAE,aAAa,CAAC,CAAC;QAEpE,4BAA4B;QAC5B,MAAM,cAAc,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;QAChD,cAAc,CAAC,UAAU,CAAC,IAAI,QAAQ,CAAC;QACvC,MAAM,gBAAgB,GAAG,gBAAgB,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;QAEtE,iDAAiD;QACjD,KAAK,IAAI,aAAa,GAAG,CAAC,EAAE,aAAa,GAAG,aAAa,EAAE,aAAa,EAAE,EAAE,CAAC;YAC3E,MAAM,UAAU,GAAG,CAAC,eAAe,CAAC,aAAa,CAAC,GAAG,gBAAgB,CAAC,aAAa,CAAC,CAAC,GAAG,CAAC,8BAA8B,GAAG,QAAQ,CAAC,CAAC;YACpI,YAAY,CAAC,aAAa,CAAC,CAAC,UAAU,CAAC,GAAG,UAAU,CAAC;QACvD,CAAC;IACH,CAAC;IAED,OAAO,IAAI,MAAM,CAAC,YAAY,CAAC,CAAC;AAClC,CAAC"}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file implements the Gauss-Newton method for solving nonlinear least squares problems.
|
|
3
|
+
*
|
|
4
|
+
* Role in system:
|
|
5
|
+
* - Phase 2 intermediate algorithm (builds on gradient descent concepts)
|
|
6
|
+
* - Foundation for Levenberg-Marquardt method
|
|
7
|
+
* - Specifically designed for nonlinear least squares problems
|
|
8
|
+
*
|
|
9
|
+
* For first-time readers:
|
|
10
|
+
* - Start with gaussNewton function
|
|
11
|
+
* - Understand how it solves normal equations: (J^T J) δ = -J^T r
|
|
12
|
+
* - This is a special case of Newton's method for least squares
|
|
13
|
+
*/
|
|
14
|
+
import type { ResidualFn, GaussNewtonOptions, OptimizationResult } from './types.js';
|
|
15
|
+
/**
|
|
16
|
+
* Performs Gauss-Newton optimization for nonlinear least squares problems.
|
|
17
|
+
*
|
|
18
|
+
* Algorithm:
|
|
19
|
+
* 1. Start with initial parameters
|
|
20
|
+
* 2. Compute residual vector r and Jacobian matrix J
|
|
21
|
+
* 3. Solve normal equations: (J^T J) δ = -J^T r
|
|
22
|
+
* 4. Update parameters: x_new = x_old + δ
|
|
23
|
+
* 5. Repeat until convergence
|
|
24
|
+
*
|
|
25
|
+
* The Gauss-Newton method approximates the Hessian as J^T J, which is
|
|
26
|
+
* exact for linear least squares and a good approximation for nonlinear cases.
|
|
27
|
+
*/
|
|
28
|
+
export declare function gaussNewton(initialParameters: Float64Array, residualFunction: ResidualFn, options?: GaussNewtonOptions): OptimizationResult;
|
|
29
|
+
//# sourceMappingURL=gaussNewton.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"gaussNewton.d.ts","sourceRoot":"","sources":["../../src/core/gaussNewton.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAGH,OAAO,KAAK,EACV,UAAU,EAEV,kBAAkB,EAClB,kBAAkB,EACnB,MAAM,YAAY,CAAC;AAapB;;;;;;;;;;;;GAYG;AACH,wBAAgB,WAAW,CACzB,iBAAiB,EAAE,YAAY,EAC/B,gBAAgB,EAAE,UAAU,EAC5B,OAAO,GAAE,kBAAuB,GAC/B,kBAAkB,CAqIpB"}
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file implements the Gauss-Newton method for solving nonlinear least squares problems.
|
|
3
|
+
*
|
|
4
|
+
* Role in system:
|
|
5
|
+
* - Phase 2 intermediate algorithm (builds on gradient descent concepts)
|
|
6
|
+
* - Foundation for Levenberg-Marquardt method
|
|
7
|
+
* - Specifically designed for nonlinear least squares problems
|
|
8
|
+
*
|
|
9
|
+
* For first-time readers:
|
|
10
|
+
* - Start with gaussNewton function
|
|
11
|
+
* - Understand how it solves normal equations: (J^T J) δ = -J^T r
|
|
12
|
+
* - This is a special case of Newton's method for least squares
|
|
13
|
+
*/
|
|
14
|
+
import { solve, CholeskyDecomposition } from 'ml-matrix';
|
|
15
|
+
import { float64ArrayToMatrix, matrixToFloat64Array, vectorNorm, computeSumOfSquaredResiduals } from '../utils/matrix.js';
|
|
16
|
+
import { checkStepSizeConvergence, checkResidualConvergence, createConvergenceResult } from './convergence.js';
|
|
17
|
+
import { computeJacobianMatrix } from './jacobianComputation.js';
|
|
18
|
+
import { Logger } from './logger.js';
|
|
19
|
+
const DEFAULT_MAX_ITERATIONS = 1000;
|
|
20
|
+
const DEFAULT_TOLERANCE = 1e-6;
|
|
21
|
+
const DEFAULT_USE_NUMERIC_JACOBIAN = true;
|
|
22
|
+
const DEFAULT_JACOBIAN_STEP = 1e-6;
|
|
23
|
+
const NEGATIVE_COEFFICIENT = -1.0; // Coefficient for negative right-hand side in normal equations: (J^T J) δ = -J^T r
|
|
24
|
+
/**
|
|
25
|
+
* Performs Gauss-Newton optimization for nonlinear least squares problems.
|
|
26
|
+
*
|
|
27
|
+
* Algorithm:
|
|
28
|
+
* 1. Start with initial parameters
|
|
29
|
+
* 2. Compute residual vector r and Jacobian matrix J
|
|
30
|
+
* 3. Solve normal equations: (J^T J) δ = -J^T r
|
|
31
|
+
* 4. Update parameters: x_new = x_old + δ
|
|
32
|
+
* 5. Repeat until convergence
|
|
33
|
+
*
|
|
34
|
+
* The Gauss-Newton method approximates the Hessian as J^T J, which is
|
|
35
|
+
* exact for linear least squares and a good approximation for nonlinear cases.
|
|
36
|
+
*/
|
|
37
|
+
export function gaussNewton(initialParameters, residualFunction, options = {}) {
|
|
38
|
+
const actualOptions = options;
|
|
39
|
+
const jacobianFunction = actualOptions.jacobian;
|
|
40
|
+
const maxIterations = actualOptions.maxIterations ?? DEFAULT_MAX_ITERATIONS;
|
|
41
|
+
const tolerance = actualOptions.tolerance ?? DEFAULT_TOLERANCE;
|
|
42
|
+
const useNumericJacobian = actualOptions.useNumericJacobian ?? DEFAULT_USE_NUMERIC_JACOBIAN;
|
|
43
|
+
const jacobianStep = actualOptions.jacobianStep ?? DEFAULT_JACOBIAN_STEP;
|
|
44
|
+
const onIteration = actualOptions.onIteration;
|
|
45
|
+
const logger = new Logger(actualOptions.logLevel, actualOptions.verbose);
|
|
46
|
+
let currentParameters = new Float64Array(initialParameters);
|
|
47
|
+
for (let iteration = 0; iteration < maxIterations; iteration++) {
|
|
48
|
+
// Compute residual vector
|
|
49
|
+
const residual = residualFunction(currentParameters);
|
|
50
|
+
const residualNorm = vectorNorm(residual);
|
|
51
|
+
const cost = computeSumOfSquaredResiduals(residualNorm);
|
|
52
|
+
// Call progress callback if provided
|
|
53
|
+
if (onIteration) {
|
|
54
|
+
onIteration(iteration, cost, currentParameters);
|
|
55
|
+
}
|
|
56
|
+
// Compute Jacobian matrix
|
|
57
|
+
// Early return: use analytical Jacobian if provided
|
|
58
|
+
const jacobianMatrix = computeJacobianMatrix(jacobianFunction, residualFunction, currentParameters, useNumericJacobian, jacobianStep, 'gaussNewton');
|
|
59
|
+
// Compute J^T J and J^T r
|
|
60
|
+
const jacobianTranspose = jacobianMatrix.transpose();
|
|
61
|
+
const jtj = jacobianTranspose.mmul(jacobianMatrix);
|
|
62
|
+
const residualMatrix = float64ArrayToMatrix(residual);
|
|
63
|
+
const jtr = jacobianTranspose.mmul(residualMatrix);
|
|
64
|
+
// Solve normal equations: (J^T J) δ = -J^T r
|
|
65
|
+
// This gives us: δ = -(J^T J)^(-1) J^T r
|
|
66
|
+
// Try Cholesky decomposition first for efficiency (if J^T J is positive definite)
|
|
67
|
+
let step;
|
|
68
|
+
try {
|
|
69
|
+
const negativeJtr = jtr.mul(NEGATIVE_COEFFICIENT);
|
|
70
|
+
let stepMatrix;
|
|
71
|
+
try {
|
|
72
|
+
const cholesky = new CholeskyDecomposition(jtj);
|
|
73
|
+
if (cholesky.isPositiveDefinite()) {
|
|
74
|
+
// Use Cholesky decomposition for efficiency (about 2x faster than LU)
|
|
75
|
+
stepMatrix = cholesky.solve(negativeJtr);
|
|
76
|
+
}
|
|
77
|
+
else {
|
|
78
|
+
// J^T J is not positive definite, fallback to LU decomposition
|
|
79
|
+
stepMatrix = solve(jtj, negativeJtr);
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
catch (choleskyError) {
|
|
83
|
+
// Cholesky decomposition failed (non-symmetric or other issues), fallback to LU
|
|
84
|
+
stepMatrix = solve(jtj, negativeJtr);
|
|
85
|
+
}
|
|
86
|
+
step = matrixToFloat64Array(stepMatrix);
|
|
87
|
+
}
|
|
88
|
+
catch (error) {
|
|
89
|
+
// Handle singular matrix (J^T J is not invertible)
|
|
90
|
+
logger.warn('gaussNewton', iteration, 'Singular matrix encountered. Consider using Levenberg-Marquardt method for better robustness.', [
|
|
91
|
+
{ key: 'Cost:', value: cost },
|
|
92
|
+
{ key: 'Residual norm:', value: residualNorm }
|
|
93
|
+
]);
|
|
94
|
+
const result = createConvergenceResult(currentParameters, iteration, false, cost, undefined);
|
|
95
|
+
return { ...result, finalResidualNorm: residualNorm };
|
|
96
|
+
}
|
|
97
|
+
// Check convergence: step size is small enough
|
|
98
|
+
const stepNorm = vectorNorm(step);
|
|
99
|
+
if (checkStepSizeConvergence(stepNorm, tolerance, iteration)) {
|
|
100
|
+
logger.info('gaussNewton', iteration, 'Converged', [
|
|
101
|
+
{ key: 'Cost:', value: cost },
|
|
102
|
+
{ key: 'Residual norm:', value: residualNorm },
|
|
103
|
+
{ key: 'Step size:', value: stepNorm }
|
|
104
|
+
]);
|
|
105
|
+
const result = createConvergenceResult(currentParameters, iteration, true, cost, undefined);
|
|
106
|
+
return { ...result, finalResidualNorm: residualNorm };
|
|
107
|
+
}
|
|
108
|
+
// Update parameters: x_new = x_old + δ
|
|
109
|
+
const newParameters = new Float64Array(currentParameters.length);
|
|
110
|
+
for (let i = 0; i < currentParameters.length; i++) {
|
|
111
|
+
newParameters[i] = currentParameters[i] + step[i];
|
|
112
|
+
}
|
|
113
|
+
// Compute residual for new parameters
|
|
114
|
+
const newResidual = residualFunction(newParameters);
|
|
115
|
+
const newResidualNorm = vectorNorm(newResidual);
|
|
116
|
+
const newCost = computeSumOfSquaredResiduals(newResidualNorm);
|
|
117
|
+
// Check convergence: residual norm is small enough
|
|
118
|
+
if (checkResidualConvergence(newResidualNorm, tolerance, iteration)) {
|
|
119
|
+
logger.info('gaussNewton', iteration, 'Converged', [
|
|
120
|
+
{ key: 'Cost:', value: newCost },
|
|
121
|
+
{ key: 'Residual norm:', value: newResidualNorm }
|
|
122
|
+
]);
|
|
123
|
+
const result = createConvergenceResult(newParameters, iteration, true, newCost, undefined);
|
|
124
|
+
return { ...result, finalResidualNorm: newResidualNorm };
|
|
125
|
+
}
|
|
126
|
+
logger.debug('gaussNewton', iteration, 'Progress', [
|
|
127
|
+
{ key: 'Cost:', value: cost },
|
|
128
|
+
{ key: 'Residual norm:', value: residualNorm },
|
|
129
|
+
{ key: 'Step norm:', value: stepNorm }
|
|
130
|
+
]);
|
|
131
|
+
currentParameters = newParameters;
|
|
132
|
+
}
|
|
133
|
+
// Maximum iterations reached
|
|
134
|
+
const finalResidual = residualFunction(currentParameters);
|
|
135
|
+
const finalResidualNorm = vectorNorm(finalResidual);
|
|
136
|
+
const finalCost = computeSumOfSquaredResiduals(finalResidualNorm);
|
|
137
|
+
logger.warn('gaussNewton', undefined, 'Maximum iterations reached', [
|
|
138
|
+
{ key: 'Iterations:', value: maxIterations },
|
|
139
|
+
{ key: 'Final cost:', value: finalCost },
|
|
140
|
+
{ key: 'Final residual norm:', value: finalResidualNorm }
|
|
141
|
+
]);
|
|
142
|
+
return {
|
|
143
|
+
parameters: currentParameters,
|
|
144
|
+
iterations: maxIterations,
|
|
145
|
+
converged: false,
|
|
146
|
+
finalCost: finalCost,
|
|
147
|
+
finalGradientNorm: undefined,
|
|
148
|
+
finalResidualNorm: finalResidualNorm
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
//# sourceMappingURL=gaussNewton.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"gaussNewton.js","sourceRoot":"","sources":["../../src/core/gaussNewton.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,EAAU,KAAK,EAAE,qBAAqB,EAAE,MAAM,WAAW,CAAC;AAOjE,OAAO,EAAE,oBAAoB,EAAE,oBAAoB,EAAE,UAAU,EAAE,4BAA4B,EAAE,MAAM,oBAAoB,CAAC;AAC1H,OAAO,EAAE,wBAAwB,EAAE,wBAAwB,EAAE,uBAAuB,EAAE,MAAM,kBAAkB,CAAC;AAC/G,OAAO,EAAE,qBAAqB,EAAE,MAAM,0BAA0B,CAAC;AACjE,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AAErC,MAAM,sBAAsB,GAAG,IAAI,CAAC;AACpC,MAAM,iBAAiB,GAAG,IAAI,CAAC;AAC/B,MAAM,4BAA4B,GAAG,IAAI,CAAC;AAC1C,MAAM,qBAAqB,GAAG,IAAI,CAAC;AACnC,MAAM,oBAAoB,GAAG,CAAC,GAAG,CAAC,CAAC,mFAAmF;AAGtH;;;;;;;;;;;;GAYG;AACH,MAAM,UAAU,WAAW,CACzB,iBAA+B,EAC/B,gBAA4B,EAC5B,UAA8B,EAAE;IAEhC,MAAM,aAAa,GAAuB,OAAO,CAAC;IAClD,MAAM,gBAAgB,GAA2B,aAAa,CAAC,QAAQ,CAAC;IAExE,MAAM,aAAa,GAAG,aAAa,CAAC,aAAa,IAAI,sBAAsB,CAAC;IAC5E,MAAM,SAAS,GAAG,aAAa,CAAC,SAAS,IAAI,iBAAiB,CAAC;IAC/D,MAAM,kBAAkB,GAAG,aAAa,CAAC,kBAAkB,IAAI,4BAA4B,CAAC;IAC5F,MAAM,YAAY,GAAG,aAAa,CAAC,YAAY,IAAI,qBAAqB,CAAC;IACzE,MAAM,WAAW,GAAG,aAAa,CAAC,WAAW,CAAC;IAC9C,MAAM,MAAM,GAAG,IAAI,MAAM,CAAC,aAAa,CAAC,QAAQ,EAAE,aAAa,CAAC,OAAO,CAAC,CAAC;IAEzE,IAAI,iBAAiB,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,CAAC;IAE5D,KAAK,IAAI,SAAS,GAAG,CAAC,EAAE,SAAS,GAAG,aAAa,EAAE,SAAS,EAAE,EAAE,CAAC;QAC/D,0BAA0B;QAC1B,MAAM,QAAQ,GAAG,gBAAgB,CAAC,iBAAiB,CAAC,CAAC;QACrD,MAAM,YAAY,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;QAC1C,MAAM,IAAI,GAAG,4BAA4B,CAAC,YAAY,CAAC,CAAC;QAExD,qCAAqC;QACrC,IAAI,WAAW,EAAE,CAAC;YAChB,WAAW,CAAC,SAAS,EAAE,IAAI,EAAE,iBAAiB,CAAC,CAAC;QAClD,CAAC;QAED,0BAA0B;QAC1B,oDAAoD;QACpD,MAAM,cAAc,GAAW,qBAAqB,CAClD,gBAAgB,EAChB,gBAAgB,EAChB,iBAAiB,EACjB,kBAAkB,EAClB,YAAY,EACZ,aAAa,CACd,CAAC;QAEF,0BAA0B;QAC1B,MAAM,iBAAiB,GAAG,cAAc,CAAC,SAAS,EAAE,CAAC;QACrD,MAAM,GAAG,GAAG,iBAAiB,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC;QACnD,MAAM,cAAc,GAAG,oBAAoB,CAAC,QAAQ,CAAC,CAAC;QACtD,MAAM,GAAG,GAAG,iBAAiB,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC;QAEnD,6CAA6C;QAC7C,yCAAyC;QACzC,kFAAkF;QAClF,IAAI,IAAkB,CAAC;QACvB,IAAI,CAAC;YACH,MAAM,WAAW,GAAG,GAAG,CAAC,GAAG,CAAC,oBAAoB,CAAC,CAAC;YAClD,I
AAI,UAAkB,CAAC;YACvB,IAAI,CAAC;gBACH,MAAM,QAAQ,GAAG,IAAI,qBAAqB,CAAC,GAAG,CAAC,CAAC;gBAChD,IAAI,QAAQ,CAAC,kBAAkB,EAAE,EAAE,CAAC;oBAClC,sEAAsE;oBACtE,UAAU,GAAG,QAAQ,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;gBAC3C,CAAC;qBAAM,CAAC;oBACN,+DAA+D;oBAC/D,UAAU,GAAG,KAAK,CAAC,GAAG,EAAE,WAAW,CAAC,CAAC;gBACvC,CAAC;YACH,CAAC;YAAC,OAAO,aAAa,EAAE,CAAC;gBACvB,gFAAgF;gBAChF,UAAU,GAAG,KAAK,CAAC,GAAG,EAAE,WAAW,CAAC,CAAC;YACvC,CAAC;YACD,IAAI,GAAG,oBAAoB,CAAC,UAAU,CAAC,CAAC;QAC1C,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,mDAAmD;YACnD,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,+FAA+F,EAAE;gBACrI,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,EAAE;gBAC7B,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;aAC/C,CAAC,CAAC;YACH,MAAM,MAAM,GAAG,uBAAuB,CAAC,iBAAiB,EAAE,SAAS,EAAE,KAAK,EAAE,IAAI,EAAE,SAAS,CAAC,CAAC;YAC7F,OAAO,EAAE,GAAG,MAAM,EAAE,iBAAiB,EAAE,YAAY,EAAE,CAAC;QACxD,CAAC;QAED,+CAA+C;QAC/C,MAAM,QAAQ,GAAG,UAAU,CAAC,IAAI,CAAC,CAAC;QAClC,IAAI,wBAAwB,CAAC,QAAQ,EAAE,SAAS,EAAE,SAAS,CAAC,EAAE,CAAC;YAC7D,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,WAAW,EAAE;gBACjD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,EAAE;gBAC7B,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;gBAC9C,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,QAAQ,EAAE;aACvC,CAAC,CAAC;YACH,MAAM,MAAM,GAAG,uBAAuB,CAAC,iBAAiB,EAAE,SAAS,EAAE,IAAI,EAAE,IAAI,EAAE,SAAS,CAAC,CAAC;YAC5F,OAAO,EAAE,GAAG,MAAM,EAAE,iBAAiB,EAAE,YAAY,EAAE,CAAC;QACxD,CAAC;QAED,uCAAuC;QACvC,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QACjE,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,iBAAiB,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YAClD,aAAa,CAAC,CAAC,CAAC,GAAG,iBAAiB,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;QACpD,CAAC;QAED,sCAAsC;QACtC,MAAM,WAAW,GAAG,gBAAgB,CAAC,aAAa,CAAC,CAAC;QACpD,MAAM,eAAe,GAAG,UAAU,CAAC,WAAW,CAAC,CAAC;QAChD,MAAM,OAAO,GAAG,4BAA4B,CAAC,eAAe,CAAC,CAAC;QAE9D,mDAAmD;QACnD,IAAI,wBAAwB,CAAC,eAAe,EAAE,SAAS,EAAE,SAAS,CAAC,EAAE,CAAC;YACpE,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,WAAW,EAAE;gBACjD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,EAAE;gBAChC,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,eAAe,EAAE;aAClD,CA
AC,CAAC;YACH,MAAM,MAAM,GAAG,uBAAuB,CAAC,aAAa,EAAE,SAAS,EAAE,IAAI,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;YAC3F,OAAO,EAAE,GAAG,MAAM,EAAE,iBAAiB,EAAE,eAAe,EAAE,CAAC;QAC3D,CAAC;QAED,MAAM,CAAC,KAAK,CAAC,aAAa,EAAE,SAAS,EAAE,UAAU,EAAE;YACjD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,EAAE;YAC7B,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;YAC9C,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,QAAQ,EAAE;SACvC,CAAC,CAAC;QAEH,iBAAiB,GAAG,aAAa,CAAC;IACpC,CAAC;IAED,6BAA6B;IAC7B,MAAM,aAAa,GAAG,gBAAgB,CAAC,iBAAiB,CAAC,CAAC;IAC1D,MAAM,iBAAiB,GAAG,UAAU,CAAC,aAAa,CAAC,CAAC;IACpD,MAAM,SAAS,GAAG,4BAA4B,CAAC,iBAAiB,CAAC,CAAC;IAElE,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,4BAA4B,EAAE;QAClE,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,aAAa,EAAE;QAC5C,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,SAAS,EAAE;QACxC,EAAE,GAAG,EAAE,sBAAsB,EAAE,KAAK,EAAE,iBAAiB,EAAE;KAC1D,CAAC,CAAC;IAEH,OAAO;QACL,UAAU,EAAE,iBAAiB;QAC7B,UAAU,EAAE,aAAa;QACzB,SAAS,EAAE,KAAK;QAChB,SAAS,EAAE,SAAS;QACpB,iBAAiB,EAAE,SAAS;QAC5B,iBAAiB,EAAE,iBAAiB;KACrC,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file implements the gradient descent optimization algorithm.
|
|
3
|
+
*
|
|
4
|
+
* References:
|
|
5
|
+
* - Nocedal & Wright, "Numerical Optimization" (2nd ed.), Chapter 2 (steepest descent and line search basics)
|
|
6
|
+
* - Boyd & Vandenberghe, "Convex Optimization", Section 9.3 (backtracking line search with Armijo rule)
|
|
7
|
+
*
|
|
8
|
+
* Role in system:
|
|
9
|
+
* - Phase 1 foundation algorithm (simple, testable)
|
|
10
|
+
* - Establishes basic optimization framework
|
|
11
|
+
* - Used as building block for more advanced methods
|
|
12
|
+
*
|
|
13
|
+
* For first-time readers:
|
|
14
|
+
* - Start with gradientDescent function
|
|
15
|
+
* - Understand how it uses line search or fixed step size
|
|
16
|
+
* - Check convergence criteria implementation
|
|
17
|
+
*/
|
|
18
|
+
import type { CostFn, GradientFn, GradientDescentOptions, GradientDescentResult } from './types.js';
|
|
19
|
+
/**
|
|
20
|
+
* Performs gradient descent optimization to minimize a cost function.
|
|
21
|
+
*
|
|
22
|
+
* Algorithm:
|
|
23
|
+
* 1. Start with initial parameters
|
|
24
|
+
* 2. Compute gradient at current point
|
|
25
|
+
* 3. Move in negative gradient direction (steepest descent)
|
|
26
|
+
* 4. Use line search or fixed step size to determine step
|
|
27
|
+
* 5. Repeat until convergence or max iterations
|
|
28
|
+
*
|
|
29
|
+
* Convergence criteria:
|
|
30
|
+
* - Gradient norm < tolerance
|
|
31
|
+
* - Step size < tolerance
|
|
32
|
+
* - Maximum iterations reached
|
|
33
|
+
*/
|
|
34
|
+
export declare function gradientDescent(initialParameters: Float64Array, costFunction: CostFn, gradientFunction: GradientFn, options?: GradientDescentOptions): GradientDescentResult;
|
|
35
|
+
//# sourceMappingURL=gradientDescent.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"gradientDescent.d.ts","sourceRoot":"","sources":["../../src/core/gradientDescent.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAEH,OAAO,KAAK,EACV,MAAM,EACN,UAAU,EACV,sBAAsB,EACtB,qBAAqB,EACtB,MAAM,YAAY,CAAC;AA2OpB;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,eAAe,CAC7B,iBAAiB,EAAE,YAAY,EAC/B,YAAY,EAAE,MAAM,EACpB,gBAAgB,EAAE,UAAU,EAC5B,OAAO,GAAE,sBAA2B,GACnC,qBAAqB,CA0DvB"}
|