numopt-js 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CODING_RULES.md +161 -0
- package/LICENSE +22 -0
- package/README.md +807 -0
- package/dist/core/adjointGradientDescent.d.ts +61 -0
- package/dist/core/adjointGradientDescent.d.ts.map +1 -0
- package/dist/core/adjointGradientDescent.js +764 -0
- package/dist/core/adjointGradientDescent.js.map +1 -0
- package/dist/core/constrainedGaussNewton.d.ts +44 -0
- package/dist/core/constrainedGaussNewton.d.ts.map +1 -0
- package/dist/core/constrainedGaussNewton.js +314 -0
- package/dist/core/constrainedGaussNewton.js.map +1 -0
- package/dist/core/constrainedLevenbergMarquardt.d.ts +46 -0
- package/dist/core/constrainedLevenbergMarquardt.d.ts.map +1 -0
- package/dist/core/constrainedLevenbergMarquardt.js +469 -0
- package/dist/core/constrainedLevenbergMarquardt.js.map +1 -0
- package/dist/core/constrainedUtils.d.ts +92 -0
- package/dist/core/constrainedUtils.d.ts.map +1 -0
- package/dist/core/constrainedUtils.js +364 -0
- package/dist/core/constrainedUtils.js.map +1 -0
- package/dist/core/convergence.d.ts +35 -0
- package/dist/core/convergence.d.ts.map +1 -0
- package/dist/core/convergence.js +51 -0
- package/dist/core/convergence.js.map +1 -0
- package/dist/core/createGradientFunction.d.ts +85 -0
- package/dist/core/createGradientFunction.d.ts.map +1 -0
- package/dist/core/createGradientFunction.js +93 -0
- package/dist/core/createGradientFunction.js.map +1 -0
- package/dist/core/effectiveJacobian.d.ts +90 -0
- package/dist/core/effectiveJacobian.d.ts.map +1 -0
- package/dist/core/effectiveJacobian.js +128 -0
- package/dist/core/effectiveJacobian.js.map +1 -0
- package/dist/core/finiteDiff.d.ts +171 -0
- package/dist/core/finiteDiff.d.ts.map +1 -0
- package/dist/core/finiteDiff.js +363 -0
- package/dist/core/finiteDiff.js.map +1 -0
- package/dist/core/gaussNewton.d.ts +29 -0
- package/dist/core/gaussNewton.d.ts.map +1 -0
- package/dist/core/gaussNewton.js +151 -0
- package/dist/core/gaussNewton.js.map +1 -0
- package/dist/core/gradientDescent.d.ts +35 -0
- package/dist/core/gradientDescent.d.ts.map +1 -0
- package/dist/core/gradientDescent.js +204 -0
- package/dist/core/gradientDescent.js.map +1 -0
- package/dist/core/jacobianComputation.d.ts +24 -0
- package/dist/core/jacobianComputation.d.ts.map +1 -0
- package/dist/core/jacobianComputation.js +38 -0
- package/dist/core/jacobianComputation.js.map +1 -0
- package/dist/core/levenbergMarquardt.d.ts +36 -0
- package/dist/core/levenbergMarquardt.d.ts.map +1 -0
- package/dist/core/levenbergMarquardt.js +286 -0
- package/dist/core/levenbergMarquardt.js.map +1 -0
- package/dist/core/lineSearch.d.ts +42 -0
- package/dist/core/lineSearch.d.ts.map +1 -0
- package/dist/core/lineSearch.js +106 -0
- package/dist/core/lineSearch.js.map +1 -0
- package/dist/core/logger.d.ts +77 -0
- package/dist/core/logger.d.ts.map +1 -0
- package/dist/core/logger.js +162 -0
- package/dist/core/logger.js.map +1 -0
- package/dist/core/types.d.ts +427 -0
- package/dist/core/types.d.ts.map +1 -0
- package/dist/core/types.js +15 -0
- package/dist/core/types.js.map +1 -0
- package/dist/index.d.ts +26 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +29 -0
- package/dist/index.js.map +1 -0
- package/dist/utils/formatting.d.ts +27 -0
- package/dist/utils/formatting.d.ts.map +1 -0
- package/dist/utils/formatting.js +54 -0
- package/dist/utils/formatting.js.map +1 -0
- package/dist/utils/matrix.d.ts +63 -0
- package/dist/utils/matrix.d.ts.map +1 -0
- package/dist/utils/matrix.js +129 -0
- package/dist/utils/matrix.js.map +1 -0
- package/dist/utils/resultFormatter.d.ts +122 -0
- package/dist/utils/resultFormatter.d.ts.map +1 -0
- package/dist/utils/resultFormatter.js +342 -0
- package/dist/utils/resultFormatter.js.map +1 -0
- package/package.json +74 -0
|
@@ -0,0 +1,364 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file provides shared utility functions for constrained optimization algorithms
|
|
3
|
+
* using the adjoint method.
|
|
4
|
+
*
|
|
5
|
+
* Role in system:
|
|
6
|
+
* - Eliminates code duplication between adjointGradientDescent, constrainedGaussNewton, and constrainedLevenbergMarquardt
|
|
7
|
+
* - Centralizes adjoint method computation logic (DRY principle)
|
|
8
|
+
* - Provides reusable functions for state updates and constraint handling
|
|
9
|
+
*
|
|
10
|
+
* For first-time readers:
|
|
11
|
+
* - These are utility functions used internally by constrained optimization algorithms
|
|
12
|
+
* - solveAdjointEquation: Solves the adjoint equation for computing gradients
|
|
13
|
+
* - updateStates: Updates states using linear approximation to maintain constraint satisfaction
|
|
14
|
+
* - validateInitialConditions: Validates initial states and constraints
|
|
15
|
+
*
|
|
16
|
+
* Extracted from adjointGradientDescent.ts to enable code reuse.
|
|
17
|
+
*/
|
|
18
|
+
import { Matrix, solve, CholeskyDecomposition } from 'ml-matrix';
|
|
19
|
+
import { vectorNorm, scaleVector, addVectors } from '../utils/matrix.js';
|
|
20
|
+
import { float64ArrayToMatrix, matrixToFloat64Array } from '../utils/matrix.js';
|
|
21
|
+
import { finiteDiffConstraintPartialX } from './finiteDiff.js';
|
|
22
|
+
// Module-level tuning constants for the regularized solvers and diagnostics below.
const NEGATIVE_COEFFICIENT = -1.0; // Coefficient for negating vectors (flips the sign of RHS vectors)
const MAX_DIAG_LOG_DIM = 40; // Upper bound on matrix dimension for collecting row/col diagnostics (bounds logging cost)
const MAX_REGULARIZATION_ATTEMPTS = 8; // Maximum number of regularization attempts when solving linear systems
const REGULARIZATION_BASE = 10; // Base for exponential regularization scaling
const REGULARIZATION_INITIAL_EXPONENT = -8; // Initial exponent for regularization when no base reg is given: 10^(-8)
const REGULARIZATION_MAX_EXPONENT = 7; // Largest exponent reached (= MAX_REGULARIZATION_ATTEMPTS - 1); used for error diagnostics
const REGULARIZATION_FALLBACK_EXPONENT = -1; // Final exponent when baseReg is 0 (= initial + max attempt): 10^(-1)
const MAX_DIAGNOSTIC_ENTRIES = 5; // Maximum number of smallest rows/columns to include in diagnostics
|
|
30
|
+
/**
 * Scans a matrix for non-finite entries (NaN or ±Infinity).
 * Returns { ok: true } when every entry is finite, otherwise
 * { ok: false, firstBad: { row, col, value } } for the first offending
 * entry in row-major order.
 */
function checkFiniteMatrix(mat) {
  const rowCount = mat.rows;
  const colCount = mat.columns;
  for (let row = 0; row < rowCount; row++) {
    for (let col = 0; col < colCount; col++) {
      const entry = mat.get(row, col);
      if (Number.isFinite(entry)) {
        continue;
      }
      return { ok: false, firstBad: { row, col, value: entry } };
    }
  }
  return { ok: true };
}
|
|
41
|
+
/**
 * Computes summary statistics of a vector/matrix b for diagnostics:
 * its Frobenius norm and the smallest/largest absolute entry values.
 * For an empty input, minAbs is reported as 0 rather than Infinity.
 */
function computeVectorDiagnostics(b) {
  let sumOfSquares = 0;
  let smallest = Number.POSITIVE_INFINITY;
  let largest = 0;
  for (let row = 0; row < b.rows; row++) {
    for (let col = 0; col < b.columns; col++) {
      const value = b.get(row, col);
      const magnitude = Math.abs(value);
      sumOfSquares += value * value;
      if (magnitude < smallest) {
        smallest = magnitude;
      }
      if (magnitude > largest) {
        largest = magnitude;
      }
    }
  }
  const minAbs = smallest === Number.POSITIVE_INFINITY ? 0 : smallest;
  return { norm: Math.sqrt(sumOfSquares), minAbs, maxAbs: largest };
}
|
|
56
|
+
/**
 * Computes per-row and per-column Euclidean norms of A, plus the
 * MAX_DIAGNOSTIC_ENTRIES rows/columns with the smallest norms (near-zero
 * rows/columns are the usual culprits in singular systems).
 * Used only for diagnostic logging when a linear solve fails.
 */
function computeRowColDiagnostics(A) {
  // Euclidean norm over `length` entries selected by `pick`.
  const euclidean = (length, pick) => {
    let total = 0;
    for (let i = 0; i < length; i++) {
      const v = pick(i);
      total += v * v;
    }
    return Math.sqrt(total);
  };
  const rowNorms = Array.from({ length: A.rows }, (_, r) => euclidean(A.columns, (c) => A.get(r, c)));
  const colNorms = Array.from({ length: A.columns }, (_, c) => euclidean(A.rows, (r) => A.get(r, c)));
  // Pair each norm with its index, then keep the few smallest.
  const smallestOf = (norms) =>
    norms
      .map((norm, index) => ({ index, norm }))
      .sort((a, b) => a.norm - b.norm)
      .slice(0, MAX_DIAGNOSTIC_ENTRIES);
  return {
    minRowNorm: Math.min(...rowNorms),
    maxRowNorm: Math.max(...rowNorms),
    minColNorm: Math.min(...colNorms),
    maxColNorm: Math.max(...colNorms),
    smallestRows: smallestOf(rowNorms),
    smallestCols: smallestOf(colNorms)
  };
}
|
|
86
|
+
/**
 * Computes the Tikhonov regularization strength for a given retry attempt.
 * When a caller-supplied base regularization is positive, it is scaled by
 * REGULARIZATION_BASE^attempt; otherwise an absolute ladder starting at
 * REGULARIZATION_BASE^REGULARIZATION_INITIAL_EXPONENT is used.
 */
function computeRegularizationLambda(baseReg, attempt) {
  if (baseReg > 0) {
    return baseReg * Math.pow(REGULARIZATION_BASE, attempt);
  }
  return Math.pow(REGULARIZATION_BASE, REGULARIZATION_INITIAL_EXPONENT + attempt);
}
|
|
95
|
+
/**
 * Attempts to solve the linear system A x = b, retrying with progressively
 * stronger Tikhonov regularization (A + λI) x = b when the solve fails.
 *
 * For each attempt, a Cholesky decomposition is tried first (fast path for
 * symmetric positive-definite systems); if the matrix is not positive
 * definite or the decomposition throws, a general solver is tried before
 * escalating λ on the next attempt.
 *
 * Bug fix: the original code called `A.add(...)`. In ml-matrix, the instance
 * `add` method mutates the receiver in place and returns it, so each attempt
 * (a) corrupted the caller's matrix A and (b) stacked λI from all previous
 * attempts on top of each other. We now clone A before adding the
 * regularization term, so every attempt starts from the pristine matrix.
 *
 * @param A - Coefficient matrix (left untouched by this function)
 * @param b - Right-hand side column vector (Matrix)
 * @param baseReg - Caller-supplied base regularization (0 selects the default ladder)
 * @param logger - Logger for failure diagnostics
 * @param algorithmName - Calling algorithm name for log messages
 * @returns Solution vector as Float64Array
 * @throws Error when every regularized attempt fails
 */
function trySolveWithRegularization(A, b, baseReg, logger, algorithmName) {
  let lastError;
  for (let attempt = 0; attempt < MAX_REGULARIZATION_ATTEMPTS; attempt++) {
    const lambda = computeRegularizationLambda(baseReg, attempt);
    // clone() prevents in-place mutation: ml-matrix's instance add() modifies the receiver.
    const AwithReg = A.clone().add(Matrix.eye(A.rows, A.columns).mul(lambda));
    try {
      const chol = new CholeskyDecomposition(AwithReg);
      if (chol.isPositiveDefinite()) {
        return matrixToFloat64Array(chol.solve(b));
      }
    }
    catch (err) {
      lastError = err;
    }
    // Cholesky not applicable: fall back to the general solver at the same λ.
    try {
      return matrixToFloat64Array(solve(AwithReg, b));
    }
    catch (err) {
      lastError = err;
      continue;
    }
  }
  logger.warn(algorithmName, undefined, `Failed to solve system with regularization: ${lastError}`);
  throw new Error(`Failed to solve linear system even with Tikhonov regularization. ` +
    `Matrix may be singular or ill-conditioned. Last error: ${lastError}`);
}
|
|
125
|
+
/**
 * Solves square system Ax = b with validation and regularization.
 * Fast path for square matrices using direct Cholesky/LU decomposition.
 *
 * Before solving, validates that dimensions agree (A.rows === b.rows) and
 * that both A and b contain only finite values; on failure it logs
 * structured numeric diagnostics (dimensions, first non-finite entry) and
 * throws. If the solve itself fails, it logs RHS norms plus — for matrices
 * no larger than MAX_DIAG_LOG_DIM — per-row/per-column norm diagnostics,
 * then rethrows the solver error.
 *
 * @param Areg - Square coefficient matrix (possibly pre-regularized by the caller)
 * @param b - Right-hand side column vector (Matrix)
 * @param regularization - Base Tikhonov regularization (0 disables the base term)
 * @param logger - Logger for warnings
 * @param algorithmName - Calling algorithm name for log messages
 * @returns Solution vector as Float64Array
 * @throws Error on invalid/non-finite input, or the solver error when all regularized attempts fail
 */
function solveSquareSystem(Areg, b, regularization, logger, algorithmName) {
    const baseReg = regularization > 0 ? regularization : 0;
    // Row/column diagnostics are collected up front (pre-solve state) and only
    // for small matrices, to bound the cost of failure logging.
    const diagnostics = Areg.rows <= MAX_DIAG_LOG_DIM && Areg.columns <= MAX_DIAG_LOG_DIM
        ? computeRowColDiagnostics(Areg)
        : undefined;
    const rhsDiagnostics = computeVectorDiagnostics(b);
    const dimsOk = Areg.rows === b.rows;
    const Afinite = checkFiniteMatrix(Areg);
    const bFinite = checkFiniteMatrix(b);
    if (!dimsOk || !Afinite.ok || !bFinite.ok) {
        // Assemble key/value details for the structured logger; non-numeric
        // values are filtered out below before logging.
        const detailRows = [
            { key: 'A_rows', value: Areg.rows },
            { key: 'A_cols', value: Areg.columns },
            { key: 'b_rows', value: b.rows },
            { key: 'b_cols', value: b.columns }
        ];
        if (!Afinite.ok && Afinite.firstBad) {
            detailRows.push({ key: 'A_bad_row', value: Afinite.firstBad.row });
            detailRows.push({ key: 'A_bad_col', value: Afinite.firstBad.col });
            detailRows.push({ key: 'A_bad_val', value: Afinite.firstBad.value });
        }
        if (!bFinite.ok && bFinite.firstBad) {
            detailRows.push({ key: 'b_bad_row', value: bFinite.firstBad.row });
            detailRows.push({ key: 'b_bad_col', value: bFinite.firstBad.col });
            detailRows.push({ key: 'b_bad_val', value: bFinite.firstBad.value });
        }
        const numericDetails = detailRows.filter(d => typeof d.value === 'number');
        logger.warn(algorithmName, undefined, 'Invalid dimensions or NaN/Inf detected before solve', numericDetails);
        throw new Error('Invalid dimensions or NaN/Inf in inputs for square solve');
    }
    try {
        return trySolveWithRegularization(Areg, b, baseReg, logger, algorithmName);
    }
    catch (error) {
        const detailRows = [
            { key: 'rows', value: Areg.rows },
            { key: 'cols', value: Areg.columns },
            // Largest regularization value that was attempted before giving up
            // (exponent REGULARIZATION_MAX_EXPONENT = last attempt index).
            { key: 'reg_final', value: baseReg > 0 ? baseReg * Math.pow(REGULARIZATION_BASE, REGULARIZATION_MAX_EXPONENT) : Math.pow(REGULARIZATION_BASE, REGULARIZATION_FALLBACK_EXPONENT) }
        ];
        detailRows.push({ key: 'rhs_norm', value: rhsDiagnostics.norm }, { key: 'rhs_min_abs', value: rhsDiagnostics.minAbs }, { key: 'rhs_max_abs', value: rhsDiagnostics.maxAbs });
        if (diagnostics) {
            detailRows.push({ key: 'minRowNorm', value: diagnostics.minRowNorm }, { key: 'minColNorm', value: diagnostics.minColNorm }, { key: 'maxRowNorm', value: diagnostics.maxRowNorm }, { key: 'maxColNorm', value: diagnostics.maxColNorm });
            // Near-zero rows/columns usually identify the source of singularity.
            diagnostics.smallestRows.forEach((row, idx) => {
                detailRows.push({ key: `row_${idx}`, value: row.index });
                detailRows.push({ key: `row_${idx}_norm`, value: row.norm });
            });
            diagnostics.smallestCols.forEach((col, idx) => {
                detailRows.push({ key: `col_${idx}`, value: col.index });
                detailRows.push({ key: `col_${idx}_norm`, value: col.norm });
            });
        }
        const numericDetails = detailRows.filter(d => typeof d.value === 'number');
        logger.warn(algorithmName, undefined, `Failed to solve square system with regularization up to ~1e0: ${error}`, numericDetails);
        throw error;
    }
}
|
|
185
|
+
/**
 * Solves an overdetermined system (more rows than columns) via the normal
 * equations A^T A x = A^T b. The resulting square, symmetric system is
 * handed to the regularized solver (Cholesky fast path applies).
 */
function solveOverdeterminedSystem(A, b, regularization, logger, algorithmName) {
  const transposed = A.transpose();
  const normalMatrix = transposed.mmul(A);
  const normalRhs = transposed.mmul(b);
  const baseReg = regularization > 0 ? regularization : 0;
  try {
    return trySolveWithRegularization(normalMatrix, normalRhs, baseReg, logger, algorithmName);
  }
  catch (error) {
    logger.warn(algorithmName, undefined, `Failed to solve overdetermined system with normal equations: ${error}`);
    throw new Error(`Failed to solve overdetermined system Ax = b using normal equations A^T A x = A^T b. ` +
      `Matrix A^T A may be singular or ill-conditioned. Last error: ${error}`);
  }
}
|
|
203
|
+
/**
 * Solves an underdetermined system (fewer rows than columns) via the normal
 * equations: first solve A A^T y = b for the dual variable y, then recover
 * the minimum-norm primal solution x = A^T y.
 */
function solveUnderdeterminedSystem(A, b, regularization, logger, algorithmName) {
  const transposed = A.transpose();
  const gram = A.mmul(transposed);
  const baseReg = regularization > 0 ? regularization : 0;
  try {
    const dualSolution = trySolveWithRegularization(gram, b, baseReg, logger, algorithmName);
    const primalSolution = transposed.mmul(float64ArrayToMatrix(dualSolution));
    return matrixToFloat64Array(primalSolution);
  }
  catch (error) {
    logger.warn(algorithmName, undefined, `Failed to solve underdetermined system with normal equations: ${error}`);
    throw new Error(`Failed to solve underdetermined system Ax = b using normal equations A A^T y = b, x = A^T y. ` +
      `Matrix A A^T may be singular or ill-conditioned. Last error: ${error}`);
  }
}
|
|
222
|
+
/**
 * Solves a least squares problem Ax = b using Cholesky decomposition.
 * For square matrices, uses Cholesky/LU decomposition directly.
 * For non-square matrices, uses normal equations to convert to a square system:
 * - Overdetermined (rows > columns): A^T A x = A^T b (Cholesky on A^T A)
 * - Underdetermined (rows < columns): A A^T y = b, x = A^T y (Cholesky on A A^T)
 * This approach avoids SVD/pseudoInverse entirely for better performance.
 *
 * Bug fix: in ml-matrix, the instance `add` method mutates the receiver,
 * so the original `A.add(...)` silently modified the caller's matrix when
 * regularization was requested on a square system. We now clone A first.
 *
 * NOTE(review): for the square path, the base regularization is applied here
 * AND escalated again inside trySolveWithRegularization — presumably a
 * deliberate "baseline + escalation" scheme; confirm against the callers.
 *
 * @param A - Coefficient matrix (left untouched)
 * @param b - Right-hand side vector (as Matrix column vector)
 * @param logger - Logger for error messages
 * @param algorithmName - Name of calling algorithm (for error messages)
 * @param regularization - Optional base Tikhonov regularization strength
 * @returns Solution vector x as Float64Array
 */
export function solveLeastSquares(A, b, logger, algorithmName = 'constrainedOptimization', regularization = 0) {
  // clone() prevents in-place mutation of the caller's A by ml-matrix's add().
  const Areg = regularization > 0 && A.rows === A.columns
    ? A.clone().add(Matrix.eye(A.rows, A.columns).mul(regularization))
    : A;
  if (Areg.rows === Areg.columns) {
    return solveSquareSystem(Areg, b, regularization, logger, algorithmName);
  }
  const isOverdetermined = A.rows > A.columns;
  if (isOverdetermined) {
    return solveOverdeterminedSystem(A, b, regularization, logger, algorithmName);
  }
  return solveUnderdeterminedSystem(A, b, regularization, logger, algorithmName);
}
|
|
249
|
+
/**
 * Solves the adjoint equation (∂c/∂x)^T λ = rhs and returns the adjoint
 * variable λ. This is the core of the adjoint method: it yields gradients
 * without explicitly inverting matrices, and supports both square and
 * non-square constraint Jacobians by delegating to the hierarchical
 * least-squares solver.
 *
 * @param dcdx - Constraint Jacobian ∂c/∂x
 * @param rhs - Right-hand side vector (e.g., (∂f/∂x)^T or (r_x^T r))
 * @param logger - Logger instance for error reporting
 * @param algorithmName - Name of calling algorithm (for error messages)
 * @param regularization - Optional Tikhonov regularization strength
 * @returns Adjoint variable λ
 * @throws Error when the (possibly regularized) solve fails
 */
export function solveAdjointEquation(dcdx, rhs, logger, algorithmName = 'constrainedOptimization', regularization = 0) {
  // The adjoint equation uses the transposed Jacobian as the system matrix.
  const systemMatrix = dcdx.transpose();
  const rhsColumn = float64ArrayToMatrix(rhs);
  try {
    return solveLeastSquares(systemMatrix, rhsColumn, logger, algorithmName, regularization);
  }
  catch (error) {
    logger.warn(algorithmName, undefined, `Failed to solve adjoint equation: ${error}`);
    throw new Error(`Failed to solve adjoint equation (∂c/∂x)^T λ = rhs. ` +
      `The constraint Jacobian ∂c/∂x may be singular or ill-conditioned. ` +
      `Matrix size: ${dcdx.rows} × ${dcdx.columns}. ` +
      `Original error: ${error}`);
  }
}
|
|
279
|
+
/**
 * Updates states with a first-order correction: x_new = x + dx, where dx
 * solves (∂c/∂x) dx = -(∂c/∂p) Δp. This keeps the constraints approximately
 * satisfied (first-order Taylor expansion) after a parameter step Δp; small
 * violations from large steps are corrected in subsequent iterations.
 *
 * @param currentStates - Current state vector x
 * @param dcdx - Constraint Jacobian ∂c/∂x
 * @param dcdp - Constraint Jacobian ∂c/∂p
 * @param deltaP - Parameter change Δp
 * @param logger - Logger instance for error reporting
 * @param algorithmName - Name of calling algorithm (for error messages)
 * @returns Updated state vector x_new
 */
export function updateStates(currentStates, dcdx, dcdp, deltaP, logger, algorithmName = 'constrainedOptimization') {
  // Effect of the parameter step on the constraints: (∂c/∂p) Δp.
  const parameterEffect = dcdp.mmul(float64ArrayToMatrix(deltaP));
  // Negate to form the RHS of (∂c/∂x) dx = -(∂c/∂p) Δp.
  const negatedEffect = scaleVector(matrixToFloat64Array(parameterEffect), NEGATIVE_COEFFICIENT);
  const rhsColumn = float64ArrayToMatrix(negatedEffect);
  // Hierarchical solver handles square and non-square Jacobians alike.
  const stateCorrection = solveLeastSquares(dcdx, rhsColumn, logger, algorithmName);
  return addVectors(currentStates, stateCorrection);
}
|
|
308
|
+
/**
 * Projects states back onto the constraint manifold c(p, x) = 0 for fixed
 * parameters, via up to maxIterations Newton corrections:
 * (∂c/∂x) Δx = -c(p, x). This is a standard feasibility-restoration step
 * consistent with the implicit function theorem. Stops early once ||c||
 * falls within tolerance; if a correction step cannot be solved, the last
 * iterate is returned (best-effort, with a warning).
 */
export function projectStatesToConstraints(parameters, states, constraintFunction, stepSizeX, constraintTolerance, logger, algorithmName = 'constrainedOptimization', maxIterations = 3) {
  let current = new Float64Array(states);
  for (let iteration = 0; iteration < maxIterations; iteration++) {
    const residual = constraintFunction(parameters, current);
    // Converged: constraints satisfied to within tolerance.
    if (vectorNorm(residual) <= constraintTolerance) {
      break;
    }
    const jacobian = finiteDiffConstraintPartialX(parameters, current, constraintFunction, { stepSize: stepSizeX });
    const rhsColumn = float64ArrayToMatrix(scaleVector(residual, NEGATIVE_COEFFICIENT));
    try {
      const correction = solveLeastSquares(jacobian, rhsColumn, logger, algorithmName);
      current = new Float64Array(addVectors(current, correction));
    }
    catch (error) {
      // Best-effort projection: keep the last iterate if the solve fails.
      logger.warn(algorithmName, undefined, `Failed to project onto constraints: ${error}`);
      break;
    }
  }
  return current;
}
|
|
337
|
+
/**
 * Validates the initial point (p0, x0) against the constraints.
 * Evaluates c(p0, x0) and logs a warning when its norm exceeds the given
 * tolerance. Does not throw: constraint count and state count no longer
 * need to match, since the adjoint method supports non-square constraint
 * Jacobians.
 *
 * @param initialParameters - Initial parameter vector p0
 * @param initialStates - Initial state vector x0
 * @param constraintFunction - Constraint function c(p, x) = 0
 * @param constraintTolerance - Tolerance for constraint violation
 * @param logger - Logger instance for warnings
 * @param algorithmName - Name of calling algorithm (for error messages)
 */
export function validateInitialConditions(initialParameters, initialStates, constraintFunction, constraintTolerance, logger, algorithmName = 'constrainedOptimization') {
  const violation = vectorNorm(constraintFunction(initialParameters, initialStates));
  if (violation <= constraintTolerance) {
    return;
  }
  logger.warn(algorithmName, undefined, 'Initial constraint violation', [
    { key: '||c(p0,x0)||:', value: violation },
    { key: 'Tolerance:', value: constraintTolerance }
  ]);
}
|
|
364
|
+
//# sourceMappingURL=constrainedUtils.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"constrainedUtils.js","sourceRoot":"","sources":["../../src/core/constrainedUtils.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAEH,OAAO,EAAE,MAAM,EAAE,KAAK,EAAE,qBAAqB,EAAE,MAAM,WAAW,CAAC;AAEjE,OAAO,EAAE,UAAU,EAAE,WAAW,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACzE,OAAO,EAAE,oBAAoB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEhF,OAAO,EAAE,4BAA4B,EAAE,MAAM,iBAAiB,CAAC;AAE/D,MAAM,oBAAoB,GAAG,CAAC,GAAG,CAAC,CAAC,mCAAmC;AACtE,MAAM,gBAAgB,GAAG,EAAE,CAAC,CAAC,yCAAyC;AACtE,MAAM,2BAA2B,GAAG,CAAC,CAAC,CAAC,wEAAwE;AAC/G,MAAM,mBAAmB,GAAG,EAAE,CAAC,CAAC,8CAA8C;AAC9E,MAAM,+BAA+B,GAAG,CAAC,CAAC,CAAC,CAAC,+CAA+C;AAC3F,MAAM,2BAA2B,GAAG,CAAC,CAAC,CAAC,4CAA4C;AACnF,MAAM,gCAAgC,GAAG,CAAC,CAAC,CAAC,CAAC,+CAA+C;AAC5F,MAAM,sBAAsB,GAAG,CAAC,CAAC,CAAC,oEAAoE;AAEtG,SAAS,iBAAiB,CAAC,GAAW;IACpC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,GAAG,CAAC,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAClC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,GAAG,CAAC,OAAO,EAAE,CAAC,EAAE,EAAE,CAAC;YACrC,MAAM,CAAC,GAAG,GAAG,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;YACxB,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC;gBACxB,OAAO,EAAE,EAAE,EAAE,KAAK,EAAE,QAAQ,EAAE,EAAE,GAAG,EAAE,CAAC,EAAE,GAAG,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,EAAE,CAAC;YAC/D,CAAC;QACH,CAAC;IACH,CAAC;IACD,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,CAAC;AACtB,CAAC;AAED,SAAS,wBAAwB,CAAC,CAAS;IACzC,IAAI,GAAG,GAAG,CAAC,CAAC;IACZ,IAAI,MAAM,GAAG,MAAM,CAAC,iBAAiB,CAAC;IACtC,IAAI,MAAM,GAAG,CAAC,CAAC;IACf,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAChC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,OAAO,EAAE,CAAC,EAAE,EAAE,CAAC;YACnC,MAAM,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;YACtB,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;YACxB,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC;YACb,MAAM,GAAG,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;YAC/B,MAAM,GAAG,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;QACjC,CAAC;IACH,CAAC;IACD,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,MAAM,EAAE,MAAM,KAAK,MAAM,CAAC,iBAAiB,CAAC,CAAC,CAAC,CA
AC,CAAC,CAAC,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC;AACpG,CAAC;AAED,SAAS,wBAAwB,CAAC,CAAS;IAQzC,MAAM,QAAQ,GAAa,EAAE,CAAC;IAC9B,MAAM,QAAQ,GAAa,EAAE,CAAC;IAE9B,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAChC,IAAI,GAAG,GAAG,CAAC,CAAC;QACZ,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,OAAO,EAAE,CAAC,EAAE,EAAE,CAAC;YACnC,MAAM,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;YACtB,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC;QACf,CAAC;QACD,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC;IAChC,CAAC;IAED,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,OAAO,EAAE,CAAC,EAAE,EAAE,CAAC;QACnC,IAAI,GAAG,GAAG,CAAC,CAAC;QACZ,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;YAChC,MAAM,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;YACtB,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC;QACf,CAAC;QACD,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC;IAChC,CAAC;IAED,MAAM,QAAQ,GAAG,QAAQ,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,KAAK,EAAE,EAAE,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,sBAAsB,CAAC,CAAC;IACnI,MAAM,QAAQ,GAAG,QAAQ,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,KAAK,EAAE,EAAE,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,sBAAsB,CAAC,CAAC;IAEnI,OAAO;QACL,UAAU,EAAE,IAAI,CAAC,GAAG,CAAC,GAAG,QAAQ,CAAC;QACjC,UAAU,EAAE,IAAI,CAAC,GAAG,CAAC,GAAG,QAAQ,CAAC;QACjC,UAAU,EAAE,IAAI,CAAC,GAAG,CAAC,GAAG,QAAQ,CAAC;QACjC,UAAU,EAAE,IAAI,CAAC,GAAG,CAAC,GAAG,QAAQ,CAAC;QACjC,YAAY,EAAE,QAAQ;QACtB,YAAY,EAAE,QAAQ;KACvB,CAAC;AACJ,CAAC;AAED;;;GAGG;AACH,SAAS,2BAA2B,CAClC,OAAe,EACf,OAAe;IAEf,OAAO,OAAO,GAAG,CAAC;QAChB,CAAC,CAAC,OAAO,GAAG,IAAI,CAAC,GAAG,CAAC,mBAAmB,EAAE,OAAO,CAAC;QAClD,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,mBAAmB,EAAE,+BAA+B,GAAG,OAAO,CAAC,CAAC;AAC/E,CAAC;AAED;;;GAGG;AACH,SAAS,0BAA0B,CACjC,CAAS,EACT,CAAS,EACT,OAAe,EACf,MAAc,EACd,aA
AqB;IAErB,IAAI,SAAkB,CAAC;IACvB,KAAK,IAAI,OAAO,GAAG,CAAC,EAAE,OAAO,GAAG,2BAA2B,EAAE,OAAO,EAAE,EAAE,CAAC;QACvE,MAAM,MAAM,GAAG,2BAA2B,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC;QAC7D,MAAM,QAAQ,GAAG,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC;QAClE,IAAI,CAAC;YACH,MAAM,IAAI,GAAG,IAAI,qBAAqB,CAAC,QAAQ,CAAC,CAAC;YACjD,IAAI,IAAI,CAAC,kBAAkB,EAAE,EAAE,CAAC;gBAC9B,OAAO,oBAAoB,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;YAC7C,CAAC;QACH,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,SAAS,GAAG,GAAG,CAAC;QAClB,CAAC;QACD,IAAI,CAAC;YACH,OAAO,oBAAoB,CAAC,KAAK,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,CAAC;QAClD,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,SAAS,GAAG,GAAG,CAAC;YAChB,SAAS;QACX,CAAC;IACH,CAAC;IACD,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,+CAA+C,SAAS,EAAE,CAAC,CAAC;IAClG,MAAM,IAAI,KAAK,CACb,mEAAmE;QACnE,0DAA0D,SAAS,EAAE,CACtE,CAAC;AACJ,CAAC;AAED;;;GAGG;AACH,SAAS,iBAAiB,CACxB,IAAY,EACZ,CAAS,EACT,cAAsB,EACtB,MAAc,EACd,aAAqB;IAErB,MAAM,OAAO,GAAG,cAAc,GAAG,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;IACxD,MAAM,WAAW,GACf,IAAI,CAAC,IAAI,IAAI,gBAAgB,IAAI,IAAI,CAAC,OAAO,IAAI,gBAAgB;QAC/D,CAAC,CAAC,wBAAwB,CAAC,IAAI,CAAC;QAChC,CAAC,CAAC,SAAS,CAAC;IAChB,MAAM,cAAc,GAAG,wBAAwB,CAAC,CAAC,CAAC,CAAC;IAEnD,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,KAAK,CAAC,CAAC,IAAI,CAAC;IACpC,MAAM,OAAO,GAAG,iBAAiB,CAAC,IAAI,CAAC,CAAC;IACxC,MAAM,OAAO,GAAG,iBAAiB,CAAC,CAAC,CAAC,CAAC;IACrC,IAAI,CAAC,MAAM,IAAI,CAAC,OAAO,CAAC,EAAE,IAAI,CAAC,OAAO,CAAC,EAAE,EAAE,CAAC;QAC1C,MAAM,UAAU,GAAmD;YACjE,EAAE,GAAG,EAAE,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,IAAI,EAAE;YACnC,EAAE,GAAG,EAAE,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE;YACtC,EAAE,GAAG,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,CAAC,IAAI,EAAE;YAChC,EAAE,GAAG,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC,CAAC,OAAO,EAAE;SACpC,CAAC;QACF,IAAI,CAAC,OAAO,CAAC,EAAE,IAAI,OAAO,CAAC,QAAQ,EAAE,CAAC;YACpC,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;YACnE,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC
,CAAC;YACnE,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,CAAC,QAAQ,CAAC,KAAK,EAAE,CAAC,CAAC;QACvE,CAAC;QACD,IAAI,CAAC,OAAO,CAAC,EAAE,IAAI,OAAO,CAAC,QAAQ,EAAE,CAAC;YACpC,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;YACnE,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;YACnE,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,CAAC,QAAQ,CAAC,KAAK,EAAE,CAAC,CAAC;QACvE,CAAC;QACD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,KAAK,KAAK,QAAQ,CAA0C,CAAC;QACpH,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,qDAAqD,EAAE,cAAc,CAAC,CAAC;QAC7G,MAAM,IAAI,KAAK,CAAC,0DAA0D,CAAC,CAAC;IAC9E,CAAC;IAED,IAAI,CAAC;QACH,OAAO,0BAA0B,CAAC,IAAI,EAAE,CAAC,EAAE,OAAO,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;IAC7E,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,MAAM,UAAU,GAAmD;YACjE,EAAE,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,IAAI,CAAC,IAAI,EAAE;YACjC,EAAE,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,IAAI,CAAC,OAAO,EAAE;YACpC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,GAAG,CAAC,CAAC,CAAC,CAAC,OAAO,GAAG,IAAI,CAAC,GAAG,CAAC,mBAAmB,EAAE,2BAA2B,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,mBAAmB,EAAE,gCAAgC,CAAC,EAAE;SAClL,CAAC;QACF,UAAU,CAAC,IAAI,CACb,EAAE,GAAG,EAAE,UAAU,EAAE,KAAK,EAAE,cAAc,CAAC,IAAI,EAAE,EAC/C,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,cAAc,CAAC,MAAM,EAAE,EACpD,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,cAAc,CAAC,MAAM,EAAE,CACrD,CAAC;QACF,IAAI,WAAW,EAAE,CAAC;YAChB,UAAU,CAAC,IAAI,CACb,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,WAAW,CAAC,UAAU,EAAE,EACpD,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,WAAW,CAAC,UAAU,EAAE,EACpD,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,WAAW,CAAC,UAAU,EAAE,EACpD,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,WAAW,CAAC,UAAU,EAAE,CACrD,CAAC;YACF,WAAW,CAAC,YAAY,CAAC,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;gBAC5C,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,OAAO,GAAG,EAAE,EAAE,KAAK,EAAE,GAAG,CAAC,KAAK,EAAE,CAAC,CAAC;gBACzD,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,OAAO,GAAG,OAAO,EAAE,KAAK,EAAE,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;YAC/D,CAAC,CAAC,CAAC;YACH,W
AAW,CAAC,YAAY,CAAC,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;gBAC5C,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,OAAO,GAAG,EAAE,EAAE,KAAK,EAAE,GAAG,CAAC,KAAK,EAAE,CAAC,CAAC;gBACzD,UAAU,CAAC,IAAI,CAAC,EAAE,GAAG,EAAE,OAAO,GAAG,OAAO,EAAE,KAAK,EAAE,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;YAC/D,CAAC,CAAC,CAAC;QACL,CAAC;QACD,MAAM,cAAc,GAAG,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,CAAC,KAAK,KAAK,QAAQ,CAA0C,CAAC;QACpH,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,iEAAiE,KAAK,EAAE,EAAE,cAAc,CAAC,CAAC;QAChI,MAAM,KAAK,CAAC;IACd,CAAC;AACH,CAAC;AAED;;;GAGG;AACH,SAAS,yBAAyB,CAChC,CAAS,EACT,CAAS,EACT,cAAsB,EACtB,MAAc,EACd,aAAqB;IAErB,MAAM,EAAE,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC;IACzB,MAAM,GAAG,GAAG,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IACvB,MAAM,GAAG,GAAG,EAAE,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IACvB,MAAM,OAAO,GAAG,cAAc,GAAG,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;IAExD,IAAI,CAAC;QACH,OAAO,0BAA0B,CAAC,GAAG,EAAE,GAAG,EAAE,OAAO,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;IAC9E,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,gEAAgE,KAAK,EAAE,CAAC,CAAC;QAC/G,MAAM,IAAI,KAAK,CACb,uFAAuF;YACvF,gEAAgE,KAAK,EAAE,CACxE,CAAC;IACJ,CAAC;AACH,CAAC;AAED;;;GAGG;AACH,SAAS,0BAA0B,CACjC,CAAS,EACT,CAAS,EACT,cAAsB,EACtB,MAAc,EACd,aAAqB;IAErB,MAAM,EAAE,GAAG,CAAC,CAAC,SAAS,EAAE,CAAC;IACzB,MAAM,GAAG,GAAG,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;IACvB,MAAM,OAAO,GAAG,cAAc,GAAG,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;IAExD,IAAI,CAAC;QACH,MAAM,CAAC,GAAG,0BAA0B,CAAC,GAAG,EAAE,CAAC,EAAE,OAAO,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;QAC7E,MAAM,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC,CAAC,CAAC,CAAC;QAC3C,OAAO,oBAAoB,CAAC,CAAC,CAAC,CAAC;IACjC,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,iEAAiE,KAAK,EAAE,CAAC,CAAC;QAChH,MAAM,IAAI,KAAK,CACb,+FAA+F;YAC/F,gEAAgE,KAAK,EAAE,CACxE,CAAC;IACJ,CAAC;AACH,CAAC;AAED;;;;;;;;;;;;;GAaG;AACH,MAAM,UAAU,iBAAiB,CAC/B,CAAS,EACT,CAAS,EACT,MAAc,EACd,gBAAwB,yBAAyB,EACjD,iBAAyB,CAAC;IAE1B,MAAM,IAAI,GACR,cAAc,GAAG,CAAC,IAAI,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,OAAO;QACxC,CA
AC,CAAC,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC,CAAC,OAAO,CAAC,CAAC,GAAG,CAAC,cAAc,CAAC,CAAC;QAC1D,CAAC,CAAC,CAAC,CAAC;IAER,IAAI,IAAI,CAAC,IAAI,KAAK,IAAI,CAAC,OAAO,EAAE,CAAC;QAC/B,OAAO,iBAAiB,CAAC,IAAI,EAAE,CAAC,EAAE,cAAc,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;IAC3E,CAAC;IAED,MAAM,gBAAgB,GAAG,CAAC,CAAC,IAAI,GAAG,CAAC,CAAC,OAAO,CAAC;IAC5C,IAAI,gBAAgB,EAAE,CAAC;QACrB,OAAO,yBAAyB,CAAC,CAAC,EAAE,CAAC,EAAE,cAAc,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;IAChF,CAAC;IAED,OAAO,0BAA0B,CAAC,CAAC,EAAE,CAAC,EAAE,cAAc,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;AACjF,CAAC;AAED;;;;;;;;;;;;;GAaG;AACH,MAAM,UAAU,oBAAoB,CAClC,IAAY,EACZ,GAAiB,EACjB,MAAc,EACd,gBAAwB,yBAAyB,EACjD,iBAAyB,CAAC;IAE1B,gGAAgG;IAChG,MAAM,aAAa,GAAG,IAAI,CAAC,SAAS,EAAE,CAAC;IAEvC,MAAM,SAAS,GAAG,oBAAoB,CAAC,GAAG,CAAC,CAAC;IAE5C,0FAA0F;IAC1F,IAAI,CAAC;QACH,OAAO,iBAAiB,CAAC,aAAa,EAAE,SAAS,EAAE,MAAM,EAAE,aAAa,EAAE,cAAc,CAAC,CAAC;IAC5F,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,qCAAqC,KAAK,EAAE,CAAC,CAAC;QACpF,MAAM,IAAI,KAAK,CACb,sDAAsD;YACtD,oEAAoE;YACpE,gBAAgB,IAAI,CAAC,IAAI,MAAM,IAAI,CAAC,OAAO,IAAI;YAC/C,mBAAmB,KAAK,EAAE,CAC3B,CAAC;IACJ,CAAC;AACH,CAAC;AAED;;;;;;;;;;;;;;;;GAgBG;AACH,MAAM,UAAU,YAAY,CAC1B,aAA2B,EAC3B,IAAY,EACZ,IAAY,EACZ,MAAoB,EACpB,MAAc,EACd,gBAAwB,yBAAyB;IAEjD,sFAAsF;IACtF,MAAM,YAAY,GAAG,oBAAoB,CAAC,MAAM,CAAC,CAAC;IAClD,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;IAC3C,MAAM,gBAAgB,GAAG,oBAAoB,CAAC,UAAU,CAAC,CAAC;IAE1D,mFAAmF;IACnF,MAAM,kBAAkB,GAAG,WAAW,CAAC,gBAAgB,EAAE,oBAAoB,CAAC,CAAC;IAC/E,MAAM,wBAAwB,GAAG,oBAAoB,CAAC,kBAAkB,CAAC,CAAC;IAE1E,0FAA0F;IAC1F,MAAM,EAAE,GAAG,iBAAiB,CAAC,IAAI,EAAE,wBAAwB,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;IAEpF,OAAO,UAAU,CAAC,aAAa,EAAE,EAAE,CAAC,CAAC;AACvC,CAAC;AAED;;;;;GAKG;AACH,MAAM,UAAU,0BAA0B,CACxC,UAAwB,EACxB,MAAoB,EACpB,kBAAgC,EAChC,SAAiB,EACjB,mBAA2B,EAC3B,MAAc,EACd,gBAAwB,yBAAyB,EACjD,gBAAwB,CAAC;IAEzB,IAAI,eAAe,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,CAAC;IAE/C,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,EAAE,CAAC,EAAE,EAAE,CAAC;QACvC,MAAM,UAAU,GAAG
,kBAAkB,CAAC,UAAU,EAAE,eAAe,CAAC,CAAC;QACnE,MAAM,cAAc,GAAG,UAAU,CAAC,UAAU,CAAC,CAAC;QAC9C,IAAI,cAAc,IAAI,mBAAmB,EAAE,CAAC;YAC1C,MAAM;QACR,CAAC;QAED,MAAM,IAAI,GAAG,4BAA4B,CAAC,UAAU,EAAE,eAAe,EAAE,kBAAkB,EAAE,EAAE,QAAQ,EAAE,SAAS,EAAE,CAAC,CAAC;QACpH,MAAM,kBAAkB,GAAG,WAAW,CAAC,UAAU,EAAE,oBAAoB,CAAC,CAAC;QACzE,MAAM,wBAAwB,GAAG,oBAAoB,CAAC,kBAAkB,CAAC,CAAC;QAE1E,IAAI,CAAC;YACH,MAAM,MAAM,GAAG,iBAAiB,CAAC,IAAI,EAAE,wBAAwB,EAAE,MAAM,EAAE,aAAa,CAAC,CAAC;YACxF,MAAM,aAAa,GAAG,UAAU,CAAC,eAAe,EAAE,MAAM,CAAC,CAAC;YAC1D,eAAe,GAAG,IAAI,YAAY,CAAC,aAAa,CAAC,CAAC;QACpD,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,uCAAuC,KAAK,EAAE,CAAC,CAAC;YACtF,MAAM;QACR,CAAC;IACH,CAAC;IAED,OAAO,eAAe,CAAC;AACzB,CAAC;AAED;;;;;;;;;;;;;;GAcG;AACH,MAAM,UAAU,yBAAyB,CACvC,iBAA+B,EAC/B,aAA2B,EAC3B,kBAAgC,EAChC,mBAA2B,EAC3B,MAAc,EACd,gBAAwB,yBAAyB;IAEjD,MAAM,iBAAiB,GAAG,kBAAkB,CAAC,iBAAiB,EAAE,aAAa,CAAC,CAAC;IAC/E,MAAM,qBAAqB,GAAG,UAAU,CAAC,iBAAiB,CAAC,CAAC;IAC5D,IAAI,qBAAqB,GAAG,mBAAmB,EAAE,CAAC;QAChD,MAAM,CAAC,IAAI,CAAC,aAAa,EAAE,SAAS,EAAE,8BAA8B,EAAE;YACpE,EAAE,GAAG,EAAE,eAAe,EAAE,KAAK,EAAE,qBAAqB,EAAE;YACtD,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,mBAAmB,EAAE;SAClD,CAAC,CAAC;IACL,CAAC;IAED,kEAAkE;IAClE,mEAAmE;AACrE,CAAC"}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file provides helper functions for convergence checking across
|
|
3
|
+
* different optimization algorithms.
|
|
4
|
+
*
|
|
5
|
+
* Role in system:
|
|
6
|
+
* - Centralizes convergence checking logic (DRY principle)
|
|
7
|
+
* - Used by gradient descent, Gauss-Newton, and Levenberg-Marquardt
|
|
8
|
+
* - Provides consistent convergence criteria
|
|
9
|
+
*
|
|
10
|
+
* For first-time readers:
|
|
11
|
+
* - These are utility functions used by optimization algorithms
|
|
12
|
+
* - Each function checks a specific convergence criterion
|
|
13
|
+
*/
|
|
14
|
+
import type { OptimizationResult } from './types.js';
|
|
15
|
+
/**
|
|
16
|
+
* Creates a convergence result object with consistent structure.
|
|
17
|
+
* Used to avoid code duplication across optimization algorithms.
|
|
18
|
+
*/
|
|
19
|
+
export declare function createConvergenceResult(parameters: Float64Array, iteration: number, converged: boolean, finalCost: number, finalGradientNorm?: number): OptimizationResult;
|
|
20
|
+
/**
|
|
21
|
+
* Checks if gradient norm indicates convergence.
|
|
22
|
+
* Returns true if gradient is small enough (algorithm has found a stationary point).
|
|
23
|
+
*/
|
|
24
|
+
export declare function checkGradientConvergence(gradientNorm: number, tolerance: number, iteration: number): boolean;
|
|
25
|
+
/**
|
|
26
|
+
* Checks if step size indicates convergence.
|
|
27
|
+
* Returns true if step is small enough (algorithm is making minimal progress).
|
|
28
|
+
*/
|
|
29
|
+
export declare function checkStepSizeConvergence(stepNorm: number, tolerance: number, iteration: number): boolean;
|
|
30
|
+
/**
|
|
31
|
+
* Checks if residual norm indicates convergence.
|
|
32
|
+
* Returns true if residual is small enough (problem is solved to desired accuracy).
|
|
33
|
+
*/
|
|
34
|
+
export declare function checkResidualConvergence(residualNorm: number, tolerance: number, iteration: number): boolean;
|
|
35
|
+
//# sourceMappingURL=convergence.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"convergence.d.ts","sourceRoot":"","sources":["../../src/core/convergence.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,YAAY,CAAC;AAErD;;;GAGG;AACH,wBAAgB,uBAAuB,CACrC,UAAU,EAAE,YAAY,EACxB,SAAS,EAAE,MAAM,EACjB,SAAS,EAAE,OAAO,EAClB,SAAS,EAAE,MAAM,EACjB,iBAAiB,CAAC,EAAE,MAAM,GACzB,kBAAkB,CAQpB;AAED;;;GAGG;AACH,wBAAgB,wBAAwB,CACtC,YAAY,EAAE,MAAM,EACpB,SAAS,EAAE,MAAM,EACjB,SAAS,EAAE,MAAM,GAChB,OAAO,CAGT;AAED;;;GAGG;AACH,wBAAgB,wBAAwB,CACtC,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,EACjB,SAAS,EAAE,MAAM,GAChB,OAAO,CAGT;AAED;;;GAGG;AACH,wBAAgB,wBAAwB,CACtC,YAAY,EAAE,MAAM,EACpB,SAAS,EAAE,MAAM,EACjB,SAAS,EAAE,MAAM,GAChB,OAAO,CAGT"}
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file provides helper functions for convergence checking across
|
|
3
|
+
* different optimization algorithms.
|
|
4
|
+
*
|
|
5
|
+
* Role in system:
|
|
6
|
+
* - Centralizes convergence checking logic (DRY principle)
|
|
7
|
+
* - Used by gradient descent, Gauss-Newton, and Levenberg-Marquardt
|
|
8
|
+
* - Provides consistent convergence criteria
|
|
9
|
+
*
|
|
10
|
+
* For first-time readers:
|
|
11
|
+
* - These are utility functions used by optimization algorithms
|
|
12
|
+
* - Each function checks a specific convergence criterion
|
|
13
|
+
*/
|
|
14
|
+
/**
|
|
15
|
+
* Creates a convergence result object with consistent structure.
|
|
16
|
+
* Used to avoid code duplication across optimization algorithms.
|
|
17
|
+
*/
|
|
18
|
+
/**
 * Builds the standard result object that the optimization algorithms return.
 * Centralized here so every optimizer reports results with the same shape.
 *
 * @param {Float64Array} parameters - Final parameter vector.
 * @param {number} iteration - Zero-based index of the last iteration executed.
 * @param {boolean} converged - Whether a convergence criterion was met.
 * @param {number} finalCost - Cost value at the final parameters.
 * @param {number} [finalGradientNorm] - Gradient norm at the final parameters, if available.
 * @returns {object} The assembled optimization result.
 */
export function createConvergenceResult(parameters, iteration, converged, finalCost, finalGradientNorm) {
  // The iteration index is 0-based, so the reported count is one higher.
  const result = {
    parameters,
    iterations: iteration + 1,
    converged,
    finalCost,
    finalGradientNorm,
  };
  return result;
}
|
|
27
|
+
/**
|
|
28
|
+
* Checks if gradient norm indicates convergence.
|
|
29
|
+
* Returns true if gradient is small enough (algorithm has found a stationary point).
|
|
30
|
+
*/
|
|
31
|
+
/**
 * Reports whether the gradient norm is below tolerance, i.e. the algorithm
 * has reached a stationary point.
 *
 * @param {number} gradientNorm - Current gradient norm.
 * @param {number} tolerance - Convergence threshold (strict comparison).
 * @param {number} iteration - Zero-based iteration index.
 * @returns {boolean} True when converged.
 */
export function checkGradientConvergence(gradientNorm, tolerance, iteration) {
  // On iteration 0 no step has been taken yet, so never declare convergence.
  if (iteration <= 0) {
    return false;
  }
  return gradientNorm < tolerance;
}
|
|
35
|
+
/**
|
|
36
|
+
* Checks if step size indicates convergence.
|
|
37
|
+
* Returns true if step is small enough (algorithm is making minimal progress).
|
|
38
|
+
*/
|
|
39
|
+
/**
 * Reports whether the last step was small enough to stop, i.e. the algorithm
 * is making negligible progress.
 *
 * @param {number} stepNorm - Norm of the most recent parameter step.
 * @param {number} tolerance - Convergence threshold (strict comparison).
 * @param {number} iteration - Zero-based iteration index.
 * @returns {boolean} True when converged.
 */
export function checkStepSizeConvergence(stepNorm, tolerance, iteration) {
  // On iteration 0 no step has been taken yet, so never declare convergence.
  if (iteration <= 0) {
    return false;
  }
  return stepNorm < tolerance;
}
|
|
43
|
+
/**
|
|
44
|
+
* Checks if residual norm indicates convergence.
|
|
45
|
+
* Returns true if residual is small enough (problem is solved to desired accuracy).
|
|
46
|
+
*/
|
|
47
|
+
/**
 * Reports whether the residual norm is below tolerance, i.e. the problem is
 * solved to the requested accuracy.
 *
 * @param {number} residualNorm - Current residual norm.
 * @param {number} tolerance - Convergence threshold (strict comparison).
 * @param {number} iteration - Zero-based iteration index.
 * @returns {boolean} True when converged.
 */
export function checkResidualConvergence(residualNorm, tolerance, iteration) {
  // On iteration 0 no step has been taken yet, so never declare convergence.
  if (iteration <= 0) {
    return false;
  }
  return residualNorm < tolerance;
}
|
|
51
|
+
//# sourceMappingURL=convergence.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"convergence.js","sourceRoot":"","sources":["../../src/core/convergence.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAIH;;;GAGG;AACH,MAAM,UAAU,uBAAuB,CACrC,UAAwB,EACxB,SAAiB,EACjB,SAAkB,EAClB,SAAiB,EACjB,iBAA0B;IAE1B,OAAO;QACL,UAAU;QACV,UAAU,EAAE,SAAS,GAAG,CAAC;QACzB,SAAS;QACT,SAAS;QACT,iBAAiB;KAClB,CAAC;AACJ,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,wBAAwB,CACtC,YAAoB,EACpB,SAAiB,EACjB,SAAiB;IAEjB,gEAAgE;IAChE,OAAO,SAAS,GAAG,CAAC,IAAI,YAAY,GAAG,SAAS,CAAC;AACnD,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,wBAAwB,CACtC,QAAgB,EAChB,SAAiB,EACjB,SAAiB;IAEjB,gEAAgE;IAChE,OAAO,SAAS,GAAG,CAAC,IAAI,QAAQ,GAAG,SAAS,CAAC;AAC/C,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,wBAAwB,CACtC,YAAoB,EACpB,SAAiB,EACjB,SAAiB;IAEjB,gEAAgE;IAChE,OAAO,SAAS,GAAG,CAAC,IAAI,YAAY,GAAG,SAAS,CAAC;AACnD,CAAC"}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file provides helper functions for creating gradient and Jacobian functions
|
|
3
|
+
* from cost and residual functions using finite differences.
|
|
4
|
+
*
|
|
5
|
+
* Role in system:
|
|
6
|
+
* - Simplifies the API for users who want to use numerical differentiation
|
|
7
|
+
* - Prevents common mistakes with parameter ordering
|
|
8
|
+
* - Provides a more intuitive interface for optimization algorithms
|
|
9
|
+
*
|
|
10
|
+
* For first-time readers:
|
|
11
|
+
* - Use createFiniteDiffGradient when you have a cost function and need a gradient
|
|
12
|
+
* - Use createFiniteDiffJacobian when you have a residual function and need a Jacobian
|
|
13
|
+
* - These are convenience wrappers around finiteDiffGradient and finiteDiffJacobian
|
|
14
|
+
*/
|
|
15
|
+
import type { CostFn, GradientFn, ResidualFn, JacobianFn, NumericalDifferentiationOptions } from './types.js';
|
|
16
|
+
/**
|
|
17
|
+
* Creates a gradient function from a cost function using finite differences.
|
|
18
|
+
*
|
|
19
|
+
* This is a convenience wrapper around finiteDiffGradient that returns a gradient
|
|
20
|
+
* function suitable for use with optimization algorithms like gradientDescent.
|
|
21
|
+
*
|
|
22
|
+
* @param costFunction - The cost function to differentiate
|
|
23
|
+
* @param options - Optional numerical differentiation settings
|
|
24
|
+
* @returns A gradient function that can be passed to optimization algorithms
|
|
25
|
+
*
|
|
26
|
+
* @example
|
|
27
|
+
* ```typescript
|
|
28
|
+
* import { gradientDescent, createFiniteDiffGradient } from 'numopt-js';
|
|
29
|
+
*
|
|
30
|
+
* // Define your cost function
|
|
31
|
+
* const costFn = (params) => Math.pow(params[0] - 3, 2) + Math.pow(params[1] - 2, 2);
|
|
32
|
+
*
|
|
33
|
+
* // Create a gradient function (no need to worry about parameter order!)
|
|
34
|
+
* const gradientFn = createFiniteDiffGradient(costFn);
|
|
35
|
+
*
|
|
36
|
+
* // Use it with an optimizer
|
|
37
|
+
* const result = gradientDescent(
|
|
38
|
+
* new Float64Array([0, 0]),
|
|
39
|
+
* costFn,
|
|
40
|
+
* gradientFn,
|
|
41
|
+
* { maxIterations: 100, tolerance: 1e-6 }
|
|
42
|
+
* );
|
|
43
|
+
* ```
|
|
44
|
+
*
|
|
45
|
+
* @example
|
|
46
|
+
* ```typescript
|
|
47
|
+
* // With custom step size
|
|
48
|
+
* const gradientFn = createFiniteDiffGradient(costFn, { stepSize: 1e-8 });
|
|
49
|
+
* ```
|
|
50
|
+
*/
|
|
51
|
+
export declare function createFiniteDiffGradient(costFunction: CostFn, options?: NumericalDifferentiationOptions): GradientFn;
|
|
52
|
+
/**
|
|
53
|
+
* Creates a Jacobian function from a residual function using finite differences.
|
|
54
|
+
*
|
|
55
|
+
* This is a convenience wrapper around finiteDiffJacobian that returns a Jacobian
|
|
56
|
+
* function suitable for use with optimization algorithms like gaussNewton.
|
|
57
|
+
*
|
|
58
|
+
* @param residualFunction - The residual function to differentiate
|
|
59
|
+
* @param options - Optional numerical differentiation settings
|
|
60
|
+
* @returns A Jacobian function that can be passed to optimization algorithms
|
|
61
|
+
*
|
|
62
|
+
* @example
|
|
63
|
+
* ```typescript
|
|
64
|
+
* import { gaussNewton, createFiniteDiffJacobian } from 'numopt-js';
|
|
65
|
+
*
|
|
66
|
+
* // Define your residual function
|
|
67
|
+
* const residualFn = (params) => {
|
|
68
|
+
* // Return residuals for curve fitting, etc.
|
|
69
|
+
* return new Float64Array([...]);
|
|
70
|
+
* };
|
|
71
|
+
*
|
|
72
|
+
* // Create a Jacobian function
|
|
73
|
+
* const jacobianFn = createFiniteDiffJacobian(residualFn);
|
|
74
|
+
*
|
|
75
|
+
* // Use it with an optimizer
|
|
76
|
+
* const result = gaussNewton(
|
|
77
|
+
* new Float64Array([1, 1]),
|
|
78
|
+
* residualFn,
|
|
79
|
+
* jacobianFn,
|
|
80
|
+
* { maxIterations: 100, tolerance: 1e-6 }
|
|
81
|
+
* );
|
|
82
|
+
* ```
|
|
83
|
+
*/
|
|
84
|
+
export declare function createFiniteDiffJacobian(residualFunction: ResidualFn, options?: NumericalDifferentiationOptions): JacobianFn;
|
|
85
|
+
//# sourceMappingURL=createGradientFunction.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"createGradientFunction.d.ts","sourceRoot":"","sources":["../../src/core/createGradientFunction.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;GAaG;AAGH,OAAO,KAAK,EACR,MAAM,EACN,UAAU,EACV,UAAU,EACV,UAAU,EACV,+BAA+B,EAClC,MAAM,YAAY,CAAC;AAEpB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAkCG;AACH,wBAAgB,wBAAwB,CACpC,YAAY,EAAE,MAAM,EACpB,OAAO,CAAC,EAAE,+BAA+B,GAC1C,UAAU,CAIZ;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA+BG;AACH,wBAAgB,wBAAwB,CACpC,gBAAgB,EAAE,UAAU,EAC5B,OAAO,CAAC,EAAE,+BAA+B,GAC1C,UAAU,CAIZ"}
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This file provides helper functions for creating gradient and Jacobian functions
|
|
3
|
+
* from cost and residual functions using finite differences.
|
|
4
|
+
*
|
|
5
|
+
* Role in system:
|
|
6
|
+
* - Simplifies the API for users who want to use numerical differentiation
|
|
7
|
+
* - Prevents common mistakes with parameter ordering
|
|
8
|
+
* - Provides a more intuitive interface for optimization algorithms
|
|
9
|
+
*
|
|
10
|
+
* For first-time readers:
|
|
11
|
+
* - Use createFiniteDiffGradient when you have a cost function and need a gradient
|
|
12
|
+
* - Use createFiniteDiffJacobian when you have a residual function and need a Jacobian
|
|
13
|
+
* - These are convenience wrappers around finiteDiffGradient and finiteDiffJacobian
|
|
14
|
+
*/
|
|
15
|
+
import { finiteDiffGradient, finiteDiffJacobian } from './finiteDiff.js';
|
|
16
|
+
/**
|
|
17
|
+
* Creates a gradient function from a cost function using finite differences.
|
|
18
|
+
*
|
|
19
|
+
* This is a convenience wrapper around finiteDiffGradient that returns a gradient
|
|
20
|
+
* function suitable for use with optimization algorithms like gradientDescent.
|
|
21
|
+
*
|
|
22
|
+
* @param costFunction - The cost function to differentiate
|
|
23
|
+
* @param options - Optional numerical differentiation settings
|
|
24
|
+
* @returns A gradient function that can be passed to optimization algorithms
|
|
25
|
+
*
|
|
26
|
+
* @example
|
|
27
|
+
* ```typescript
|
|
28
|
+
* import { gradientDescent, createFiniteDiffGradient } from 'numopt-js';
|
|
29
|
+
*
|
|
30
|
+
* // Define your cost function
|
|
31
|
+
* const costFn = (params) => Math.pow(params[0] - 3, 2) + Math.pow(params[1] - 2, 2);
|
|
32
|
+
*
|
|
33
|
+
* // Create a gradient function (no need to worry about parameter order!)
|
|
34
|
+
* const gradientFn = createFiniteDiffGradient(costFn);
|
|
35
|
+
*
|
|
36
|
+
* // Use it with an optimizer
|
|
37
|
+
* const result = gradientDescent(
|
|
38
|
+
* new Float64Array([0, 0]),
|
|
39
|
+
* costFn,
|
|
40
|
+
* gradientFn,
|
|
41
|
+
* { maxIterations: 100, tolerance: 1e-6 }
|
|
42
|
+
* );
|
|
43
|
+
* ```
|
|
44
|
+
*
|
|
45
|
+
* @example
|
|
46
|
+
* ```typescript
|
|
47
|
+
* // With custom step size
|
|
48
|
+
* const gradientFn = createFiniteDiffGradient(costFn, { stepSize: 1e-8 });
|
|
49
|
+
* ```
|
|
50
|
+
*/
|
|
51
|
+
/**
 * Wraps a cost function into a gradient function backed by finite differences.
 * The returned closure fixes the cost function and options; derivatives are
 * evaluated lazily each time it is called.
 *
 * @param {Function} costFunction - Scalar cost function to differentiate.
 * @param {object} [options] - Numerical differentiation settings (e.g. step size).
 * @returns {Function} Gradient function accepting a parameter vector.
 */
export function createFiniteDiffGradient(costFunction, options) {
  const gradientFn = (params) => finiteDiffGradient(params, costFunction, options);
  return gradientFn;
}
|
|
56
|
+
/**
|
|
57
|
+
* Creates a Jacobian function from a residual function using finite differences.
|
|
58
|
+
*
|
|
59
|
+
* This is a convenience wrapper around finiteDiffJacobian that returns a Jacobian
|
|
60
|
+
* function suitable for use with optimization algorithms like gaussNewton.
|
|
61
|
+
*
|
|
62
|
+
* @param residualFunction - The residual function to differentiate
|
|
63
|
+
* @param options - Optional numerical differentiation settings
|
|
64
|
+
* @returns A Jacobian function that can be passed to optimization algorithms
|
|
65
|
+
*
|
|
66
|
+
* @example
|
|
67
|
+
* ```typescript
|
|
68
|
+
* import { gaussNewton, createFiniteDiffJacobian } from 'numopt-js';
|
|
69
|
+
*
|
|
70
|
+
* // Define your residual function
|
|
71
|
+
* const residualFn = (params) => {
|
|
72
|
+
* // Return residuals for curve fitting, etc.
|
|
73
|
+
* return new Float64Array([...]);
|
|
74
|
+
* };
|
|
75
|
+
*
|
|
76
|
+
* // Create a Jacobian function
|
|
77
|
+
* const jacobianFn = createFiniteDiffJacobian(residualFn);
|
|
78
|
+
*
|
|
79
|
+
* // Use it with an optimizer
|
|
80
|
+
* const result = gaussNewton(
|
|
81
|
+
* new Float64Array([1, 1]),
|
|
82
|
+
* residualFn,
|
|
83
|
+
* jacobianFn,
|
|
84
|
+
* { maxIterations: 100, tolerance: 1e-6 }
|
|
85
|
+
* );
|
|
86
|
+
* ```
|
|
87
|
+
*/
|
|
88
|
+
/**
 * Wraps a residual function into a Jacobian function backed by finite
 * differences. The returned closure fixes the residual function and options;
 * the Jacobian is evaluated lazily each time it is called.
 *
 * @param {Function} residualFunction - Vector-valued residual function to differentiate.
 * @param {object} [options] - Numerical differentiation settings (e.g. step size).
 * @returns {Function} Jacobian function accepting a parameter vector.
 */
export function createFiniteDiffJacobian(residualFunction, options) {
  const jacobianFn = (params) => finiteDiffJacobian(residualFunction, params, options);
  return jacobianFn;
}
|
|
93
|
+
//# sourceMappingURL=createGradientFunction.js.map
|