numopt-js 0.1.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/CODING_RULES.md +161 -0
  2. package/LICENSE +22 -0
  3. package/README.md +807 -0
  4. package/dist/core/adjointGradientDescent.d.ts +61 -0
  5. package/dist/core/adjointGradientDescent.d.ts.map +1 -0
  6. package/dist/core/adjointGradientDescent.js +764 -0
  7. package/dist/core/adjointGradientDescent.js.map +1 -0
  8. package/dist/core/constrainedGaussNewton.d.ts +44 -0
  9. package/dist/core/constrainedGaussNewton.d.ts.map +1 -0
  10. package/dist/core/constrainedGaussNewton.js +314 -0
  11. package/dist/core/constrainedGaussNewton.js.map +1 -0
  12. package/dist/core/constrainedLevenbergMarquardt.d.ts +46 -0
  13. package/dist/core/constrainedLevenbergMarquardt.d.ts.map +1 -0
  14. package/dist/core/constrainedLevenbergMarquardt.js +469 -0
  15. package/dist/core/constrainedLevenbergMarquardt.js.map +1 -0
  16. package/dist/core/constrainedUtils.d.ts +92 -0
  17. package/dist/core/constrainedUtils.d.ts.map +1 -0
  18. package/dist/core/constrainedUtils.js +364 -0
  19. package/dist/core/constrainedUtils.js.map +1 -0
  20. package/dist/core/convergence.d.ts +35 -0
  21. package/dist/core/convergence.d.ts.map +1 -0
  22. package/dist/core/convergence.js +51 -0
  23. package/dist/core/convergence.js.map +1 -0
  24. package/dist/core/createGradientFunction.d.ts +85 -0
  25. package/dist/core/createGradientFunction.d.ts.map +1 -0
  26. package/dist/core/createGradientFunction.js +93 -0
  27. package/dist/core/createGradientFunction.js.map +1 -0
  28. package/dist/core/effectiveJacobian.d.ts +90 -0
  29. package/dist/core/effectiveJacobian.d.ts.map +1 -0
  30. package/dist/core/effectiveJacobian.js +128 -0
  31. package/dist/core/effectiveJacobian.js.map +1 -0
  32. package/dist/core/finiteDiff.d.ts +171 -0
  33. package/dist/core/finiteDiff.d.ts.map +1 -0
  34. package/dist/core/finiteDiff.js +363 -0
  35. package/dist/core/finiteDiff.js.map +1 -0
  36. package/dist/core/gaussNewton.d.ts +29 -0
  37. package/dist/core/gaussNewton.d.ts.map +1 -0
  38. package/dist/core/gaussNewton.js +151 -0
  39. package/dist/core/gaussNewton.js.map +1 -0
  40. package/dist/core/gradientDescent.d.ts +35 -0
  41. package/dist/core/gradientDescent.d.ts.map +1 -0
  42. package/dist/core/gradientDescent.js +204 -0
  43. package/dist/core/gradientDescent.js.map +1 -0
  44. package/dist/core/jacobianComputation.d.ts +24 -0
  45. package/dist/core/jacobianComputation.d.ts.map +1 -0
  46. package/dist/core/jacobianComputation.js +38 -0
  47. package/dist/core/jacobianComputation.js.map +1 -0
  48. package/dist/core/levenbergMarquardt.d.ts +36 -0
  49. package/dist/core/levenbergMarquardt.d.ts.map +1 -0
  50. package/dist/core/levenbergMarquardt.js +286 -0
  51. package/dist/core/levenbergMarquardt.js.map +1 -0
  52. package/dist/core/lineSearch.d.ts +42 -0
  53. package/dist/core/lineSearch.d.ts.map +1 -0
  54. package/dist/core/lineSearch.js +106 -0
  55. package/dist/core/lineSearch.js.map +1 -0
  56. package/dist/core/logger.d.ts +77 -0
  57. package/dist/core/logger.d.ts.map +1 -0
  58. package/dist/core/logger.js +162 -0
  59. package/dist/core/logger.js.map +1 -0
  60. package/dist/core/types.d.ts +427 -0
  61. package/dist/core/types.d.ts.map +1 -0
  62. package/dist/core/types.js +15 -0
  63. package/dist/core/types.js.map +1 -0
  64. package/dist/index.d.ts +26 -0
  65. package/dist/index.d.ts.map +1 -0
  66. package/dist/index.js +29 -0
  67. package/dist/index.js.map +1 -0
  68. package/dist/utils/formatting.d.ts +27 -0
  69. package/dist/utils/formatting.d.ts.map +1 -0
  70. package/dist/utils/formatting.js +54 -0
  71. package/dist/utils/formatting.js.map +1 -0
  72. package/dist/utils/matrix.d.ts +63 -0
  73. package/dist/utils/matrix.d.ts.map +1 -0
  74. package/dist/utils/matrix.js +129 -0
  75. package/dist/utils/matrix.js.map +1 -0
  76. package/dist/utils/resultFormatter.d.ts +122 -0
  77. package/dist/utils/resultFormatter.d.ts.map +1 -0
  78. package/dist/utils/resultFormatter.js +342 -0
  79. package/dist/utils/resultFormatter.js.map +1 -0
  80. package/package.json +74 -0
@@ -0,0 +1,204 @@
+ /**
+ * This file implements the gradient descent optimization algorithm.
+ *
+ * References:
+ * - Nocedal & Wright, "Numerical Optimization" (2nd ed.), Chapter 2 (steepest descent and line search basics)
+ * - Boyd & Vandenberghe, "Convex Optimization", Section 9.3 (backtracking line search with Armijo rule)
+ *
+ * Role in system:
+ * - Phase 1 foundation algorithm (simple, testable)
+ * - Establishes basic optimization framework
+ * - Used as building block for more advanced methods
+ *
+ * For first-time readers:
+ * - Start with gradientDescent function
+ * - Understand how it uses line search or fixed step size
+ * - Check convergence criteria implementation
+ */
+ import { backtrackingLineSearch } from './lineSearch.js';
+ import { vectorNorm, scaleVector, addVectors } from '../utils/matrix.js';
+ import { checkGradientConvergence, checkStepSizeConvergence, createConvergenceResult } from './convergence.js';
+ import { Logger } from './logger.js';
+ const DEFAULT_MAX_ITERATIONS = 1000;
+ const DEFAULT_TOLERANCE = 1e-6;
+ const DEFAULT_STEP_SIZE = 0.01;
+ const DEFAULT_USE_LINE_SEARCH = true;
+ const ZERO_STEP_SIZE = 0.0; // Indicates line search found no valid step (not a descent direction)
+ const NEGATIVE_GRADIENT_DIRECTION = -1.0; // Multiplier for negative gradient direction (steepest descent)
+ /**
+ * Determines the step size for gradient descent iteration.
+ * Uses line search if enabled, otherwise uses fixed step size.
+ * Returns the step size and whether line search was used.
+ */
+ function determineStepSize(currentGradient, currentParameters, costFunction, gradientFunction, useLineSearch, fixedStepSize) {
+ // Early return for fixed step size case
+ if (!useLineSearch || fixedStepSize !== undefined) {
+ return { stepSize: fixedStepSize ?? DEFAULT_STEP_SIZE, usedLineSearch: false };
+ }
+ // Use line search when enabled and no fixed step size provided
+ const searchDirection = scaleVector(currentGradient, NEGATIVE_GRADIENT_DIRECTION);
+ // Backtracking line search with Armijo condition (Boyd & Vandenberghe, Sec. 9.3)
+ // to choose a step satisfying sufficient decrease (Nocedal & Wright, Ch. 2)
+ const stepSize = backtrackingLineSearch(costFunction, gradientFunction, currentParameters, searchDirection);
+ return { stepSize, usedLineSearch: true };
+ }
+ /**
+ * Updates parameters by taking a step in the negative gradient direction.
+ * Returns the new parameters and the step vector.
+ */
+ function updateParametersWithGradientStep(currentParameters, currentGradient, stepSize) {
+ const negativeStepSize = NEGATIVE_GRADIENT_DIRECTION * stepSize;
+ const step = scaleVector(currentGradient, negativeStepSize);
+ const newParameters = addVectors(currentParameters, step);
+ return { newParameters, step };
+ }
+ /**
+ * Checks gradient convergence and returns result if converged.
+ * Early return pattern to reduce nesting.
+ */
+ function checkGradientConvergenceAndReturn(currentParameters, iteration, currentCost, gradientNorm, tolerance, usedLineSearchFlag, logger) {
+ if (checkGradientConvergence(gradientNorm, tolerance, iteration)) {
+ logger.info('gradientDescent', iteration, 'Converged', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'Gradient norm:', value: gradientNorm }
+ ]);
+ const result = createConvergenceResult(currentParameters, iteration, true, currentCost, gradientNorm);
+ return { converged: true, result: { ...result, usedLineSearch: usedLineSearchFlag } };
+ }
+ return { converged: false };
+ }
+ /**
+ * Handles line search failure case.
+ * Returns convergence result indicating failure.
+ */
+ function handleLineSearchFailure(currentParameters, iteration, currentCost, gradientNorm, logger) {
+ logger.warn('gradientDescent', iteration, 'Line search failed', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'Gradient norm:', value: gradientNorm }
+ ]);
+ return {
+ converged: true,
+ result: {
+ parameters: currentParameters,
+ iterations: iteration,
+ converged: false,
+ finalCost: currentCost,
+ finalGradientNorm: gradientNorm,
+ usedLineSearch: true
+ }
+ };
+ }
+ /**
+ * Checks step size convergence and returns result if converged.
+ * Early return pattern to reduce nesting.
+ */
+ function checkStepSizeConvergenceAndReturn(currentParameters, iteration, currentCost, gradientNorm, stepNorm, tolerance, newUsedLineSearch, logger) {
+ if (checkStepSizeConvergence(stepNorm, tolerance, iteration)) {
+ logger.info('gradientDescent', iteration, 'Converged', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'Gradient norm:', value: gradientNorm },
+ { key: 'Step size:', value: stepNorm }
+ ]);
+ const result = createConvergenceResult(currentParameters, iteration, true, currentCost, gradientNorm);
+ return { converged: true, result: { ...result, usedLineSearch: newUsedLineSearch } };
+ }
+ return { converged: false };
+ }
+ /**
+ * Performs a single gradient descent iteration.
+ * Returns the updated state or a convergence result if converged.
+ */
+ function performGradientDescentIteration(iteration, currentParameters, currentCost, costFunction, gradientFunction, tolerance, useLineSearch, fixedStepSize, onIteration, logger, usedLineSearchFlag) {
+ const currentGradient = gradientFunction(currentParameters);
+ const gradientNorm = vectorNorm(currentGradient); // Uses Euclidean norm for steepest descent direction (Nocedal & Wright, Ch. 2)
+ // Invoke the optional per-iteration progress callback
+ if (onIteration) {
+ // iteration is zero-based and passed through unchanged
+ onIteration(iteration, currentCost, currentParameters);
+ }
+ // Check gradient convergence - early return
+ const gradientConvergenceResult = checkGradientConvergenceAndReturn(currentParameters, iteration, currentCost, gradientNorm, tolerance, usedLineSearchFlag, logger);
+ if (gradientConvergenceResult.converged && gradientConvergenceResult.result) {
+ return { converged: true, result: gradientConvergenceResult.result };
+ }
+ // Determine step size
+ const stepSizeResult = determineStepSize(currentGradient, currentParameters, costFunction, gradientFunction, useLineSearch, fixedStepSize);
+ // Early return: line search failed
+ if (stepSizeResult.stepSize === ZERO_STEP_SIZE) {
+ const failureResult = handleLineSearchFailure(currentParameters, iteration, currentCost, gradientNorm, logger);
+ return failureResult;
+ }
+ const newUsedLineSearch = usedLineSearchFlag || stepSizeResult.usedLineSearch;
+ // Update parameters
+ const { newParameters, step } = updateParametersWithGradientStep(currentParameters, currentGradient, stepSizeResult.stepSize);
+ const newCost = costFunction(newParameters);
+ // Check step size convergence - early return
+ const stepNorm = vectorNorm(step); // Step length via 2-norm for step-size convergence (Boyd & Vandenberghe, Sec. 9.3)
+ const stepSizeConvergenceResult = checkStepSizeConvergenceAndReturn(currentParameters, iteration, currentCost, gradientNorm, stepNorm, tolerance, newUsedLineSearch, logger);
+ if (stepSizeConvergenceResult.converged && stepSizeConvergenceResult.result) {
+ return { converged: true, result: stepSizeConvergenceResult.result };
+ }
+ // Log progress
+ logger.debug('gradientDescent', iteration, 'Progress', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'Gradient norm:', value: gradientNorm },
+ { key: 'Step size:', value: stepSizeResult.stepSize }
+ ]);
+ return { converged: false, newParameters, newCost, newUsedLineSearch };
+ }
+ /**
+ * Performs gradient descent optimization to minimize a cost function.
+ *
+ * Algorithm:
+ * 1. Start with initial parameters
+ * 2. Compute gradient at current point
+ * 3. Move in negative gradient direction (steepest descent)
+ * 4. Use line search or fixed step size to determine step
+ * 5. Repeat until convergence or max iterations
+ *
+ * Convergence criteria:
+ * - Gradient norm < tolerance
+ * - Step size < tolerance
+ * - Maximum iterations reached
+ */
+ export function gradientDescent(initialParameters, costFunction, gradientFunction, options = {}) {
+ const maxIterations = options.maxIterations ?? DEFAULT_MAX_ITERATIONS;
+ const tolerance = options.tolerance ?? DEFAULT_TOLERANCE;
+ const stepSize = options.stepSize;
+ const useLineSearch = options.useLineSearch ?? DEFAULT_USE_LINE_SEARCH;
+ const onIteration = options.onIteration;
+ const logger = new Logger(options.logLevel, options.verbose);
+ let currentParameters = new Float64Array(initialParameters);
+ let currentCost = costFunction(currentParameters);
+ let usedLineSearchFlag = false;
+ for (let iteration = 0; iteration < maxIterations; iteration++) {
+ const iterationResult = performGradientDescentIteration(iteration, currentParameters, currentCost, costFunction, gradientFunction, tolerance, useLineSearch, stepSize, onIteration, logger, usedLineSearchFlag);
+ if (iterationResult.converged && iterationResult.result) {
+ return iterationResult.result;
+ }
+ if (iterationResult.newParameters && iterationResult.newCost !== undefined) {
+ currentParameters = new Float64Array(iterationResult.newParameters);
+ currentCost = iterationResult.newCost;
+ if (iterationResult.newUsedLineSearch !== undefined) {
+ usedLineSearchFlag = iterationResult.newUsedLineSearch;
+ }
+ }
+ }
+ // Maximum iterations reached
+ const finalGradient = gradientFunction(currentParameters);
+ const finalGradientNorm = vectorNorm(finalGradient);
+ logger.warn('gradientDescent', undefined, 'Maximum iterations reached', [
+ { key: 'Iterations:', value: maxIterations },
+ { key: 'Final cost:', value: currentCost },
+ { key: 'Final gradient norm:', value: finalGradientNorm }
+ ]);
+ return {
+ parameters: currentParameters,
+ iterations: maxIterations,
+ converged: false,
+ finalCost: currentCost,
+ finalGradientNorm: finalGradientNorm,
+ usedLineSearch: usedLineSearchFlag
+ };
+ }
+ //# sourceMappingURL=gradientDescent.js.map
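
A minimal usage sketch of the gradientDescent export above, for orientation. It assumes the function is re-exported from the package root via dist/index.js (that file's contents are not shown in this diff), and the quadratic cost function is illustrative, not part of the package:

// Minimize f(x, y) = (x - 3)^2 + (y + 1)^2, whose unique minimum is at (3, -1).
import { gradientDescent } from 'numopt-js'; // assumed root re-export

const cost = (p) => (p[0] - 3) ** 2 + (p[1] + 1) ** 2;
const grad = (p) => new Float64Array([2 * (p[0] - 3), 2 * (p[1] + 1)]);

const result = gradientDescent(new Float64Array([0, 0]), cost, grad, {
  tolerance: 1e-8,
  onIteration: (iter, c) => console.log(`iter ${iter}: cost ${c}`)
});
console.log(result.converged, Array.from(result.parameters)); // expect values near [3, -1]

With the default backtracking line search enabled, each step follows the negative gradient with a length satisfying the Armijo sufficient-decrease condition, so the cost reported by onIteration should decrease monotonically.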
@@ -0,0 +1 @@
+ {"version":3,"file":"gradientDescent.js","sourceRoot":"","sources":["../../src/core/gradientDescent.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAQH,OAAO,EAAE,sBAAsB,EAAE,MAAM,iBAAiB,CAAC;AACzD,OAAO,EAAE,UAAU,EAAE,WAAW,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACzE,OAAO,EAAE,wBAAwB,EAAE,wBAAwB,EAAE,uBAAuB,EAAE,MAAM,kBAAkB,CAAC;AAC/G,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AAErC,MAAM,sBAAsB,GAAG,IAAI,CAAC;AACpC,MAAM,iBAAiB,GAAG,IAAI,CAAC;AAC/B,MAAM,iBAAiB,GAAG,IAAI,CAAC;AAC/B,MAAM,uBAAuB,GAAG,IAAI,CAAC;AACrC,MAAM,cAAc,GAAG,GAAG,CAAC,CAAC,sEAAsE;AAClG,MAAM,2BAA2B,GAAG,CAAC,GAAG,CAAC,CAAC,gEAAgE;AAE1G;;;;GAIG;AACH,SAAS,iBAAiB,CACxB,eAA6B,EAC7B,iBAA+B,EAC/B,YAAoB,EACpB,gBAA4B,EAC5B,aAAsB,EACtB,aAAiC;IAEjC,wCAAwC;IACxC,IAAI,CAAC,aAAa,IAAI,aAAa,KAAK,SAAS,EAAE,CAAC;QAClD,OAAO,EAAE,QAAQ,EAAE,aAAa,IAAI,iBAAiB,EAAE,cAAc,EAAE,KAAK,EAAE,CAAC;IACjF,CAAC;IAED,+DAA+D;IAC/D,MAAM,eAAe,GAAG,WAAW,CAAC,eAAe,EAAE,2BAA2B,CAAC,CAAC;IAClF,iFAAiF;IACjF,4EAA4E;IAC5E,MAAM,QAAQ,GAAG,sBAAsB,CACrC,YAAY,EACZ,gBAAgB,EAChB,iBAAiB,EACjB,eAAe,CAChB,CAAC;IACF,OAAO,EAAE,QAAQ,EAAE,cAAc,EAAE,IAAI,EAAE,CAAC;AAC5C,CAAC;AAED;;;GAGG;AACH,SAAS,gCAAgC,CACvC,iBAA+B,EAC/B,eAA6B,EAC7B,QAAgB;IAEhB,MAAM,gBAAgB,GAAG,2BAA2B,GAAG,QAAQ,CAAC;IAChE,MAAM,IAAI,GAAG,WAAW,CAAC,eAAe,EAAE,gBAAgB,CAAC,CAAC;IAC5D,MAAM,aAAa,GAAG,UAAU,CAAC,iBAAiB,EAAE,IAAI,CAAC,CAAC;IAC1D,OAAO,EAAE,aAAa,EAAE,IAAI,EAAE,CAAC;AACjC,CAAC;AAED;;;GAGG;AACH,SAAS,iCAAiC,CACxC,iBAA+B,EAC/B,SAAiB,EACjB,WAAmB,EACnB,YAAoB,EACpB,SAAiB,EACjB,kBAA2B,EAC3B,MAAc;IAEd,IAAI,wBAAwB,CAAC,YAAY,EAAE,SAAS,EAAE,SAAS,CAAC,EAAE,CAAC;QACjE,MAAM,CAAC,IAAI,CAAC,iBAAiB,EAAE,SAAS,EAAE,WAAW,EAAE;YACrD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;YACpC,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;SAC/C,CAAC,CAAC;QACH,MAAM,MAAM,GAAG,uBAAuB,CAAC,iBAAiB,EAAE,SAAS,EAAE,IAAI,EAAE,WAAW,EAAE,YAAY,CAAC,CAAC;QACtG,OAAO,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,EAAE,GAAG,MAAM,EAAE,cAAc,EAAE,kBAAkB,EAAE,EAAE,CAAC;IACxF,CAAC;IACD,OAAO,EAAE,SAAS,EAAE,KAAK,EAAE,CAAC;AAC9B,CAAC;AAED;;;GAGG;AACH,SAAS,uBAAuB,CAC9B,iBAA+B,EAC/B,SAAiB,EACjB,WAAmB,EACnB,YAAoB,EACpB,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,iBAAiB,EAAE,SAAS,EAAE,oBAAoB,EAAE;QAC9D,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;QACpC,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;KAC/C,CAAC,CAAC;IACH,OAAO;QACL,SAAS,EAAE,IAAI;QACf,MAAM,EAAE;YACN,UAAU,EAAE,iBAAiB;YAC7B,UAAU,EAAE,SAAS;YACrB,SAAS,EAAE,KAAK;YAChB,SAAS,EAAE,WAAW;YACtB,iBAAiB,EAAE,YAAY;YAC/B,cAAc,EAAE,IAAI;SACrB;KACF,CAAC;AACJ,CAAC;AAED;;;GAGG;AACH,SAAS,iCAAiC,CACxC,iBAA+B,EAC/B,SAAiB,EACjB,WAAmB,EACnB,YAAoB,EACpB,QAAgB,EAChB,SAAiB,EACjB,iBAA0B,EAC1B,MAAc;IAEd,IAAI,wBAAwB,CAAC,QAAQ,EAAE,SAAS,EAAE,SAAS,CAAC,EAAE,CAAC;QAC7D,MAAM,CAAC,IAAI,CAAC,iBAAiB,EAAE,SAAS,EAAE,WAAW,EAAE;YACrD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;YACpC,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;YAC9C,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,QAAQ,EAAE;SACvC,CAAC,CAAC;QACH,MAAM,MAAM,GAAG,uBAAuB,CAAC,iBAAiB,EAAE,SAAS,EAAE,IAAI,EAAE,WAAW,EAAE,YAAY,CAAC,CAAC;QACtG,OAAO,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,EAAE,GAAG,MAAM,EAAE,cAAc,EAAE,iBAAiB,EAAE,EAAE,CAAC;IACvF,CAAC;IACD,OAAO,EAAE,SAAS,EAAE,KAAK,EAAE,CAAC;AAC9B,CAAC;AAED;;;GAGG;AACH,SAAS,+BAA+B,CACtC,SAAiB,EACjB,iBAA+B,EAC/B,WAAmB,EACnB,YAAoB,EACpB,gBAA4B,EAC5B,SAAiB,EACjB,aAAsB,EACtB,aAAiC,EACjC,WAA8F,EAC9F,MAAc,EACd,kBAA2B;IAE3B,MAAM,eAAe,GAAG,gBAAgB,CAAC,iBAAiB,CAAC,CAAC;IAC5D,MAAM,YAAY,GAAG,UAAU,CAAC,eAAe,CAAC,CAAC,CAAC,+EAA+E;IAEjI,2DAA2D;IAC3D,IAAI,WAAW,EAAE,CAAC;QAChB,MAAM,iBAAiB,GAAG,SAAS,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;QAC1D,WAAW,CAAC,iBAAiB,EAAE,WAAW,EAAE,iBAAiB,CAAC,CAA
C;IACjE,CAAC;IAED,4CAA4C;IAC5C,MAAM,yBAAyB,GAAG,iCAAiC,CACjE,iBAAiB,EACjB,SAAS,EACT,WAAW,EACX,YAAY,EACZ,SAAS,EACT,kBAAkB,EAClB,MAAM,CACP,CAAC;IACF,IAAI,yBAAyB,CAAC,SAAS,IAAI,yBAAyB,CAAC,MAAM,EAAE,CAAC;QAC5E,OAAO,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,yBAAyB,CAAC,MAAM,EAAE,CAAC;IACvE,CAAC;IAED,sBAAsB;IACtB,MAAM,cAAc,GAAG,iBAAiB,CACtC,eAAe,EACf,iBAAiB,EACjB,YAAY,EACZ,gBAAgB,EAChB,aAAa,EACb,aAAa,CACd,CAAC;IAEF,mCAAmC;IACnC,IAAI,cAAc,CAAC,QAAQ,KAAK,cAAc,EAAE,CAAC;QAC/C,MAAM,aAAa,GAAG,uBAAuB,CAC3C,iBAAiB,EACjB,SAAS,EACT,WAAW,EACX,YAAY,EACZ,MAAM,CACP,CAAC;QACF,OAAO,aAAa,CAAC;IACvB,CAAC;IAED,MAAM,iBAAiB,GAAG,kBAAkB,IAAI,cAAc,CAAC,cAAc,CAAC;IAE9E,oBAAoB;IACpB,MAAM,EAAE,aAAa,EAAE,IAAI,EAAE,GAAG,gCAAgC,CAC9D,iBAAiB,EACjB,eAAe,EACf,cAAc,CAAC,QAAQ,CACxB,CAAC;IACF,MAAM,OAAO,GAAG,YAAY,CAAC,aAAa,CAAC,CAAC;IAE5C,6CAA6C;IAC7C,MAAM,QAAQ,GAAG,UAAU,CAAC,IAAI,CAAC,CAAC,CAAC,mFAAmF;IACtH,MAAM,yBAAyB,GAAG,iCAAiC,CACjE,iBAAiB,EACjB,SAAS,EACT,WAAW,EACX,YAAY,EACZ,QAAQ,EACR,SAAS,EACT,iBAAiB,EACjB,MAAM,CACP,CAAC;IACF,IAAI,yBAAyB,CAAC,SAAS,IAAI,yBAAyB,CAAC,MAAM,EAAE,CAAC;QAC5E,OAAO,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,yBAAyB,CAAC,MAAM,EAAE,CAAC;IACvE,CAAC;IAED,eAAe;IACf,MAAM,CAAC,KAAK,CAAC,iBAAiB,EAAE,SAAS,EAAE,UAAU,EAAE;QACrD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;QACpC,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;QAC9C,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,cAAc,CAAC,QAAQ,EAAE;KACtD,CAAC,CAAC;IAEH,OAAO,EAAE,SAAS,EAAE,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC;AACzE,CAAC;AAED;;;;;;;;;;;;;;GAcG;AACH,MAAM,UAAU,eAAe,CAC7B,iBAA+B,EAC/B,YAAoB,EACpB,gBAA4B,EAC5B,UAAkC,EAAE;IAEpC,MAAM,aAAa,GAAG,OAAO,CAAC,aAAa,IAAI,sBAAsB,CAAC;IACtE,MAAM,SAAS,GAAG,OAAO,CAAC,SAAS,IAAI,iBAAiB,CAAC;IACzD,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAC;IAClC,MAAM,aAAa,GAAG,OAAO,CAAC,aAAa,IAAI,uBAAuB,CAAC;IACvE,MAAM,WAAW,GAAG,OAAO,CAAC,WAAW,CAAC;IACxC,MAAM,MAAM,GAAG,IAAI,MAAM,CAAC,OAAO,CAAC,QAAQ,EAAE,OAAO,CAAC,OAAO,CAAC,CAAC;IAE7D,IAAI,iBAAiB,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,CAAC;IAC5D,IAAI,WAAW,GAAG,YAAY,CAAC,iBAAiB,CAAC,CAAC;IAClD,IAAI,kBAAkB,GAAG,KAAK,CAAC;IAE/B,KAAK,IAAI,SAAS,GAAG,CAAC,EAAE,SAAS,GAAG,aAAa,EAAE,SAAS,EAAE,EAAE,CAAC;QAC/D,MAAM,eAAe,GAAG,+BAA+B,CACrD,SAAS,EACT,iBAAiB,EACjB,WAAW,EACX,YAAY,EACZ,gBAAgB,EAChB,SAAS,EACT,aAAa,EACb,QAAQ,EACR,WAAW,EACX,MAAM,EACN,kBAAkB,CACnB,CAAC;QAEF,IAAI,eAAe,CAAC,SAAS,IAAI,eAAe,CAAC,MAAM,EAAE,CAAC;YACxD,OAAO,eAAe,CAAC,MAAM,CAAC;QAChC,CAAC;QAED,IAAI,eAAe,CAAC,aAAa,IAAI,eAAe,CAAC,OAAO,KAAK,SAAS,EAAE,CAAC;YAC3E,iBAAiB,GAAG,IAAI,YAAY,CAAC,eAAe,CAAC,aAAa,CAAC,CAAC;YACpE,WAAW,GAAG,eAAe,CAAC,OAAO,CAAC;YACtC,IAAI,eAAe,CAAC,iBAAiB,KAAK,SAAS,EAAE,CAAC;gBACpD,kBAAkB,GAAG,eAAe,CAAC,iBAAiB,CAAC;YACzD,CAAC;QACH,CAAC;IACH,CAAC;IAED,6BAA6B;IAC7B,MAAM,aAAa,GAAG,gBAAgB,CAAC,iBAAiB,CAAC,CAAC;IAC1D,MAAM,iBAAiB,GAAG,UAAU,CAAC,aAAa,CAAC,CAAC;IAEpD,MAAM,CAAC,IAAI,CAAC,iBAAiB,EAAE,SAAS,EAAE,4BAA4B,EAAE;QACtE,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,aAAa,EAAE;QAC5C,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,WAAW,EAAE;QAC1C,EAAE,GAAG,EAAE,sBAAsB,EAAE,KAAK,EAAE,iBAAiB,EAAE;KAC1D,CAAC,CAAC;IAEH,OAAO;QACL,UAAU,EAAE,iBAAiB;QAC7B,UAAU,EAAE,aAAa;QACzB,SAAS,EAAE,KAAK;QAChB,SAAS,EAAE,WAAW;QACtB,iBAAiB,EAAE,iBAAiB;QACpC,cAAc,EAAE,kBAAkB;KACnC,CAAC;AACJ,CAAC"}
@@ -0,0 +1,24 @@
+ /**
+ * This file provides a shared function for computing Jacobian matrices
+ * using analytical functions or numerical differentiation.
+ *
+ * Role in system:
+ * - Eliminates code duplication between Gauss-Newton and Levenberg-Marquardt
+ * - Centralizes Jacobian computation logic (DRY principle)
+ * - Used by both least squares optimization algorithms
+ *
+ * For first-time readers:
+ * - This is a utility function used internally by optimization algorithms
+ * - Prefers analytical Jacobian if provided, falls back to numerical differentiation
+ */
+ import { Matrix } from 'ml-matrix';
+ import type { ResidualFn, JacobianFn } from './types.js';
+ /**
+ * Computes the Jacobian matrix using analytical function or numerical differentiation.
+ * Early return pattern: prefers analytical Jacobian if available.
+ *
+ * This function is shared between Gauss-Newton and Levenberg-Marquardt algorithms
+ * to avoid code duplication.
+ */
+ export declare function computeJacobianMatrix(jacobianFunction: JacobianFn | undefined, residualFunction: ResidualFn, parameters: Float64Array, useNumericJacobian: boolean, jacobianStep: number, algorithmName: string): Matrix;
+ //# sourceMappingURL=jacobianComputation.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"jacobianComputation.d.ts","sourceRoot":"","sources":["../../src/core/jacobianComputation.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AACnC,OAAO,KAAK,EAAE,UAAU,EAAE,UAAU,EAAE,MAAM,YAAY,CAAC;AAGzD;;;;;;GAMG;AACH,wBAAgB,qBAAqB,CACnC,gBAAgB,EAAE,UAAU,GAAG,SAAS,EACxC,gBAAgB,EAAE,UAAU,EAC5B,UAAU,EAAE,YAAY,EACxB,kBAAkB,EAAE,OAAO,EAC3B,YAAY,EAAE,MAAM,EACpB,aAAa,EAAE,MAAM,GACpB,MAAM,CAmBR"}
@@ -0,0 +1,38 @@
+ /**
+ * This file provides a shared function for computing Jacobian matrices
+ * using analytical functions or numerical differentiation.
+ *
+ * Role in system:
+ * - Eliminates code duplication between Gauss-Newton and Levenberg-Marquardt
+ * - Centralizes Jacobian computation logic (DRY principle)
+ * - Used by both least squares optimization algorithms
+ *
+ * For first-time readers:
+ * - This is a utility function used internally by optimization algorithms
+ * - Prefers analytical Jacobian if provided, falls back to numerical differentiation
+ */
+ import { finiteDiffJacobian } from './finiteDiff.js';
+ /**
+ * Computes the Jacobian matrix using analytical function or numerical differentiation.
+ * Early return pattern: prefers analytical Jacobian if available.
+ *
+ * This function is shared between Gauss-Newton and Levenberg-Marquardt algorithms
+ * to avoid code duplication.
+ */
+ export function computeJacobianMatrix(jacobianFunction, residualFunction, parameters, useNumericJacobian, jacobianStep, algorithmName) {
+ // Early return: use analytical Jacobian if provided
+ if (jacobianFunction) {
+ return jacobianFunction(parameters);
+ }
+ // Early return: use numerical Jacobian if enabled
+ if (useNumericJacobian) {
+ return finiteDiffJacobian(residualFunction, parameters, { stepSize: jacobianStep });
+ }
+ // Neither provided: throw error with helpful message
+ throw new Error('Jacobian computation is required but not provided. ' +
+ `Please either:\n` +
+ ` 1. Provide a jacobian in options: ${algorithmName}(params, residualFn, { jacobian: jacobianFn })\n` +
+ ` 2. Enable numerical Jacobian: ${algorithmName}(params, residualFn, { useNumericJacobian: true })\n` +
+ 'Note: Numerical Jacobian is enabled by default. If you see this error, it may have been explicitly disabled.');
+ }
+ //# sourceMappingURL=jacobianComputation.js.map
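
A hedged sketch of the fallback behavior of computeJacobianMatrix, using a made-up one-parameter residual for fitting y ≈ a·x at two samples. The deep import path mirrors the dist layout listed above; whether the package's exports field permits deep imports is an assumption:

import { computeJacobianMatrix } from 'numopt-js/dist/core/jacobianComputation.js'; // assumed import path

// Residuals of y = a*x against the points (1, 2.0) and (2, 4.1)
const residual = (p) => new Float64Array([p[0] * 1 - 2.0, p[0] * 2 - 4.1]);

// No analytical Jacobian passed and numeric differentiation enabled:
// control falls through to finiteDiffJacobian with the given step size.
const J = computeJacobianMatrix(undefined, residual, new Float64Array([1.5]), true, 1e-6, 'gaussNewton');
console.log(J.rows, J.columns); // 2 x 1 ml-matrix Matrix: one row per residual, one column per parameter

Calling it with useNumericJacobian set to false and no analytical Jacobian throws the descriptive error shown above instead.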
@@ -0,0 +1 @@
+ {"version":3,"file":"jacobianComputation.js","sourceRoot":"","sources":["../../src/core/jacobianComputation.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAIH,OAAO,EAAE,kBAAkB,EAAE,MAAM,iBAAiB,CAAC;AAErD;;;;;;GAMG;AACH,MAAM,UAAU,qBAAqB,CACnC,gBAAwC,EACxC,gBAA4B,EAC5B,UAAwB,EACxB,kBAA2B,EAC3B,YAAoB,EACpB,aAAqB;IAErB,oDAAoD;IACpD,IAAI,gBAAgB,EAAE,CAAC;QACrB,OAAO,gBAAgB,CAAC,UAAU,CAAC,CAAC;IACtC,CAAC;IAED,kDAAkD;IAClD,IAAI,kBAAkB,EAAE,CAAC;QACvB,OAAO,kBAAkB,CAAC,gBAAgB,EAAE,UAAU,EAAE,EAAE,QAAQ,EAAE,YAAY,EAAE,CAAC,CAAC;IACtF,CAAC;IAED,qDAAqD;IACrD,MAAM,IAAI,KAAK,CACb,qDAAqD;QACrD,kBAAkB;QAClB,uCAAuC,aAAa,kDAAkD;QACtG,mCAAmC,aAAa,sDAAsD;QACtG,8GAA8G,CAC/G,CAAC;AACJ,CAAC"}
@@ -0,0 +1,36 @@
+ /**
+ * This file implements the Levenberg-Marquardt algorithm for solving
+ * nonlinear least squares problems, following the derivations in:
+ * - Moré, "The Levenberg-Marquardt Algorithm: Implementation and Theory", 1978 (Lecture Notes in Mathematics 630)
+ * - Lourakis, "A Brief Description of the Levenberg-Marquardt Algorithm", 2005 tutorial
+ *
+ * Role in system:
+ * - Phase 3 advanced algorithm (main MVP target)
+ * - Combines Gauss-Newton method with damping for robustness
+ * - Handles cases where Gauss-Newton might fail (singular matrices, poor conditioning)
+ *
+ * For first-time readers:
+ * - Start with levenbergMarquardt function
+ * - Understand lambda (damping parameter) update strategy
+ * - Check convergence criteria implementation
+ * - Debug features (callbacks, verbose logging) are top priority
+ */
+ import type { ResidualFn, LevenbergMarquardtOptions, LevenbergMarquardtResult } from './types.js';
+ /**
+ * Performs Levenberg-Marquardt optimization for nonlinear least squares problems.
+ *
+ * Algorithm:
+ * 1. Start with initial parameters and lambda (damping parameter)
+ * 2. Compute residual vector r and Jacobian matrix J
+ * 3. Solve damped normal equations: (J^T J + λI) δ = -J^T r
+ * 4. Try step: x_new = x_old + δ
+ * 5. If cost decreases: accept step, decrease lambda
+ * 6. If cost increases: reject step, increase lambda
+ * 7. Repeat until convergence
+ *
+ * The damping parameter lambda interpolates between:
+ * - Gauss-Newton (λ → 0): fast convergence near solution
+ * - Gradient descent (λ → ∞): robust but slow
+ */
+ export declare function levenbergMarquardt(initialParameters: Float64Array, residualFunction: ResidualFn, options?: LevenbergMarquardtOptions): LevenbergMarquardtResult;
+ //# sourceMappingURL=levenbergMarquardt.d.ts.map
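
To make the interpolation claim in the doc comment concrete, the two limits of the damped step can be written out (this restates the comment above in LaTeX; δ_λ denotes the step solving the damped system):

\[(J^\top J + \lambda I)\,\delta_\lambda = -J^\top r\]
\[\lambda \to 0:\qquad J^\top J\,\delta \approx -J^\top r \quad \text{(the Gauss-Newton step)}\]
\[\lambda \to \infty:\qquad \delta_\lambda \approx -\tfrac{1}{\lambda}\,J^\top r = -\tfrac{1}{\lambda}\,\nabla\!\left(\tfrac{1}{2}\lVert r\rVert^2\right) \quad \text{(a short gradient-descent step)}\]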
@@ -0,0 +1 @@
+ {"version":3,"file":"levenbergMarquardt.d.ts","sourceRoot":"","sources":["../../src/core/levenbergMarquardt.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAGH,OAAO,KAAK,EACV,UAAU,EAEV,yBAAyB,EACzB,wBAAwB,EACzB,MAAM,YAAY,CAAC;AAgKpB;;;;;;;;;;;;;;;GAeG;AACH,wBAAgB,kBAAkB,CAChC,iBAAiB,EAAE,YAAY,EAC/B,gBAAgB,EAAE,UAAU,EAC5B,OAAO,GAAE,yBAA8B,GACtC,wBAAwB,CAiN1B"}
@@ -0,0 +1,286 @@
+ /**
+ * This file implements the Levenberg-Marquardt algorithm for solving
+ * nonlinear least squares problems, following the derivations in:
+ * - Moré, "The Levenberg-Marquardt Algorithm: Implementation and Theory", 1978 (Lecture Notes in Mathematics 630)
+ * - Lourakis, "A Brief Description of the Levenberg-Marquardt Algorithm", 2005 tutorial
+ *
+ * Role in system:
+ * - Phase 3 advanced algorithm (main MVP target)
+ * - Combines Gauss-Newton method with damping for robustness
+ * - Handles cases where Gauss-Newton might fail (singular matrices, poor conditioning)
+ *
+ * For first-time readers:
+ * - Start with levenbergMarquardt function
+ * - Understand lambda (damping parameter) update strategy
+ * - Check convergence criteria implementation
+ * - Debug features (callbacks, verbose logging) are top priority
+ */
+ import { Matrix, solve, CholeskyDecomposition } from 'ml-matrix';
+ import { float64ArrayToMatrix, matrixToFloat64Array, vectorNorm, computeSumOfSquaredResiduals } from '../utils/matrix.js';
+ import { checkGradientConvergence, checkStepSizeConvergence, checkResidualConvergence } from './convergence.js';
+ import { computeJacobianMatrix } from './jacobianComputation.js';
+ import { Logger } from './logger.js';
+ const DEFAULT_MAX_ITERATIONS = 1000;
+ const DEFAULT_LAMBDA_INITIAL = 1e-3;
+ const DEFAULT_LAMBDA_FACTOR = 10.0;
+ const DEFAULT_TOL_GRADIENT = 1e-6;
+ const DEFAULT_TOL_STEP = 1e-6;
+ const DEFAULT_TOL_RESIDUAL = 1e-6;
+ const DEFAULT_USE_NUMERIC_JACOBIAN = true;
+ const DEFAULT_JACOBIAN_STEP = 1e-6;
+ const MAXIMUM_LAMBDA_THRESHOLD = 1e10; // Maximum lambda before giving up (prevents infinite loop)
+ const NEGATIVE_COEFFICIENT = -1.0; // Coefficient for negative right-hand side in damped normal equations: (J^T J + λI) δ = -J^T r
+ /**
+ * Computes J^T J and J^T r matrices needed for normal equations.
+ * Returns both matrices for use in solving damped normal equations.
+ */
+ function computeNormalEquationsMatrices(jacobianMatrix, residual) {
+ const jacobianTranspose = jacobianMatrix.transpose();
+ const jtj = jacobianTranspose.mmul(jacobianMatrix);
+ const residualMatrix = float64ArrayToMatrix(residual);
+ const jtr = jacobianTranspose.mmul(residualMatrix);
+ return { jtj, jtr };
+ }
+ /**
+ * Creates a convergence result object for Levenberg-Marquardt algorithm.
+ * Centralizes result creation to avoid code duplication.
+ */
+ function createConvergenceResultForLM(parameters, iteration, converged, finalCost, finalGradientNorm, finalResidualNorm, finalLambda) {
+ return {
+ parameters,
+ iterations: iteration + 1,
+ converged,
+ finalCost,
+ finalGradientNorm,
+ finalResidualNorm,
+ finalLambda
+ };
+ }
+ /**
+ * Tries a Levenberg-Marquardt step by solving damped normal equations.
+ * Returns whether step was accepted and updated parameters/lambda.
+ */
+ function tryLevenbergMarquardtStep(jtj, jtr, currentParameters, currentLambda, lambdaFactor, residualFunction, currentCost, tolStep, iteration, logger) {
+ // Early return: lambda too large
+ if (currentLambda >= MAXIMUM_LAMBDA_THRESHOLD) {
+ logger.warn('levenbergMarquardt', iteration, 'Lambda too large, stopping optimization', [
+ { key: 'Lambda:', value: currentLambda },
+ { key: 'Cost:', value: currentCost }
+ ]);
+ return { stepAccepted: false, newLambda: currentLambda, shouldStop: true };
+ }
+ try {
+ // Add damping: J^T J + λI
+ const parameterCount = jtj.rows;
+ const identity = Matrix.eye(parameterCount, parameterCount);
+ const dampedHessian = jtj.add(identity.mul(currentLambda));
+ // Solve: (J^T J + λI) δ = -J^T r
+ // Use Cholesky decomposition for efficiency (dampedHessian is always positive definite when λ > 0)
+ const negativeJtr = jtr.mul(NEGATIVE_COEFFICIENT);
+ let stepMatrix;
+ try {
+ const cholesky = new CholeskyDecomposition(dampedHessian);
+ if (cholesky.isPositiveDefinite()) {
+ stepMatrix = cholesky.solve(negativeJtr);
+ }
+ else {
+ // Fallback to LU decomposition if Cholesky fails (should not happen when λ > 0)
+ stepMatrix = solve(dampedHessian, negativeJtr);
+ }
+ }
+ catch (choleskyError) {
+ // Fallback to LU decomposition if Cholesky decomposition fails
+ stepMatrix = solve(dampedHessian, negativeJtr);
+ }
+ const step = matrixToFloat64Array(stepMatrix);
+ const stepNorm = vectorNorm(step);
+ // Check step size convergence (termination test suggested in Lourakis 2005, Section 5)
+ if (checkStepSizeConvergence(stepNorm, tolStep, iteration)) {
+ return { stepAccepted: false, newLambda: currentLambda, stepNorm };
+ }
+ // Try the step: x_new = x_old + δ
+ const newParameters = new Float64Array(currentParameters.length);
+ for (let i = 0; i < currentParameters.length; i++) {
+ newParameters[i] = currentParameters[i] + step[i];
+ }
+ const newResidual = residualFunction(newParameters);
+ const newResidualNorm = vectorNorm(newResidual);
+ const newCost = computeSumOfSquaredResiduals(newResidualNorm);
+ // Check if step improved the cost
+ if (newCost < currentCost) {
+ // Step successful: accept it and decrease lambda
+ // (trust-region style update per Moré 1978, Section 4 and Lourakis 2005, Section 4.1)
+ const newLambda = currentLambda / lambdaFactor;
+ logger.debug('levenbergMarquardt', iteration, 'Step accepted', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'New cost:', value: newCost },
+ { key: 'Lambda:', value: newLambda }
+ ]);
+ return { stepAccepted: true, newParameters, newLambda };
+ }
+ // Step failed: reject it and increase lambda
+ // (damping increase strategy from Moré 1978, Section 4 and Lourakis 2005, Section 4.1)
+ const newLambda = currentLambda * lambdaFactor;
+ logger.debug('levenbergMarquardt', iteration, 'Step rejected', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'New cost:', value: newCost },
+ { key: 'Lambda:', value: newLambda }
+ ]);
+ return { stepAccepted: false, newLambda };
+ }
+ catch (error) {
+ // Singular matrix or numerical issues: increase lambda and retry
+ const newLambda = currentLambda * lambdaFactor;
+ logger.warn('levenbergMarquardt', iteration, 'Singular matrix encountered, increasing lambda', [
+ { key: 'Lambda:', value: newLambda },
+ { key: 'Cost:', value: currentCost }
+ ]);
+ return { stepAccepted: false, newLambda };
+ }
+ }
+ /**
+ * Performs Levenberg-Marquardt optimization for nonlinear least squares problems.
+ *
+ * Algorithm:
+ * 1. Start with initial parameters and lambda (damping parameter)
+ * 2. Compute residual vector r and Jacobian matrix J
+ * 3. Solve damped normal equations: (J^T J + λI) δ = -J^T r
+ * 4. Try step: x_new = x_old + δ
+ * 5. If cost decreases: accept step, decrease lambda
+ * 6. If cost increases: reject step, increase lambda
+ * 7. Repeat until convergence
+ *
+ * The damping parameter lambda interpolates between:
+ * - Gauss-Newton (λ → 0): fast convergence near solution
+ * - Gradient descent (λ → ∞): robust but slow
+ */
+ export function levenbergMarquardt(initialParameters, residualFunction, options = {}) {
+ const actualOptions = options;
+ const jacobianFunction = actualOptions.jacobian;
+ const maxIterations = actualOptions.maxIterations ?? DEFAULT_MAX_ITERATIONS;
+ const lambdaInitial = actualOptions.lambdaInitial ?? DEFAULT_LAMBDA_INITIAL;
+ const lambdaFactor = actualOptions.lambdaFactor ?? DEFAULT_LAMBDA_FACTOR;
+ const tolGradient = actualOptions.tolGradient ?? DEFAULT_TOL_GRADIENT;
+ const tolStep = actualOptions.tolStep ?? DEFAULT_TOL_STEP;
+ const tolResidual = actualOptions.tolResidual ?? DEFAULT_TOL_RESIDUAL;
+ const useNumericJacobian = actualOptions.useNumericJacobian ?? DEFAULT_USE_NUMERIC_JACOBIAN;
+ const jacobianStep = actualOptions.jacobianStep ?? DEFAULT_JACOBIAN_STEP;
+ const onIteration = actualOptions.onIteration;
+ const logger = new Logger(actualOptions.logLevel, actualOptions.verbose);
+ let currentParameters = new Float64Array(initialParameters);
+ let currentLambda = lambdaInitial;
+ let bestParameters = new Float64Array(initialParameters);
+ let bestCost = Infinity;
+ for (let iteration = 0; iteration < maxIterations; iteration++) {
+ // Compute residual vector
+ const residual = residualFunction(currentParameters);
+ const residualNorm = vectorNorm(residual);
+ const cost = computeSumOfSquaredResiduals(residualNorm);
+ // Track best solution so far
+ if (cost < bestCost) {
+ bestCost = cost;
+ bestParameters = new Float64Array(currentParameters);
+ }
+ // Call progress callback if provided
+ if (onIteration) {
+ onIteration(iteration, cost, currentParameters);
+ }
+ // Compute Jacobian matrix
+ // (analytical if provided, otherwise numerical differentiation)
+ const jacobianMatrix = computeJacobianMatrix(jacobianFunction, residualFunction, currentParameters, useNumericJacobian, jacobianStep, 'levenbergMarquardt');
+ // Compute J^T J and J^T r
+ const { jtj, jtr } = computeNormalEquationsMatrices(jacobianMatrix, residual);
+ // Compute gradient norm: ||J^T r||
+ const gradientVector = matrixToFloat64Array(jtr);
+ const gradientNorm = vectorNorm(gradientVector);
+ // Check convergence: gradient norm is small enough (Moré 1978, Section 4 termination test; Lourakis 2005, Section 5)
+ if (checkGradientConvergence(gradientNorm, tolGradient, iteration)) {
+ logger.info('levenbergMarquardt', iteration, 'Converged', [
+ { key: 'Cost:', value: cost },
+ { key: 'Gradient norm:', value: gradientNorm },
+ { key: 'Residual norm:', value: residualNorm },
+ { key: 'Lambda:', value: currentLambda }
+ ]);
+ return createConvergenceResultForLM(currentParameters, iteration, true, cost, gradientNorm, residualNorm, currentLambda);
+ }
+ // Try to solve damped normal equations: (J^T J + λI) δ = -J^T r
+ let stepAccepted = false;
+ while (!stepAccepted && currentLambda < MAXIMUM_LAMBDA_THRESHOLD) {
+ const stepResult = tryLevenbergMarquardtStep(jtj, jtr, currentParameters, currentLambda, lambdaFactor, residualFunction, cost, tolStep, iteration, logger);
+ // Early return: lambda too large
+ if (stepResult.shouldStop) {
+ return createConvergenceResultForLM(bestParameters, iteration, false, bestCost, gradientNorm, residualNorm, stepResult.newLambda);
+ }
+ // Early return: step size convergence (Lourakis 2005, Section 5)
+ if (stepResult.stepNorm !== undefined && checkStepSizeConvergence(stepResult.stepNorm, tolStep, iteration)) {
+ logger.info('levenbergMarquardt', iteration, 'Converged', [
+ { key: 'Cost:', value: cost },
+ { key: 'Gradient norm:', value: gradientNorm },
+ { key: 'Residual norm:', value: residualNorm },
+ { key: 'Step size:', value: stepResult.stepNorm },
+ { key: 'Lambda:', value: currentLambda }
+ ]);
+ return createConvergenceResultForLM(currentParameters, iteration, true, cost, gradientNorm, residualNorm, currentLambda);
+ }
+ // Update lambda
+ currentLambda = stepResult.newLambda;
+ // Accept step if successful
+ if (stepResult.stepAccepted && stepResult.newParameters) {
+ currentParameters = new Float64Array(stepResult.newParameters);
+ stepAccepted = true;
+ }
+ }
+ // Check if step was never accepted (lambda became too large)
+ if (!stepAccepted && currentLambda >= MAXIMUM_LAMBDA_THRESHOLD) {
+ logger.warn('levenbergMarquardt', iteration, 'Could not find acceptable step even with maximum lambda. Stopping optimization.', [
+ { key: 'Lambda:', value: currentLambda },
+ { key: 'Cost:', value: cost },
+ { key: 'Best cost:', value: bestCost }
+ ]);
+ const finalResidual = residualFunction(bestParameters);
+ const finalResidualNorm = vectorNorm(finalResidual);
+ const finalGradient = jacobianFunction
+ ? matrixToFloat64Array(jacobianFunction(bestParameters).transpose().mmul(float64ArrayToMatrix(finalResidual)))
+ : undefined;
+ const finalGradientNorm = finalGradient ? vectorNorm(finalGradient) : undefined;
+ return createConvergenceResultForLM(bestParameters, iteration, false, bestCost, finalGradientNorm ?? gradientNorm, finalResidualNorm, currentLambda);
+ }
+ // Check residual norm convergence (Moré 1978, Section 4 stopping rule; Lourakis 2005, Section 5)
+ const currentResidual = residualFunction(currentParameters);
+ const currentResidualNorm = vectorNorm(currentResidual);
+ const currentCost = computeSumOfSquaredResiduals(currentResidualNorm);
+ if (checkResidualConvergence(currentResidualNorm, tolResidual, iteration)) {
+ logger.info('levenbergMarquardt', iteration, 'Converged', [
+ { key: 'Cost:', value: currentCost },
+ { key: 'Gradient norm:', value: gradientNorm },
+ { key: 'Residual norm:', value: currentResidualNorm },
+ { key: 'Lambda:', value: currentLambda }
+ ]);
+ return createConvergenceResultForLM(currentParameters, iteration, true, currentCost, gradientNorm, currentResidualNorm, currentLambda);
+ }
+ }
+ // Maximum iterations reached - return best solution found
+ const finalResidual = residualFunction(bestParameters);
+ const finalResidualNorm = vectorNorm(finalResidual);
+ const finalGradient = jacobianFunction
+ ? matrixToFloat64Array(jacobianFunction(bestParameters).transpose().mmul(float64ArrayToMatrix(finalResidual)))
+ : undefined;
+ const finalGradientNorm = finalGradient ? vectorNorm(finalGradient) : undefined;
+ logger.warn('levenbergMarquardt', undefined, 'Maximum iterations reached', [
+ { key: 'Iterations:', value: maxIterations },
+ { key: 'Final cost:', value: bestCost },
+ { key: 'Final gradient norm:', value: finalGradientNorm ?? 0 },
+ { key: 'Final residual norm:', value: finalResidualNorm },
+ { key: 'Final lambda:', value: currentLambda }
+ ]);
+ return {
+ parameters: bestParameters,
+ iterations: maxIterations,
+ converged: false,
+ finalCost: bestCost,
+ finalGradientNorm: finalGradientNorm,
+ finalResidualNorm: finalResidualNorm,
+ finalLambda: currentLambda
+ };
+ }
+ //# sourceMappingURL=levenbergMarquardt.js.map
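
A hedged end-to-end sketch of the levenbergMarquardt export above, fitting y = a·exp(b·x) by least squares. The root re-export and the sample data are assumptions, not part of this diff:

import { levenbergMarquardt } from 'numopt-js'; // assumed root re-export

const xs = [0, 1, 2, 3];
const ys = [2.0, 5.4, 14.8, 40.2]; // roughly 2 * e^x, made-up data
// residual_i = model(x_i) - y_i, as in step 2 of the doc comment
const residual = (p) => new Float64Array(xs.map((x, i) => p[0] * Math.exp(p[1] * x) - ys[i]));

const fit = levenbergMarquardt(new Float64Array([1, 1]), residual, {
  lambdaInitial: 1e-3, // the default shown above
  tolGradient: 1e-8
});
console.log(fit.converged, Array.from(fit.parameters), fit.finalLambda); // expect a ≈ 2, b ≈ 1

No jacobian option is passed, so the DEFAULT_USE_NUMERIC_JACOBIAN default applies and computeJacobianMatrix falls back to finite differences.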
@@ -0,0 +1 @@
+ {"version":3,"file":"levenbergMarquardt.js","sourceRoot":"","sources":["../../src/core/levenbergMarquardt.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAEH,OAAO,EAAE,MAAM,EAAE,KAAK,EAAE,qBAAqB,EAAE,MAAM,WAAW,CAAC;AAOjE,OAAO,EAAE,oBAAoB,EAAE,oBAAoB,EAAE,UAAU,EAAE,4BAA4B,EAAE,MAAM,oBAAoB,CAAC;AAC1H,OAAO,EAAE,wBAAwB,EAAE,wBAAwB,EAAE,wBAAwB,EAAE,MAAM,kBAAkB,CAAC;AAChH,OAAO,EAAE,qBAAqB,EAAE,MAAM,0BAA0B,CAAC;AACjE,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AAErC,MAAM,sBAAsB,GAAG,IAAI,CAAC;AACpC,MAAM,sBAAsB,GAAG,IAAI,CAAC;AACpC,MAAM,qBAAqB,GAAG,IAAI,CAAC;AACnC,MAAM,oBAAoB,GAAG,IAAI,CAAC;AAClC,MAAM,gBAAgB,GAAG,IAAI,CAAC;AAC9B,MAAM,oBAAoB,GAAG,IAAI,CAAC;AAClC,MAAM,4BAA4B,GAAG,IAAI,CAAC;AAC1C,MAAM,qBAAqB,GAAG,IAAI,CAAC;AACnC,MAAM,wBAAwB,GAAG,IAAI,CAAC,CAAC,2DAA2D;AAClG,MAAM,oBAAoB,GAAG,CAAC,GAAG,CAAC,CAAC,+FAA+F;AAElI;;;GAGG;AACH,SAAS,8BAA8B,CACrC,cAAsB,EACtB,QAAsB;IAEtB,MAAM,iBAAiB,GAAG,cAAc,CAAC,SAAS,EAAE,CAAC;IACrD,MAAM,GAAG,GAAG,iBAAiB,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC;IACnD,MAAM,cAAc,GAAG,oBAAoB,CAAC,QAAQ,CAAC,CAAC;IACtD,MAAM,GAAG,GAAG,iBAAiB,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC;IACnD,OAAO,EAAE,GAAG,EAAE,GAAG,EAAE,CAAC;AACtB,CAAC;AAED;;;GAGG;AACH,SAAS,4BAA4B,CACnC,UAAwB,EACxB,SAAiB,EACjB,SAAkB,EAClB,SAAiB,EACjB,iBAAyB,EACzB,iBAAyB,EACzB,WAAmB;IAEnB,OAAO;QACL,UAAU;QACV,UAAU,EAAE,SAAS,GAAG,CAAC;QACzB,SAAS;QACT,SAAS;QACT,iBAAiB;QACjB,iBAAiB;QACjB,WAAW;KACZ,CAAC;AACJ,CAAC;AAED;;;GAGG;AACH,SAAS,yBAAyB,CAChC,GAAW,EACX,GAAW,EACX,iBAA+B,EAC/B,aAAqB,EACrB,YAAoB,EACpB,gBAA4B,EAC5B,WAAmB,EACnB,OAAe,EACf,SAAiB,EACjB,MAAc;IAQd,iCAAiC;IACjC,IAAI,aAAa,IAAI,wBAAwB,EAAE,CAAC;QAC9C,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,yCAAyC,EAAE;YACtF,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,aAAa,EAAE;YACxC,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;SACrC,CAAC,CAAC;QACH,OAAO,EAAE,YAAY,EAAE,KAAK,EAAE,SAAS,EAAE,aAAa,EAAE,UAAU,EAAE,IAAI,EAAE,CAAC;IAC7E,CAAC;IAED,IAAI,CAAC;QACH,0BAA0B;QAC1B,MAAM,cAAc,GAAG,GAAG,CAAC,IAAI,CAAC;QAChC,MAAM,QAAQ,GAAG,MAAM,CAAC,GAAG,CAAC,cAAc,EAAE,cAAc,CAAC,CAAC;QAC5D,MAAM,aAAa,GAAG,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,aAAa,CAAC,CAAC,CAAC;QAE3D,iCAAiC;QACjC,mGAAmG;QACnG,MAAM,WAAW,GAAG,GAAG,CAAC,GAAG,CAAC,oBAAoB,CAAC,CAAC;QAClD,IAAI,UAAkB,CAAC;QACvB,IAAI,CAAC;YACH,MAAM,QAAQ,GAAG,IAAI,qBAAqB,CAAC,aAAa,CAAC,CAAC;YAC1D,IAAI,QAAQ,CAAC,kBAAkB,EAAE,EAAE,CAAC;gBAClC,UAAU,GAAG,QAAQ,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;YAC3C,CAAC;iBAAM,CAAC;gBACN,gFAAgF;gBAChF,UAAU,GAAG,KAAK,CAAC,aAAa,EAAE,WAAW,CAAC,CAAC;YACjD,CAAC;QACH,CAAC;QAAC,OAAO,aAAa,EAAE,CAAC;YACvB,+DAA+D;YAC/D,UAAU,GAAG,KAAK,CAAC,aAAa,EAAE,WAAW,CAAC,CAAC;QACjD,CAAC;QACD,MAAM,IAAI,GAAG,oBAAoB,CAAC,UAAU,CAAC,CAAC;QAC9C,MAAM,QAAQ,GAAG,UAAU,CAAC,IAAI,CAAC,CAAC;QAElC,uFAAuF;QACvF,IAAI,wBAAwB,CAAC,QAAQ,EAAE,OAAO,EAAE,SAAS,CAAC,EAAE,CAAC;YAC3D,OAAO,EAAE,YAAY,EAAE,KAAK,EAAE,SAAS,EAAE,aAAa,EAAE,QAAQ,EAAE,CAAC;QACrE,CAAC;QAED,kCAAkC;QAClC,MAAM,aAAa,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QACjE,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,iBAAiB,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YAClD,aAAa,CAAC,CAAC,CAAC,GAAG,iBAAiB,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;QACpD,CAAC;QAED,MAAM,WAAW,GAAG,gBAAgB,CAAC,aAAa,CAAC,CAAC;QACpD,MAAM,eAAe,GAAG,UAAU,CAAC,WAAW,CAAC,CAAC;QAChD,MAAM,OAAO,GAAG,4BAA4B,CAAC,eAAe,CAAC,CAAC;QAE9D,kCAAkC;QAClC,IAAI,OAAO,GAAG,WAAW,EAAE,CAAC;YAC1B,iDAAiD;YACjD,sFAAsF;YACtF,MAAM,SAAS,GAAG,aAAa,GAAG,YAAY,CAAC;YAC/C,MAAM,CAAC,KAAK,CAAC,oBAAoB,EAAE,SAAS,EAAE,eAAe,EAAE;gBAC7D,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;gBACpC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,EAAE;gBACpC,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,SAAS,EAAE;aACrC,CAAC,CAAC;YACH,OAAO,EAAE,YAAY,EAAE,IA
AI,EAAE,aAAa,EAAE,SAAS,EAAE,CAAC;QAC1D,CAAC;QAED,6CAA6C;QAC7C,uFAAuF;QACvF,MAAM,SAAS,GAAG,aAAa,GAAG,YAAY,CAAC;QAC/C,MAAM,CAAC,KAAK,CAAC,oBAAoB,EAAE,SAAS,EAAE,eAAe,EAAE;YAC7D,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;YACpC,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,OAAO,EAAE;YACpC,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,SAAS,EAAE;SACrC,CAAC,CAAC;QACH,OAAO,EAAE,YAAY,EAAE,KAAK,EAAE,SAAS,EAAE,CAAC;IAC5C,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,iEAAiE;QACjE,MAAM,SAAS,GAAG,aAAa,GAAG,YAAY,CAAC;QAC/C,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,gDAAgD,EAAE;YAC7F,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,SAAS,EAAE;YACpC,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;SACrC,CAAC,CAAC;QACH,OAAO,EAAE,YAAY,EAAE,KAAK,EAAE,SAAS,EAAE,CAAC;IAC5C,CAAC;AACH,CAAC;AAED;;;;;;;;;;;;;;;GAeG;AACH,MAAM,UAAU,kBAAkB,CAChC,iBAA+B,EAC/B,gBAA4B,EAC5B,UAAqC,EAAE;IAEvC,MAAM,aAAa,GAA8B,OAAO,CAAC;IACzD,MAAM,gBAAgB,GAA2B,aAAa,CAAC,QAAQ,CAAC;IAExE,MAAM,aAAa,GAAG,aAAa,CAAC,aAAa,IAAI,sBAAsB,CAAC;IAC5E,MAAM,aAAa,GAAG,aAAa,CAAC,aAAa,IAAI,sBAAsB,CAAC;IAC5E,MAAM,YAAY,GAAG,aAAa,CAAC,YAAY,IAAI,qBAAqB,CAAC;IACzE,MAAM,WAAW,GAAG,aAAa,CAAC,WAAW,IAAI,oBAAoB,CAAC;IACtE,MAAM,OAAO,GAAG,aAAa,CAAC,OAAO,IAAI,gBAAgB,CAAC;IAC1D,MAAM,WAAW,GAAG,aAAa,CAAC,WAAW,IAAI,oBAAoB,CAAC;IACtE,MAAM,kBAAkB,GAAG,aAAa,CAAC,kBAAkB,IAAI,4BAA4B,CAAC;IAC5F,MAAM,YAAY,GAAG,aAAa,CAAC,YAAY,IAAI,qBAAqB,CAAC;IACzE,MAAM,WAAW,GAAG,aAAa,CAAC,WAAW,CAAC;IAC9C,MAAM,MAAM,GAAG,IAAI,MAAM,CAAC,aAAa,CAAC,QAAQ,EAAE,aAAa,CAAC,OAAO,CAAC,CAAC;IAEzE,IAAI,iBAAiB,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,CAAC;IAC5D,IAAI,aAAa,GAAG,aAAa,CAAC;IAClC,IAAI,cAAc,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,CAAC;IACzD,IAAI,QAAQ,GAAG,QAAQ,CAAC;IAExB,KAAK,IAAI,SAAS,GAAG,CAAC,EAAE,SAAS,GAAG,aAAa,EAAE,SAAS,EAAE,EAAE,CAAC;QAC/D,0BAA0B;QAC1B,MAAM,QAAQ,GAAG,gBAAgB,CAAC,iBAAiB,CAAC,CAAC;QACrD,MAAM,YAAY,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;QAC1C,MAAM,IAAI,GAAG,4BAA4B,CAAC,YAAY,CAAC,CAAC;QAExD,6BAA6B;QAC7B,IAAI,IAAI,GAAG,QAAQ,EAAE,CAAC;YACpB,QAAQ,GAAG,IAAI,CAAC;YAChB,cAAc,GAAG,IAAI,YAAY,CAAC,iBAAiB,CAAC,CAAC;QACvD,CAAC;QAED,qCAAqC;QACrC,IAAI,WAAW,EAAE,CAAC;YAChB,WAAW,CAAC,SAAS,EAAE,IAAI,EAAE,iBAAiB,CAAC,CAAC;QAClD,CAAC;QAED,0BAA0B;QAC1B,oDAAoD;QACpD,MAAM,cAAc,GAAW,qBAAqB,CAClD,gBAAgB,EAChB,gBAAgB,EAChB,iBAAiB,EACjB,kBAAkB,EAClB,YAAY,EACZ,oBAAoB,CACrB,CAAC;QAEF,0BAA0B;QAC1B,MAAM,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,8BAA8B,CAAC,cAAc,EAAE,QAAQ,CAAC,CAAC;QAE9E,mCAAmC;QACnC,MAAM,cAAc,GAAG,oBAAoB,CAAC,GAAG,CAAC,CAAC;QACjD,MAAM,YAAY,GAAG,UAAU,CAAC,cAAc,CAAC,CAAC;QAEhD,qHAAqH;QACrH,IAAI,wBAAwB,CAAC,YAAY,EAAE,WAAW,EAAE,SAAS,CAAC,EAAE,CAAC;YACnE,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,WAAW,EAAE;gBACxD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,EAAE;gBAC7B,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;gBAC9C,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;gBAC9C,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,aAAa,EAAE;aACzC,CAAC,CAAC;YACH,OAAO,4BAA4B,CACjC,iBAAiB,EACjB,SAAS,EACT,IAAI,EACJ,IAAI,EACJ,YAAY,EACZ,YAAY,EACZ,aAAa,CACd,CAAC;QACJ,CAAC;QAED,gEAAgE;QAChE,IAAI,YAAY,GAAG,KAAK,CAAC;QACzB,OAAO,CAAC,YAAY,IAAI,aAAa,GAAG,wBAAwB,EAAE,CAAC;YACjE,MAAM,UAAU,GAAG,yBAAyB,CAC1C,GAAG,EACH,GAAG,EACH,iBAAiB,EACjB,aAAa,EACb,YAAY,EACZ,gBAAgB,EAChB,IAAI,EACJ,OAAO,EACP,SAAS,EACT,MAAM,CACP,CAAC;YAEF,iCAAiC;YACjC,IAAI,UAAU,CAAC,UAAU,EAAE,CAAC;gBAC1B,OAAO,4BAA4B,CACjC,cAAc,EACd,SAAS,EACT,KAAK,EACL,QAAQ,EACR,YAAY,EACZ,YAAY,EACZ,UAAU,CAAC,SAAS,CACrB,CAAC;YACJ,CAAC;YAED,iEAAiE;YACjE,IAAI,UAAU,CAAC,QAAQ,KAAK,SAAS,IAAI,wBAAwB,CAAC,UAAU,CAAC,QAAQ,EAAE,OAAO,EAAE,SAAS,CAAC,EAAE,CAAC;gBAC3G,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,WAAW,EAAE;oBACxD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,EAAE;oBAC7B,EAAE,GAAG,EAAE,gBAAgB,EA
AE,KAAK,EAAE,YAAY,EAAE;oBAC9C,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;oBAC9C,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,UAAU,CAAC,QAAQ,EAAE;oBACjD,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,aAAa,EAAE;iBACzC,CAAC,CAAC;gBACH,OAAO,4BAA4B,CACjC,iBAAiB,EACjB,SAAS,EACT,IAAI,EACJ,IAAI,EACJ,YAAY,EACZ,YAAY,EACZ,aAAa,CACd,CAAC;YACJ,CAAC;YAED,gBAAgB;YAChB,aAAa,GAAG,UAAU,CAAC,SAAS,CAAC;YAErC,4BAA4B;YAC5B,IAAI,UAAU,CAAC,YAAY,IAAI,UAAU,CAAC,aAAa,EAAE,CAAC;gBACxD,iBAAiB,GAAG,IAAI,YAAY,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC;gBAC/D,YAAY,GAAG,IAAI,CAAC;YACtB,CAAC;QACH,CAAC;QAED,6DAA6D;QAC7D,IAAI,CAAC,YAAY,IAAI,aAAa,IAAI,wBAAwB,EAAE,CAAC;YAC/D,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,iFAAiF,EAAE;gBAC9H,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,aAAa,EAAE;gBACxC,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,EAAE;gBAC7B,EAAE,GAAG,EAAE,YAAY,EAAE,KAAK,EAAE,QAAQ,EAAE;aACvC,CAAC,CAAC;YACH,MAAM,aAAa,GAAG,gBAAgB,CAAC,cAAc,CAAC,CAAC;YACvD,MAAM,iBAAiB,GAAG,UAAU,CAAC,aAAa,CAAC,CAAC;YACpD,MAAM,aAAa,GAAG,gBAAgB;gBACpC,CAAC,CAAC,oBAAoB,CACpB,gBAAgB,CAAC,cAAc,CAAC,CAAC,SAAS,EAAE,CAAC,IAAI,CAAC,oBAAoB,CAAC,aAAa,CAAC,CAAC,CACvF;gBACD,CAAC,CAAC,SAAS,CAAC;YACd,MAAM,iBAAiB,GAAG,aAAa,CAAC,CAAC,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;YAChF,OAAO,4BAA4B,CACjC,cAAc,EACd,SAAS,EACT,KAAK,EACL,QAAQ,EACR,iBAAiB,IAAI,YAAY,EACjC,iBAAiB,EACjB,aAAa,CACd,CAAC;QACJ,CAAC;QAED,iGAAiG;QACjG,MAAM,eAAe,GAAG,gBAAgB,CAAC,iBAAiB,CAAC,CAAC;QAC5D,MAAM,mBAAmB,GAAG,UAAU,CAAC,eAAe,CAAC,CAAC;QACxD,MAAM,WAAW,GAAG,4BAA4B,CAAC,mBAAmB,CAAC,CAAC;QACtE,IAAI,wBAAwB,CAAC,mBAAmB,EAAE,WAAW,EAAE,SAAS,CAAC,EAAE,CAAC;YAC1E,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,WAAW,EAAE;gBACxD,EAAE,GAAG,EAAE,OAAO,EAAE,KAAK,EAAE,WAAW,EAAE;gBACpC,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE;gBAC9C,EAAE,GAAG,EAAE,gBAAgB,EAAE,KAAK,EAAE,mBAAmB,EAAE;gBACrD,EAAE,GAAG,EAAE,SAAS,EAAE,KAAK,EAAE,aAAa,EAAE;aACzC,CAAC,CAAC;YACH,OAAO,4BAA4B,CACjC,iBAAiB,EACjB,SAAS,EACT,IAAI,EACJ,WAAW,EACX,YAAY,EACZ,mBAAmB,EACnB,aAAa,CACd,CAAC;QACJ,CAAC;IACH,CAAC;IAED,0DAA0D;IAC1D,MAAM,aAAa,GAAG,gBAAgB,CAAC,cAAc,CAAC,CAAC;IACvD,MAAM,iBAAiB,GAAG,UAAU,CAAC,aAAa,CAAC,CAAC;IACpD,MAAM,aAAa,GAAG,gBAAgB;QACpC,CAAC,CAAC,oBAAoB,CACpB,gBAAgB,CAAC,cAAc,CAAC,CAAC,SAAS,EAAE,CAAC,IAAI,CAAC,oBAAoB,CAAC,aAAa,CAAC,CAAC,CACvF;QACD,CAAC,CAAC,SAAS,CAAC;IACd,MAAM,iBAAiB,GAAG,aAAa,CAAC,CAAC,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;IAEhF,MAAM,CAAC,IAAI,CAAC,oBAAoB,EAAE,SAAS,EAAE,4BAA4B,EAAE;QACzE,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,aAAa,EAAE;QAC5C,EAAE,GAAG,EAAE,aAAa,EAAE,KAAK,EAAE,QAAQ,EAAE;QACvC,EAAE,GAAG,EAAE,sBAAsB,EAAE,KAAK,EAAE,iBAAiB,IAAI,CAAC,EAAE;QAC9D,EAAE,GAAG,EAAE,sBAAsB,EAAE,KAAK,EAAE,iBAAiB,EAAE;QACzD,EAAE,GAAG,EAAE,eAAe,EAAE,KAAK,EAAE,aAAa,EAAE;KAC/C,CAAC,CAAC;IAEH,OAAO;QACL,UAAU,EAAE,cAAc;QAC1B,UAAU,EAAE,aAAa;QACzB,SAAS,EAAE,KAAK;QAChB,SAAS,EAAE,QAAQ;QACnB,iBAAiB,EAAE,iBAAiB;QACpC,iBAAiB,EAAE,iBAAiB;QACpC,WAAW,EAAE,aAAa;KAC3B,CAAC;AACJ,CAAC"}