agentic-qe 3.7.21 → 3.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/helpers/brain-checkpoint.cjs +4 -1
- package/.claude/helpers/statusline-v3.cjs +3 -1
- package/.claude/skills/skills-manifest.json +1 -1
- package/CHANGELOG.md +45 -0
- package/README.md +2 -14
- package/assets/helpers/statusline-v3.cjs +3 -1
- package/dist/cli/brain-commands.js +6 -10
- package/dist/cli/bundle.js +7441 -4327
- package/dist/cli/commands/audit.d.ts +43 -0
- package/dist/cli/commands/audit.js +125 -0
- package/dist/cli/commands/hooks.js +29 -6
- package/dist/cli/commands/init.js +1 -73
- package/dist/cli/commands/learning.js +270 -13
- package/dist/cli/commands/ruvector-commands.d.ts +15 -0
- package/dist/cli/commands/ruvector-commands.js +271 -0
- package/dist/cli/handlers/init-handler.d.ts +0 -1
- package/dist/cli/handlers/init-handler.js +0 -6
- package/dist/cli/index.js +4 -2
- package/dist/context/sources/defect-source.js +2 -2
- package/dist/context/sources/memory-source.js +2 -2
- package/dist/context/sources/requirements-source.js +2 -2
- package/dist/coordination/behavior-tree/decorators.d.ts +108 -0
- package/dist/coordination/behavior-tree/decorators.js +251 -0
- package/dist/coordination/behavior-tree/index.d.ts +12 -0
- package/dist/coordination/behavior-tree/index.js +15 -0
- package/dist/coordination/behavior-tree/nodes.d.ts +165 -0
- package/dist/coordination/behavior-tree/nodes.js +338 -0
- package/dist/coordination/behavior-tree/qe-trees.d.ts +105 -0
- package/dist/coordination/behavior-tree/qe-trees.js +181 -0
- package/dist/coordination/coherence-action-gate.d.ts +284 -0
- package/dist/coordination/coherence-action-gate.js +512 -0
- package/dist/coordination/index.d.ts +4 -0
- package/dist/coordination/index.js +8 -0
- package/dist/coordination/reasoning-qec.d.ts +315 -0
- package/dist/coordination/reasoning-qec.js +585 -0
- package/dist/coordination/task-executor.d.ts +16 -0
- package/dist/coordination/task-executor.js +99 -0
- package/dist/coordination/workflow-orchestrator.d.ts +29 -0
- package/dist/coordination/workflow-orchestrator.js +42 -0
- package/dist/domains/visual-accessibility/cnn-visual-regression.d.ts +135 -0
- package/dist/domains/visual-accessibility/cnn-visual-regression.js +327 -0
- package/dist/domains/visual-accessibility/index.d.ts +1 -0
- package/dist/domains/visual-accessibility/index.js +4 -0
- package/dist/governance/coherence-validator.d.ts +112 -0
- package/dist/governance/coherence-validator.js +180 -0
- package/dist/governance/index.d.ts +1 -0
- package/dist/governance/index.js +2 -0
- package/dist/governance/witness-chain.d.ts +311 -0
- package/dist/governance/witness-chain.js +509 -0
- package/dist/init/index.d.ts +0 -2
- package/dist/init/index.js +0 -1
- package/dist/init/init-wizard-steps.d.ts +10 -0
- package/dist/init/init-wizard-steps.js +87 -1
- package/dist/init/init-wizard.d.ts +1 -9
- package/dist/init/init-wizard.js +3 -69
- package/dist/init/orchestrator.js +0 -1
- package/dist/init/phases/01-detection.js +0 -27
- package/dist/init/phases/07-hooks.js +6 -4
- package/dist/init/phases/phase-interface.d.ts +0 -1
- package/dist/init/settings-merge.js +1 -1
- package/dist/integrations/browser/qe-dashboard/clustering.d.ts +48 -0
- package/dist/integrations/browser/qe-dashboard/clustering.js +183 -0
- package/dist/integrations/browser/qe-dashboard/index.d.ts +12 -0
- package/dist/integrations/browser/qe-dashboard/index.js +15 -0
- package/dist/integrations/browser/qe-dashboard/pattern-explorer.d.ts +165 -0
- package/dist/integrations/browser/qe-dashboard/pattern-explorer.js +260 -0
- package/dist/integrations/browser/qe-dashboard/wasm-vector-store.d.ts +144 -0
- package/dist/integrations/browser/qe-dashboard/wasm-vector-store.js +277 -0
- package/dist/integrations/ruvector/cognitive-container-codec.d.ts +51 -0
- package/dist/integrations/ruvector/cognitive-container-codec.js +180 -0
- package/dist/integrations/ruvector/cognitive-container.d.ts +125 -0
- package/dist/integrations/ruvector/cognitive-container.js +306 -0
- package/dist/integrations/ruvector/coherence-gate.d.ts +309 -0
- package/dist/integrations/ruvector/coherence-gate.js +631 -0
- package/dist/integrations/ruvector/compressed-hnsw-integration.d.ts +176 -0
- package/dist/integrations/ruvector/compressed-hnsw-integration.js +301 -0
- package/dist/integrations/ruvector/dither-adapter.d.ts +122 -0
- package/dist/integrations/ruvector/dither-adapter.js +295 -0
- package/dist/integrations/ruvector/domain-transfer.d.ts +129 -0
- package/dist/integrations/ruvector/domain-transfer.js +220 -0
- package/dist/integrations/ruvector/feature-flags.d.ts +214 -2
- package/dist/integrations/ruvector/feature-flags.js +167 -2
- package/dist/integrations/ruvector/filter-adapter.d.ts +71 -0
- package/dist/integrations/ruvector/filter-adapter.js +285 -0
- package/dist/integrations/ruvector/gnn-wrapper.d.ts +20 -0
- package/dist/integrations/ruvector/gnn-wrapper.js +40 -0
- package/dist/integrations/ruvector/hnsw-health-monitor.d.ts +237 -0
- package/dist/integrations/ruvector/hnsw-health-monitor.js +394 -0
- package/dist/integrations/ruvector/index.d.ts +8 -2
- package/dist/integrations/ruvector/index.js +18 -2
- package/dist/integrations/ruvector/interfaces.d.ts +40 -0
- package/dist/integrations/ruvector/sona-persistence.d.ts +54 -0
- package/dist/integrations/ruvector/sona-persistence.js +162 -0
- package/dist/integrations/ruvector/sona-three-loop.d.ts +392 -0
- package/dist/integrations/ruvector/sona-three-loop.js +814 -0
- package/dist/integrations/ruvector/sona-wrapper.d.ts +97 -0
- package/dist/integrations/ruvector/sona-wrapper.js +147 -3
- package/dist/integrations/ruvector/spectral-math.d.ts +101 -0
- package/dist/integrations/ruvector/spectral-math.js +254 -0
- package/dist/integrations/ruvector/temporal-compression.d.ts +163 -0
- package/dist/integrations/ruvector/temporal-compression.js +318 -0
- package/dist/integrations/ruvector/thompson-sampler.d.ts +61 -0
- package/dist/integrations/ruvector/thompson-sampler.js +118 -0
- package/dist/integrations/ruvector/transfer-coherence-stub.d.ts +80 -0
- package/dist/integrations/ruvector/transfer-coherence-stub.js +63 -0
- package/dist/integrations/ruvector/transfer-verification.d.ts +119 -0
- package/dist/integrations/ruvector/transfer-verification.js +115 -0
- package/dist/kernel/hnsw-adapter.d.ts +52 -1
- package/dist/kernel/hnsw-adapter.js +139 -4
- package/dist/kernel/hnsw-index-provider.d.ts +5 -0
- package/dist/kernel/native-hnsw-backend.d.ts +110 -0
- package/dist/kernel/native-hnsw-backend.js +408 -0
- package/dist/kernel/unified-memory.js +5 -6
- package/dist/learning/aqe-learning-engine.d.ts +2 -0
- package/dist/learning/aqe-learning-engine.js +65 -0
- package/dist/learning/experience-capture-middleware.js +20 -0
- package/dist/learning/experience-capture.d.ts +10 -0
- package/dist/learning/experience-capture.js +34 -0
- package/dist/learning/index.d.ts +2 -2
- package/dist/learning/index.js +4 -4
- package/dist/learning/metrics-tracker.d.ts +11 -0
- package/dist/learning/metrics-tracker.js +29 -13
- package/dist/learning/pattern-lifecycle.d.ts +30 -1
- package/dist/learning/pattern-lifecycle.js +92 -20
- package/dist/learning/pattern-store.d.ts +8 -0
- package/dist/learning/pattern-store.js +8 -2
- package/dist/learning/qe-unified-memory.js +1 -28
- package/dist/learning/regret-tracker.d.ts +201 -0
- package/dist/learning/regret-tracker.js +361 -0
- package/dist/mcp/bundle.js +5915 -474
- package/dist/routing/index.d.ts +4 -2
- package/dist/routing/index.js +3 -1
- package/dist/routing/neural-tiny-dancer-router.d.ts +268 -0
- package/dist/routing/neural-tiny-dancer-router.js +514 -0
- package/dist/routing/queen-integration.js +5 -5
- package/dist/routing/routing-config.d.ts +6 -0
- package/dist/routing/routing-config.js +1 -0
- package/dist/routing/simple-neural-router.d.ts +76 -0
- package/dist/routing/simple-neural-router.js +202 -0
- package/dist/routing/tiny-dancer-router.d.ts +20 -1
- package/dist/routing/tiny-dancer-router.js +21 -2
- package/dist/test-scheduling/dag-attention-scheduler.d.ts +81 -0
- package/dist/test-scheduling/dag-attention-scheduler.js +358 -0
- package/dist/test-scheduling/dag-attention-types.d.ts +81 -0
- package/dist/test-scheduling/dag-attention-types.js +10 -0
- package/dist/test-scheduling/index.d.ts +1 -0
- package/dist/test-scheduling/index.js +4 -0
- package/dist/test-scheduling/pipeline.d.ts +8 -0
- package/dist/test-scheduling/pipeline.js +28 -0
- package/package.json +6 -2
- package/dist/cli/commands/migrate.d.ts +0 -9
- package/dist/cli/commands/migrate.js +0 -566
- package/dist/init/init-wizard-migration.d.ts +0 -52
- package/dist/init/init-wizard-migration.js +0 -345
- package/dist/init/migration/config-migrator.d.ts +0 -31
- package/dist/init/migration/config-migrator.js +0 -149
- package/dist/init/migration/data-migrator.d.ts +0 -72
- package/dist/init/migration/data-migrator.js +0 -232
- package/dist/init/migration/detector.d.ts +0 -44
- package/dist/init/migration/detector.js +0 -105
- package/dist/init/migration/index.d.ts +0 -8
- package/dist/init/migration/index.js +0 -8
- package/dist/learning/v2-to-v3-migration.d.ts +0 -86
- package/dist/learning/v2-to-v3-migration.js +0 -529
|
@@ -0,0 +1,814 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SONA Three-Loop Coordination Engine
|
|
3
|
+
*
|
|
4
|
+
* Implements the three-loop architecture for continuous neural adaptation:
|
|
5
|
+
*
|
|
6
|
+
* 1. **Instant Loop** - Per-request MicroLoRA adaptation (<100us)
|
|
7
|
+
* Applies rank-1 weight updates for real-time personalization.
|
|
8
|
+
*
|
|
9
|
+
* 2. **Background Loop** - Periodic consolidation (every N requests)
|
|
10
|
+
* Merges instant adaptations into base model, runs EWC++ to prevent
|
|
11
|
+
* catastrophic forgetting, and updates the Fisher Information Matrix.
|
|
12
|
+
*
|
|
13
|
+
* 3. **Coordination Loop** - Cross-agent state synchronization
|
|
14
|
+
* Shares learned patterns across agent instances and manages
|
|
15
|
+
* domain-specific adaptation state.
|
|
16
|
+
*
|
|
17
|
+
* @module integrations/ruvector/sona-three-loop
|
|
18
|
+
*/
|
|
19
|
+
import { createRequire } from 'module';
|
|
20
|
+
import { LoggerFactory } from '../../logging/index.js';
|
|
21
|
+
const logger = LoggerFactory.create('sona-three-loop');
|
|
22
|
+
const esmRequire = createRequire(import.meta.url);
|
|
23
|
+
// ============================================================================
|
|
24
|
+
// WASM MicroLoRA Loader
|
|
25
|
+
// ============================================================================
|
|
26
|
+
// Cached module handle plus a one-shot flag so we only try to load once.
let wasmLoraModule = null;
let wasmLoraLoadAttempted = false;

/**
 * Lazily load @ruvector/learning-wasm for WASM-accelerated MicroLoRA.
 *
 * The first call attempts to require the package, locate its .wasm binary
 * next to the resolved entry point, and initialize it synchronously.
 * Subsequent calls return the cached result without retrying.
 *
 * @returns The initialized WASM module, or null when unavailable.
 */
function loadWasmMicroLoRA() {
  if (!wasmLoraLoadAttempted) {
    wasmLoraLoadAttempted = true;
    try {
      const mod = esmRequire('@ruvector/learning-wasm');
      const fs = esmRequire('fs');
      const path = esmRequire('path');
      // The wasm binary ships alongside the package's resolved entry file.
      const pkgDir = path.dirname(esmRequire.resolve('@ruvector/learning-wasm'));
      const wasmPath = path.join(pkgDir, 'ruvector_learning_wasm_bg.wasm');
      mod.initSync({ module: fs.readFileSync(wasmPath) });
      wasmLoraModule = mod;
      logger.info('WASM MicroLoRA loaded from @ruvector/learning-wasm (6.4x faster than TS)');
    }
    catch {
      // Best-effort: absence of the optional WASM package is not an error.
      wasmLoraModule = null;
      logger.debug('@ruvector/learning-wasm not available, using TypeScript MicroLoRA');
    }
  }
  return wasmLoraModule;
}
|
|
51
|
+
/** Reset WASM loader state (for testing) */
export function resetWasmLoraLoader() {
  wasmLoraLoadAttempted = false;
  wasmLoraModule = null;
}
|
|
56
|
+
/**
 * Default three-loop configuration.
 * Individual fields can be overridden via the SONAThreeLoopEngine config.
 */
export const DEFAULT_THREE_LOOP_CONFIG = {
  dimension: 384,                   // weight/feature vector dimensionality
  microLoraLr: 0.001,               // instant-loop (MicroLoRA) learning rate
  consolidationInterval: 100,       // presumably requests per consolidation cycle — confirm against shouldConsolidate()
  ewcLambda: 1000.0,                // EWC++ regularization strength
  taskBoundaryZScoreThreshold: 2.5, // gradient z-score that flags a task boundary
  fisherDecay: 0.9,                 // blend factor for online Fisher updates
  fisherSampleSize: 200,            // max buffered gradient proxies for Fisher estimation
  importanceThreshold: 0.01,        // Fisher value above which a parameter is "protected"
};
|
|
67
|
+
// ============================================================================
|
|
68
|
+
// MicroLoRA (Rank-1 Adaptation)
|
|
69
|
+
// ============================================================================
|
|
70
|
+
/**
 * Micro-Linear Adaptation for per-request weight updates.
 *
 * Produces W' = baseWeights + accumulated delta + lr * features, element
 * by element. This is an element-wise approximation rather than a true
 * rank-1 outer-product LoRA — for that, use native @ruvector/sona.
 *
 * Kept deliberately lightweight so the hot path stays under 100us.
 */
export class MicroLoRA {
  /** Accumulated adaptation delta since the last consolidation */
  adaptationVector;
  /** Base weights; the background loop folds the delta into these */
  baseWeights;
  /** Learning rate applied to each request's feature direction */
  lr;
  /** How many adapt() calls have occurred since the last consolidation */
  adaptationCount = 0;

  constructor(dimension, lr = 0.001) {
    this.adaptationVector = new Float32Array(dimension);
    this.baseWeights = new Float32Array(dimension);
    this.lr = lr;
  }

  /**
   * Apply an instant adaptation for one request.
   *
   * @param features - Feature vector for the current request; entries past
   *   the weight dimension are ignored, missing entries act as zero.
   * @returns Adapted weights = baseWeights + delta + lr * features
   */
  adapt(features) {
    const dim = this.baseWeights.length;
    const overlap = Math.min(features.length, dim);
    const adapted = new Float32Array(dim);
    let i = 0;
    // Region covered by the feature vector.
    for (; i < overlap; i++) {
      adapted[i] = this.baseWeights[i] + this.adaptationVector[i] + this.lr * features[i];
    }
    // Remainder: features contribute nothing.
    for (; i < dim; i++) {
      adapted[i] = this.baseWeights[i] + this.adaptationVector[i];
    }
    // Fold this request's direction into the running delta.
    for (let j = 0; j < overlap; j++) {
      this.adaptationVector[j] += this.lr * features[j];
    }
    this.adaptationCount += 1;
    return adapted;
  }

  /**
   * Fold the accumulated delta into the base weights and clear it.
   * Invoked by the background consolidation loop.
   *
   * @returns Number of adapt() calls that were merged
   */
  consolidate() {
    const flushed = this.adaptationCount;
    this.adaptationVector.forEach((delta, idx) => {
      this.baseWeights[idx] += delta;
    });
    this.adaptationVector.fill(0);
    this.adaptationCount = 0;
    return flushed;
  }

  /** Number of adapt() calls since the last consolidation. */
  getAdaptationCount() {
    return this.adaptationCount;
  }

  /** Current effective weights: base plus the pending delta. */
  getEffectiveWeights() {
    return this.baseWeights.map((w, idx) => w + this.adaptationVector[idx]);
  }

  /** L2 norm of the pending adaptation delta. */
  getAdaptationMagnitude() {
    const sumSq = this.adaptationVector.reduce((acc, v) => acc + v * v, 0);
    return Math.sqrt(sumSq);
  }

  /** Drop the pending delta and counter; base weights are untouched. */
  resetAdaptation() {
    this.adaptationVector.fill(0);
    this.adaptationCount = 0;
  }
}
|
|
158
|
+
// ============================================================================
|
|
159
|
+
// EWC++ (Elastic Weight Consolidation)
|
|
160
|
+
// ============================================================================
|
|
161
|
+
/**
 * EWC++ (online Elastic Weight Consolidation) guard against catastrophic
 * forgetting across task boundaries.
 *
 * Maintains a diagonal Fisher Information estimate of per-parameter
 * importance. Task boundaries are flagged when the gradient-magnitude
 * z-score spikes, and the Fisher diagonal is refreshed by online blending:
 *
 *   F_new = decay * F_old + (1 - decay) * F_sample
 *
 * Deviation from the snapshot of optimal parameters is penalized via:
 *
 *   L_EWC = (lambda / 2) * sum_i F_i * (theta_i - theta*_i)^2
 */
export class EWCPlusPlus {
  /** Diagonal Fisher Information Matrix (per-parameter importance) */
  fisherMatrix;
  /** Parameter snapshot taken at the most recent Fisher update */
  optimalParams;
  /** Regularization strength (lambda) */
  lambda;
  /** Blend factor between old and freshly-sampled Fisher estimates */
  fisherDecay;
  /** Gradient-magnitude z-score above which a task boundary is declared */
  zScoreThreshold;
  /** Fisher value above which a parameter counts as "protected" */
  importanceThreshold;
  /** Sliding window of recent gradient L2 magnitudes (bounded at 100) */
  gradientHistory = [];
  gradientMean = 0;
  gradientVariance = 0;
  /** Task boundaries flagged so far */
  taskBoundaryCount = 0;
  /** Completed Fisher-update cycles */
  consolidationCount = 0;

  constructor(dimension, lambda = 1000.0, fisherDecay = 0.9, zScoreThreshold = 2.5, importanceThreshold = 0.01) {
    this.fisherMatrix = new Float32Array(dimension);
    this.optimalParams = new Float32Array(dimension);
    this.lambda = lambda;
    this.fisherDecay = fisherDecay;
    this.zScoreThreshold = zScoreThreshold;
    this.importanceThreshold = importanceThreshold;
  }

  /**
   * Decide whether the latest gradient signals a task boundary.
   *
   * The gradient's L2 magnitude is z-scored against the window statistics
   * as they stood BEFORE this sample is added; a spike beyond the
   * threshold indicates a distribution shift (i.e. a new task).
   *
   * @param gradients - Current gradient vector
   * @returns true when a boundary is detected
   */
  detectTaskBoundary(gradients) {
    let sumSq = 0;
    for (const g of gradients) {
      sumSq += g * g;
    }
    const magnitude = Math.sqrt(sumSq);

    // Warm-up: with fewer than 5 samples just record and move on.
    if (this.gradientHistory.length < 5) {
      this.gradientHistory.push(magnitude);
      this.updateGradientStats();
      return false;
    }

    const stdDev = Math.sqrt(this.gradientVariance);
    const deviation = Math.abs(magnitude - this.gradientMean);
    let zScore = 0;
    if (stdDev > 1e-10) {
      zScore = deviation / stdDev;
    }
    else if (deviation > 1e-10 && this.gradientMean > 1e-10) {
      // Degenerate window (zero variance): score the deviation against 1%
      // of the mean so a genuine jump still registers as a boundary.
      zScore = deviation / (this.gradientMean * 0.01 + 1e-10);
    }

    // Record the new sample, keeping the window bounded at 100 entries.
    this.gradientHistory.push(magnitude);
    if (this.gradientHistory.length > 100) {
      this.gradientHistory.shift();
    }
    this.updateGradientStats();

    if (zScore <= this.zScoreThreshold) {
      return false;
    }
    this.taskBoundaryCount++;
    return true;
  }

  /**
   * EWC++ regularization loss for the given parameters:
   *   (lambda / 2) * sum_i F_i * (theta_i - theta*_i)^2
   *
   * @param currentParams - Current model parameters
   * @returns Scalar penalty
   */
  computeLoss(currentParams) {
    const dim = Math.min(currentParams.length, this.fisherMatrix.length, this.optimalParams.length);
    let weighted = 0;
    for (let i = 0; i < dim; i++) {
      const delta = currentParams[i] - this.optimalParams[i];
      weighted += this.fisherMatrix[i] * delta * delta;
    }
    return (this.lambda / 2) * weighted;
  }

  /**
   * Refresh the Fisher diagonal from gradient samples and snapshot the
   * current parameters as the new optimum.
   *
   * The sample Fisher is the mean of squared gradients (F_i ~ E[g_i^2]),
   * blended online as F = decay * F_old + (1 - decay) * F_sample.
   * Samples should be outcome-weighted (REINFORCE-style) gradient proxies,
   * not raw request features — see recordOutcome() on the engine.
   *
   * @param gradientSamples - Gradient vectors used to estimate the Fisher
   * @param currentParams - Parameters to record as optimal
   */
  updateFisher(gradientSamples, currentParams) {
    const sampleCount = gradientSamples.length;
    if (sampleCount === 0) {
      return;
    }
    const dim = this.fisherMatrix.length;
    const sampleFisher = new Float32Array(dim);
    for (const grad of gradientSamples) {
      const upper = Math.min(grad.length, dim);
      for (let i = 0; i < upper; i++) {
        sampleFisher[i] += grad[i] * grad[i];
      }
    }
    for (let i = 0; i < dim; i++) {
      sampleFisher[i] /= sampleCount;
    }
    // Online EWC++ blending of old and sampled estimates.
    const keep = this.fisherDecay;
    for (let i = 0; i < dim; i++) {
      this.fisherMatrix[i] = keep * this.fisherMatrix[i] + (1 - keep) * sampleFisher[i];
    }
    // Snapshot the current parameters as the new optimum.
    const paramUpper = Math.min(currentParams.length, dim);
    for (let i = 0; i < paramUpper; i++) {
      this.optimalParams[i] = currentParams[i];
    }
    this.consolidationCount++;
  }

  /** Snapshot of monitoring metrics. */
  getMetrics() {
    const dim = this.fisherMatrix.length;
    let trace = 0;
    let maxImportance = 0;
    let protectedParams = 0;
    for (const importance of this.fisherMatrix) {
      trace += importance;
      if (importance > maxImportance) {
        maxImportance = importance;
      }
      if (importance > this.importanceThreshold) {
        protectedParams++;
      }
    }
    return {
      regularizationLoss: 0, // Computed on demand via computeLoss()
      taskBoundariesDetected: this.taskBoundaryCount,
      fisherTrace: trace,
      avgFisherImportance: dim > 0 ? trace / dim : 0,
      maxFisherImportance: maxImportance,
      protectedParams,
      consolidationCycles: this.consolidationCount,
      lambda: this.lambda,
    };
  }

  /** Number of task boundaries detected so far. */
  getTaskBoundaryCount() {
    return this.taskBoundaryCount;
  }

  /** Copy of the Fisher diagonal. */
  getFisherDiagonal() {
    return new Float32Array(this.fisherMatrix);
  }

  /** Copy of the optimal-parameter snapshot. */
  getOptimalParams() {
    return new Float32Array(this.optimalParams);
  }

  /**
   * Restore Fisher diagonal and optimal parameters from persisted data.
   * Inputs longer than the configured dimension are truncated.
   */
  loadFisher(fisher, optimal) {
    const dim = this.fisherMatrix.length;
    for (let i = Math.min(fisher.length, dim) - 1; i >= 0; i--) {
      this.fisherMatrix[i] = fisher[i];
    }
    for (let i = Math.min(optimal.length, dim) - 1; i >= 0; i--) {
      this.optimalParams[i] = optimal[i];
    }
  }

  /**
   * Recompute mean and sample variance of the gradient-magnitude window
   * (straightforward two-pass computation over the bounded history).
   */
  updateGradientStats() {
    const n = this.gradientHistory.length;
    if (n === 0) {
      return;
    }
    const mean = this.gradientHistory.reduce((acc, v) => acc + v, 0) / n;
    let sumSqDiff = 0;
    for (const v of this.gradientHistory) {
      sumSqDiff += (v - mean) ** 2;
    }
    this.gradientMean = mean;
    this.gradientVariance = n > 1 ? sumSqDiff / (n - 1) : 0;
  }
}
|
|
379
|
+
// ============================================================================
|
|
380
|
+
// SONA Three-Loop Engine
|
|
381
|
+
// ============================================================================
|
|
382
|
+
/**
|
|
383
|
+
* Three-loop coordination engine for SONA neural adaptation.
|
|
384
|
+
*
|
|
385
|
+
* Combines MicroLoRA instant adaptation, EWC++ background consolidation,
|
|
386
|
+
* and cross-agent coordination into a unified engine.
|
|
387
|
+
*/
|
|
388
|
+
export class SONAThreeLoopEngine {
|
|
389
|
+
config;
|
|
390
|
+
microLora;
|
|
391
|
+
ewc;
|
|
392
|
+
nativeEngine;
|
|
393
|
+
wasmLora = null;
|
|
394
|
+
requestCount = 0;
|
|
395
|
+
lastConsolidationRequest = 0;
|
|
396
|
+
peerStates = new Map();
|
|
397
|
+
gradientBuffer = [];
|
|
398
|
+
lastFeatures = null;
|
|
399
|
+
constructor(config = {}, nativeEngine) {
|
|
400
|
+
this.config = { ...DEFAULT_THREE_LOOP_CONFIG, ...config };
|
|
401
|
+
this.nativeEngine = nativeEngine ?? null;
|
|
402
|
+
this.microLora = new MicroLoRA(this.config.dimension, this.config.microLoraLr);
|
|
403
|
+
this.ewc = new EWCPlusPlus(this.config.dimension, this.config.ewcLambda, this.config.fisherDecay, this.config.taskBoundaryZScoreThreshold, this.config.importanceThreshold);
|
|
404
|
+
// Try WASM MicroLoRA first (6.4x faster than TS, 71x faster than NAPI)
|
|
405
|
+
const wasmMod = loadWasmMicroLoRA();
|
|
406
|
+
if (wasmMod) {
|
|
407
|
+
try {
|
|
408
|
+
this.wasmLora = new wasmMod.WasmMicroLoRA(this.config.dimension, 1, // rank-1
|
|
409
|
+
this.config.microLoraLr);
|
|
410
|
+
logger.info('SONA Three-Loop using WASM MicroLoRA (0.07us/adapt)');
|
|
411
|
+
}
|
|
412
|
+
catch (err) {
|
|
413
|
+
logger.debug('WASM MicroLoRA creation failed, falling back', { error: String(err) });
|
|
414
|
+
this.wasmLora = null;
|
|
415
|
+
}
|
|
416
|
+
}
|
|
417
|
+
if (!this.wasmLora && this.nativeEngine) {
|
|
418
|
+
logger.info('SONA Three-Loop using native @ruvector/sona engine for MicroLoRA delegation');
|
|
419
|
+
}
|
|
420
|
+
}
|
|
421
|
+
// ==========================================================================
|
|
422
|
+
// Loop 1: Instant Adaptation
|
|
423
|
+
// ==========================================================================
|
|
424
|
+
/**
|
|
425
|
+
* Perform instant per-request MicroLoRA adaptation.
|
|
426
|
+
*
|
|
427
|
+
* This is the hot path and must complete in <100us.
|
|
428
|
+
* When a native @ruvector/sona engine is available, delegates to the
|
|
429
|
+
* Rust implementation for true rank-1 LoRA. Otherwise falls back to
|
|
430
|
+
* the TypeScript element-wise approximation.
|
|
431
|
+
*
|
|
432
|
+
* @param requestFeatures - Feature vector for the current request
|
|
433
|
+
* @returns Adaptation result with adapted weights and timing
|
|
434
|
+
*/
|
|
435
|
+
instantAdapt(requestFeatures) {
|
|
436
|
+
const startTime = performance.now();
|
|
437
|
+
let adaptedWeights;
|
|
438
|
+
if (this.wasmLora) {
|
|
439
|
+
// WASM MicroLoRA: 0.07us/adapt — 6.4x faster than TS, real rank-1 LoRA
|
|
440
|
+
try {
|
|
441
|
+
const input = new Float32Array(requestFeatures);
|
|
442
|
+
const wasmResult = this.wasmLora.adapt(input);
|
|
443
|
+
if (wasmResult && wasmResult.length > 0) {
|
|
444
|
+
adaptedWeights = wasmResult;
|
|
445
|
+
// Still update TypeScript MicroLoRA for metric tracking / EWC compatibility
|
|
446
|
+
this.microLora.adapt(requestFeatures);
|
|
447
|
+
}
|
|
448
|
+
else {
|
|
449
|
+
// WASM returned empty/undefined — fall through to TS
|
|
450
|
+
adaptedWeights = this.microLora.adapt(requestFeatures);
|
|
451
|
+
}
|
|
452
|
+
}
|
|
453
|
+
catch {
|
|
454
|
+
adaptedWeights = this.microLora.adapt(requestFeatures);
|
|
455
|
+
}
|
|
456
|
+
}
|
|
457
|
+
else if (this.nativeEngine) {
|
|
458
|
+
// NAPI SonaEngine: slower due to boundary crossing but full engine
|
|
459
|
+
try {
|
|
460
|
+
const nativeResult = this.nativeEngine.applyMicroLora(requestFeatures);
|
|
461
|
+
adaptedWeights = new Float32Array(nativeResult);
|
|
462
|
+
this.microLora.adapt(requestFeatures);
|
|
463
|
+
}
|
|
464
|
+
catch {
|
|
465
|
+
adaptedWeights = this.microLora.adapt(requestFeatures);
|
|
466
|
+
}
|
|
467
|
+
}
|
|
468
|
+
else {
|
|
469
|
+
// TypeScript MicroLoRA: 0.43us/adapt — no dependencies
|
|
470
|
+
adaptedWeights = this.microLora.adapt(requestFeatures);
|
|
471
|
+
}
|
|
472
|
+
const magnitude = this.microLora.getAdaptationMagnitude();
|
|
473
|
+
this.requestCount++;
|
|
474
|
+
// Store features for later use by recordOutcome() — do NOT buffer as
|
|
475
|
+
// gradient proxies here. Proper gradient estimation requires an
|
|
476
|
+
// outcome-weighted REINFORCE signal via recordOutcome().
|
|
477
|
+
this.lastFeatures = [...requestFeatures];
|
|
478
|
+
const latencyUs = (performance.now() - startTime) * 1000;
|
|
479
|
+
return {
|
|
480
|
+
adaptedWeights,
|
|
481
|
+
latencyUs,
|
|
482
|
+
applied: true,
|
|
483
|
+
magnitude,
|
|
484
|
+
requestIndex: this.requestCount,
|
|
485
|
+
};
|
|
486
|
+
}
|
|
487
|
+
/**
|
|
488
|
+
* Record the outcome of a request for REINFORCE-style gradient estimation.
|
|
489
|
+
* Must be called after instantAdapt() with the reward signal.
|
|
490
|
+
*
|
|
491
|
+
* Computes gradient proxy: reward * last_features (REINFORCE estimator)
|
|
492
|
+
* This is what gets buffered for Fisher estimation, NOT raw features.
|
|
493
|
+
*
|
|
494
|
+
* @param reward - Scalar reward (e.g., 1.0 for success, -1.0 for failure, 0.0 for neutral)
|
|
495
|
+
* @param requestIndex - The requestIndex from the AdaptationResult (for matching)
|
|
496
|
+
*/
|
|
497
|
+
recordOutcome(reward, requestIndex) {
|
|
498
|
+
if (this.lastFeatures === null) {
|
|
499
|
+
logger.warn('recordOutcome called without a preceding instantAdapt()');
|
|
500
|
+
return;
|
|
501
|
+
}
|
|
502
|
+
// REINFORCE gradient proxy: g = reward * features
|
|
503
|
+
const dim = this.lastFeatures.length;
|
|
504
|
+
const gradientProxy = new Float32Array(dim);
|
|
505
|
+
for (let i = 0; i < dim; i++) {
|
|
506
|
+
gradientProxy[i] = reward * this.lastFeatures[i];
|
|
507
|
+
}
|
|
508
|
+
this.gradientBuffer.push(gradientProxy);
|
|
509
|
+
// Keep buffer bounded
|
|
510
|
+
if (this.gradientBuffer.length > this.config.fisherSampleSize) {
|
|
511
|
+
this.gradientBuffer.shift();
|
|
512
|
+
}
|
|
513
|
+
this.lastFeatures = null; // Clear after use
|
|
514
|
+
}
|
|
515
|
+
// ==========================================================================
|
|
516
|
+
// Loop 2: Background Consolidation
|
|
517
|
+
// ==========================================================================
|
|
518
|
+
/**
|
|
519
|
+
* Run background consolidation cycle.
|
|
520
|
+
*
|
|
521
|
+
* This merges accumulated MicroLoRA adaptations into base weights,
|
|
522
|
+
* checks for task boundaries, and runs EWC++ if a boundary is detected.
|
|
523
|
+
* When a native @ruvector/sona engine is available, also triggers
|
|
524
|
+
* native background learning via forceLearn() and tick().
|
|
525
|
+
* Should be called periodically (every N requests via shouldConsolidate()).
|
|
526
|
+
*
|
|
527
|
+
* @returns Consolidation result with metrics
|
|
528
|
+
*/
|
|
529
|
+
backgroundConsolidate() {
|
|
530
|
+
const startTime = performance.now();
|
|
531
|
+
// Trigger native background learning if available
|
|
532
|
+
if (this.nativeEngine) {
|
|
533
|
+
try {
|
|
534
|
+
this.nativeEngine.forceLearn();
|
|
535
|
+
this.nativeEngine.tick();
|
|
536
|
+
}
|
|
537
|
+
catch (err) {
|
|
538
|
+
logger.warn('Native SONA background learning failed, continuing with TypeScript', { error: String(err) });
|
|
539
|
+
}
|
|
540
|
+
}
|
|
541
|
+
// Compute EWC loss before consolidation
|
|
542
|
+
const ewcLossBefore = this.ewc.computeLoss(this.microLora.getEffectiveWeights());
|
|
543
|
+
// Check for task boundary using buffered gradients
|
|
544
|
+
let taskBoundaryDetected = false;
|
|
545
|
+
if (this.gradientBuffer.length > 0) {
|
|
546
|
+
const latestGradient = this.gradientBuffer[this.gradientBuffer.length - 1];
|
|
547
|
+
taskBoundaryDetected = this.ewc.detectTaskBoundary(latestGradient);
|
|
548
|
+
}
|
|
549
|
+
// If task boundary detected, update Fisher before consolidation
|
|
550
|
+
if (taskBoundaryDetected && this.gradientBuffer.length > 0) {
|
|
551
|
+
this.ewc.updateFisher(this.gradientBuffer, this.microLora.getEffectiveWeights());
|
|
552
|
+
logger.info('Task boundary detected, Fisher updated', {
|
|
553
|
+
boundaries: this.ewc.getTaskBoundaryCount(),
|
|
554
|
+
requestCount: this.requestCount,
|
|
555
|
+
});
|
|
556
|
+
}
|
|
557
|
+
// Validate gradient quality — warn if all samples are identical
|
|
558
|
+
if (this.gradientBuffer.length > 1) {
|
|
559
|
+
let hasVariance = false;
|
|
560
|
+
const first = this.gradientBuffer[0];
|
|
561
|
+
for (let s = 1; s < this.gradientBuffer.length && !hasVariance; s++) {
|
|
562
|
+
for (let i = 0; i < first.length; i++) {
|
|
563
|
+
if (Math.abs(this.gradientBuffer[s][i] - first[i]) > 1e-8) {
|
|
564
|
+
hasVariance = true;
|
|
565
|
+
break;
|
|
566
|
+
}
|
|
567
|
+
}
|
|
568
|
+
}
|
|
569
|
+
if (!hasVariance) {
|
|
570
|
+
logger.warn('All gradient samples identical — Fisher estimate may be poor. Ensure recordOutcome() is called with diverse rewards.');
|
|
571
|
+
}
|
|
572
|
+
}
|
|
573
|
+
// Consolidate MicroLoRA adaptations into base weights
|
|
574
|
+
const adaptationsMerged = this.microLora.consolidate();
|
|
575
|
+
// Apply EWC++ regularization to consolidated weights
|
|
576
|
+
this.applyEWCRegularization();
|
|
577
|
+
// Compute EWC loss after consolidation
|
|
578
|
+
const ewcLossAfter = this.ewc.computeLoss(this.microLora.getEffectiveWeights());
|
|
579
|
+
this.lastConsolidationRequest = this.requestCount;
|
|
580
|
+
// Clear gradient buffer after consolidation
|
|
581
|
+
this.gradientBuffer = [];
|
|
582
|
+
const durationMs = performance.now() - startTime;
|
|
583
|
+
return {
|
|
584
|
+
consolidated: adaptationsMerged > 0 || taskBoundaryDetected,
|
|
585
|
+
adaptationsMerged,
|
|
586
|
+
ewcLossBefore,
|
|
587
|
+
ewcLossAfter,
|
|
588
|
+
taskBoundaryDetected,
|
|
589
|
+
durationMs,
|
|
590
|
+
};
|
|
591
|
+
}
|
|
592
|
+
/**
|
|
593
|
+
* Check if background consolidation is due.
|
|
594
|
+
*/
|
|
595
|
+
shouldConsolidate() {
|
|
596
|
+
return (this.requestCount - this.lastConsolidationRequest >=
|
|
597
|
+
this.config.consolidationInterval);
|
|
598
|
+
}
|
|
599
|
+
// ==========================================================================
|
|
600
|
+
// Loop 3: Coordination
|
|
601
|
+
// ==========================================================================
|
|
602
|
+
/**
|
|
603
|
+
* Synchronize state with peer agents.
|
|
604
|
+
*
|
|
605
|
+
* Merges peer adaptation vectors using Fisher-weighted averaging:
|
|
606
|
+
* For each parameter, the peer with higher Fisher importance has
|
|
607
|
+
* more influence on the merged value.
|
|
608
|
+
*
|
|
609
|
+
* @param peerStates - Array of peer states to synchronize with
|
|
610
|
+
*/
|
|
611
|
+
syncWithPeers(peerStates) {
|
|
612
|
+
if (peerStates.length === 0)
|
|
613
|
+
return;
|
|
614
|
+
// Store peer states for reference
|
|
615
|
+
for (const peer of peerStates) {
|
|
616
|
+
this.peerStates.set(peer.peerId, peer);
|
|
617
|
+
}
|
|
618
|
+
const dim = this.config.dimension;
|
|
619
|
+
const mergedAdaptation = new Float32Array(dim);
|
|
620
|
+
const totalWeight = new Float32Array(dim);
|
|
621
|
+
// Start with our own state (weighted by our Fisher)
|
|
622
|
+
const ourFisher = this.ewc.getFisherDiagonal();
|
|
623
|
+
const ourAdaptation = this.microLora.adaptationVector;
|
|
624
|
+
for (let i = 0; i < dim; i++) {
|
|
625
|
+
const weight = ourFisher[i] + 1e-10; // Avoid division by zero
|
|
626
|
+
mergedAdaptation[i] += weight * ourAdaptation[i];
|
|
627
|
+
totalWeight[i] += weight;
|
|
628
|
+
}
|
|
629
|
+
// Add peer contributions weighted by their Fisher
|
|
630
|
+
for (const peer of peerStates) {
|
|
631
|
+
const peerDim = Math.min(peer.adaptationVector.length, dim);
|
|
632
|
+
for (let i = 0; i < peerDim; i++) {
|
|
633
|
+
const weight = (i < peer.fisherDiagonal.length ? peer.fisherDiagonal[i] : 0) + 1e-10;
|
|
634
|
+
mergedAdaptation[i] += weight * peer.adaptationVector[i];
|
|
635
|
+
totalWeight[i] += weight;
|
|
636
|
+
}
|
|
637
|
+
}
|
|
638
|
+
// Normalize by total weight
|
|
639
|
+
for (let i = 0; i < dim; i++) {
|
|
640
|
+
this.microLora.adaptationVector[i] = mergedAdaptation[i] / totalWeight[i];
|
|
641
|
+
}
|
|
642
|
+
logger.debug('Synced with peers', {
|
|
643
|
+
peerCount: peerStates.length,
|
|
644
|
+
peersStored: this.peerStates.size,
|
|
645
|
+
});
|
|
646
|
+
}
|
|
647
|
+
/**
|
|
648
|
+
* Get our current state for sharing with peers.
|
|
649
|
+
*/
|
|
650
|
+
getLocalPeerState(peerId, domain) {
|
|
651
|
+
return {
|
|
652
|
+
peerId,
|
|
653
|
+
domain,
|
|
654
|
+
adaptationVector: new Float32Array(this.microLora.adaptationVector),
|
|
655
|
+
fisherDiagonal: this.ewc.getFisherDiagonal(),
|
|
656
|
+
requestCount: this.requestCount,
|
|
657
|
+
lastUpdateMs: Date.now(),
|
|
658
|
+
};
|
|
659
|
+
}
|
|
660
|
+
// ==========================================================================
|
|
661
|
+
// EWC++ Metrics and Access
|
|
662
|
+
// ==========================================================================
|
|
663
|
+
/**
|
|
664
|
+
* Get EWC++ metrics for monitoring and diagnostics.
|
|
665
|
+
*/
|
|
666
|
+
getEWCMetrics() {
|
|
667
|
+
const metrics = this.ewc.getMetrics();
|
|
668
|
+
// Compute current regularization loss
|
|
669
|
+
metrics.regularizationLoss = this.ewc.computeLoss(this.microLora.getEffectiveWeights());
|
|
670
|
+
return metrics;
|
|
671
|
+
}
|
|
672
|
+
/**
 * Get the Fisher diagonal for persistence.
 *
 * Pure delegation to the internal EWC++ instance; see ewc.getFisherDiagonal()
 * for ownership semantics of the returned array.
 */
getFisherDiagonal() {
    return this.ewc.getFisherDiagonal();
}
|
|
678
|
+
/**
 * Get the optimal parameters for persistence.
 *
 * Pure delegation to the internal EWC++ instance; see ewc.getOptimalParams()
 * for ownership semantics of the returned array.
 */
getOptimalParams() {
    return this.ewc.getOptimalParams();
}
|
|
684
|
+
/**
 * Load persisted Fisher matrix and optimal parameters.
 *
 * Pure delegation to the internal EWC++ instance. Unlike restoreFisher(),
 * this does NOT touch base weights or the request counter.
 *
 * @param fisher - Persisted Fisher diagonal
 * @param optimalParams - Persisted optimal parameter vector
 */
loadFisher(fisher, optimalParams) {
    this.ewc.loadFisher(fisher, optimalParams);
}
|
|
690
|
+
/**
 * Get the current base weights from MicroLoRA.
 *
 * Returns a defensive copy (new Float32Array), so callers cannot mutate
 * the live base weights through the returned value.
 */
getBaseWeights() {
    return new Float32Array(this.microLora.baseWeights);
}
|
|
696
|
+
/**
|
|
697
|
+
* Set base weights (e.g., from persistence).
|
|
698
|
+
*/
|
|
699
|
+
setBaseWeights(weights) {
|
|
700
|
+
const dim = Math.min(weights.length, this.microLora.baseWeights.length);
|
|
701
|
+
for (let i = 0; i < dim; i++) {
|
|
702
|
+
this.microLora.baseWeights[i] = weights[i];
|
|
703
|
+
}
|
|
704
|
+
}
|
|
705
|
+
/**
 * Get the effective weights (base + adaptation delta).
 *
 * Pure delegation to the internal MicroLoRA instance.
 */
getEffectiveWeights() {
    return this.microLora.getEffectiveWeights();
}
|
|
711
|
+
/** Get the total number of requests processed (incremented by instantAdapt()). */
getRequestCount() {
    return this.requestCount;
}
|
|
715
|
+
/** Get the engine configuration as a shallow copy (callers cannot mutate internal config top-level keys). */
getConfig() {
    return { ...this.config };
}
|
|
719
|
+
/** Get the connected peer states as a new Map (shallow copy; the peer state objects themselves are shared). */
getPeerStates() {
    return new Map(this.peerStates);
}
|
|
723
|
+
/** Get direct access to internal MicroLoRA (for testing). Returns the live instance — mutations affect the engine. */
getMicroLoRA() {
    return this.microLora;
}
|
|
727
|
+
/** Get direct access to internal EWC++ (for testing). Returns the live instance — mutations affect the engine. */
getEWC() {
    return this.ewc;
}
|
|
731
|
+
// ==========================================================================
|
|
732
|
+
// Private Helpers
|
|
733
|
+
// ==========================================================================
|
|
734
|
+
/**
|
|
735
|
+
* Apply EWC++ regularization to pull weights toward optimal.
|
|
736
|
+
*
|
|
737
|
+
* For parameters with high Fisher importance, nudge the current
|
|
738
|
+
* weights back toward the optimal values to prevent forgetting.
|
|
739
|
+
*/
|
|
740
|
+
applyEWCRegularization() {
|
|
741
|
+
const dim = this.config.dimension;
|
|
742
|
+
const fisher = this.ewc.fisherMatrix;
|
|
743
|
+
const optimal = this.ewc.optimalParams;
|
|
744
|
+
const weights = this.microLora.baseWeights;
|
|
745
|
+
// Regularization step: theta -= alpha * F_i * (theta_i - theta*_i)
|
|
746
|
+
// Use a small step size so we don't overshoot
|
|
747
|
+
const regStepSize = 0.01;
|
|
748
|
+
for (let i = 0; i < dim; i++) {
|
|
749
|
+
if (fisher[i] > this.config.importanceThreshold) {
|
|
750
|
+
const diff = weights[i] - optimal[i];
|
|
751
|
+
weights[i] -= regStepSize * fisher[i] * diff;
|
|
752
|
+
}
|
|
753
|
+
}
|
|
754
|
+
}
|
|
755
|
+
// ==========================================================================
|
|
756
|
+
// SQLite Persistence (Task 2.2: EWC++ Fisher Matrix Persistence)
|
|
757
|
+
// ==========================================================================
|
|
758
|
+
/**
|
|
759
|
+
* Persist Fisher matrix, optimal parameters, and base weights to SQLite
|
|
760
|
+
* via PersistentSONAEngine.saveFisherMatrix().
|
|
761
|
+
*
|
|
762
|
+
* Call this after backgroundConsolidate() to ensure EWC++ state survives
|
|
763
|
+
* across sessions. Typically called at session end or after N consolidations.
|
|
764
|
+
*
|
|
765
|
+
* @param persistFn - Callback that receives data and writes to SQLite.
|
|
766
|
+
* Typically `(domain, fisher, optimal, base, meta) => persistentEngine.saveFisherMatrix(...)`.
|
|
767
|
+
* @param domain - Domain identifier for the Fisher state
|
|
768
|
+
*/
|
|
769
|
+
persistFisher(persistFn, domain) {
|
|
770
|
+
const ewcMetrics = this.ewc.getMetrics();
|
|
771
|
+
persistFn(domain, this.ewc.getFisherDiagonal(), this.ewc.getOptimalParams(), this.getBaseWeights(), {
|
|
772
|
+
taskBoundaries: ewcMetrics.taskBoundariesDetected,
|
|
773
|
+
consolidationCycles: ewcMetrics.consolidationCycles,
|
|
774
|
+
requestCount: this.requestCount,
|
|
775
|
+
ewcLambda: ewcMetrics.lambda,
|
|
776
|
+
});
|
|
777
|
+
logger.info('Fisher matrix persisted to SQLite', {
|
|
778
|
+
domain,
|
|
779
|
+
requestCount: this.requestCount,
|
|
780
|
+
taskBoundaries: ewcMetrics.taskBoundariesDetected,
|
|
781
|
+
});
|
|
782
|
+
}
|
|
783
|
+
/**
|
|
784
|
+
* Restore Fisher matrix, optimal parameters, and base weights from SQLite.
|
|
785
|
+
*
|
|
786
|
+
* Call this on engine startup to restore EWC++ state from a previous session.
|
|
787
|
+
*
|
|
788
|
+
* @param data - Persisted data from PersistentSONAEngine.loadFisherMatrix()
|
|
789
|
+
*/
|
|
790
|
+
restoreFisher(data) {
|
|
791
|
+
this.ewc.loadFisher(data.fisherDiagonal, data.optimalParams);
|
|
792
|
+
if (data.baseWeights) {
|
|
793
|
+
this.setBaseWeights(data.baseWeights);
|
|
794
|
+
}
|
|
795
|
+
this.requestCount = data.requestCount;
|
|
796
|
+
logger.info('Fisher matrix restored from SQLite', {
|
|
797
|
+
requestCount: data.requestCount,
|
|
798
|
+
dimension: data.fisherDiagonal.length,
|
|
799
|
+
});
|
|
800
|
+
}
|
|
801
|
+
}
|
|
802
|
+
// ============================================================================
|
|
803
|
+
// Factory Functions
|
|
804
|
+
// ============================================================================
|
|
805
|
+
/**
 * Create a SONA Three-Loop Engine with the given configuration.
 *
 * @param config - Optional three-loop configuration overrides
 * @param nativeEngine - Optional @ruvector/sona SonaEngine for native delegation
 * @returns A freshly constructed SONAThreeLoopEngine
 */
export function createSONAThreeLoopEngine(config, nativeEngine) {
    const engine = new SONAThreeLoopEngine(config, nativeEngine);
    return engine;
}
|
|
814
|
+
//# sourceMappingURL=sona-three-loop.js.map
|