agentic-qe 3.8.11 → 3.8.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/.claude/skills/qe-code-intelligence/SKILL.md +29 -20
  2. package/.claude/skills/qe-code-intelligence/evals/qe-code-intelligence.yaml +3 -3
  3. package/.claude/skills/qe-quality-assessment/SKILL.md +1 -1
  4. package/.claude/skills/qe-test-generation/SKILL.md +1 -1
  5. package/.claude/skills/skills-manifest.json +1 -1
  6. package/CHANGELOG.md +45 -0
  7. package/README.md +9 -0
  8. package/assets/skills/qe-code-intelligence/SKILL.md +29 -20
  9. package/assets/skills/qe-code-intelligence/evals/qe-code-intelligence.yaml +3 -3
  10. package/assets/skills/qe-quality-assessment/SKILL.md +1 -1
  11. package/assets/skills/qe-test-generation/SKILL.md +1 -1
  12. package/dist/cli/bundle.js +1162 -1046
  13. package/dist/cli/commands/code.js +149 -11
  14. package/dist/cli/commands/init.js +3 -2
  15. package/dist/cli/commands/ruvector-commands.js +17 -0
  16. package/dist/cli/handlers/init-handler.d.ts +1 -0
  17. package/dist/cli/handlers/init-handler.js +15 -10
  18. package/dist/cli/utils/file-discovery.d.ts +1 -0
  19. package/dist/cli/utils/file-discovery.js +1 -1
  20. package/dist/domains/code-intelligence/coordinator-gnn.d.ts +21 -0
  21. package/dist/domains/code-intelligence/coordinator-gnn.js +102 -0
  22. package/dist/domains/contract-testing/coordinator.js +13 -0
  23. package/dist/domains/coverage-analysis/coordinator.js +5 -0
  24. package/dist/domains/defect-intelligence/coordinator.d.ts +1 -0
  25. package/dist/domains/defect-intelligence/coordinator.js +43 -0
  26. package/dist/domains/quality-assessment/coordinator.js +26 -0
  27. package/dist/domains/test-generation/coordinator.js +14 -0
  28. package/dist/init/orchestrator.js +1 -0
  29. package/dist/init/phases/08-mcp.js +4 -4
  30. package/dist/init/phases/phase-interface.d.ts +3 -1
  31. package/dist/integrations/agentic-flow/reasoning-bank/experience-replay.d.ts +11 -0
  32. package/dist/integrations/agentic-flow/reasoning-bank/experience-replay.js +44 -1
  33. package/dist/integrations/rl-suite/algorithms/eprop.d.ts +79 -0
  34. package/dist/integrations/rl-suite/algorithms/eprop.js +284 -0
  35. package/dist/integrations/rl-suite/algorithms/index.d.ts +2 -1
  36. package/dist/integrations/rl-suite/algorithms/index.js +2 -1
  37. package/dist/integrations/rl-suite/index.d.ts +2 -2
  38. package/dist/integrations/rl-suite/index.js +2 -2
  39. package/dist/integrations/rl-suite/interfaces.d.ts +3 -3
  40. package/dist/integrations/rl-suite/interfaces.js +1 -1
  41. package/dist/integrations/rl-suite/orchestrator.d.ts +2 -2
  42. package/dist/integrations/rl-suite/orchestrator.js +3 -2
  43. package/dist/integrations/rl-suite/reward-signals.d.ts +1 -1
  44. package/dist/integrations/rl-suite/reward-signals.js +1 -1
  45. package/dist/integrations/ruvector/coherence-gate-cohomology.d.ts +41 -0
  46. package/dist/integrations/ruvector/coherence-gate-cohomology.js +47 -0
  47. package/dist/integrations/ruvector/coherence-gate-core.d.ts +200 -0
  48. package/dist/integrations/ruvector/coherence-gate-core.js +294 -0
  49. package/dist/integrations/ruvector/coherence-gate-energy.d.ts +136 -0
  50. package/dist/integrations/ruvector/coherence-gate-energy.js +373 -0
  51. package/dist/integrations/ruvector/coherence-gate-vector.d.ts +38 -0
  52. package/dist/integrations/ruvector/coherence-gate-vector.js +76 -0
  53. package/dist/integrations/ruvector/coherence-gate.d.ts +10 -311
  54. package/dist/integrations/ruvector/coherence-gate.js +10 -652
  55. package/dist/integrations/ruvector/cold-tier-trainer.d.ts +103 -0
  56. package/dist/integrations/ruvector/cold-tier-trainer.js +377 -0
  57. package/dist/integrations/ruvector/cusum-detector.d.ts +70 -0
  58. package/dist/integrations/ruvector/cusum-detector.js +142 -0
  59. package/dist/integrations/ruvector/delta-tracker.d.ts +122 -0
  60. package/dist/integrations/ruvector/delta-tracker.js +311 -0
  61. package/dist/integrations/ruvector/domain-transfer.d.ts +79 -1
  62. package/dist/integrations/ruvector/domain-transfer.js +158 -2
  63. package/dist/integrations/ruvector/eprop-learner.d.ts +135 -0
  64. package/dist/integrations/ruvector/eprop-learner.js +351 -0
  65. package/dist/integrations/ruvector/feature-flags.d.ts +177 -0
  66. package/dist/integrations/ruvector/feature-flags.js +145 -0
  67. package/dist/integrations/ruvector/graphmae-encoder.d.ts +88 -0
  68. package/dist/integrations/ruvector/graphmae-encoder.js +360 -0
  69. package/dist/integrations/ruvector/hdc-fingerprint.d.ts +127 -0
  70. package/dist/integrations/ruvector/hdc-fingerprint.js +222 -0
  71. package/dist/integrations/ruvector/hopfield-memory.d.ts +97 -0
  72. package/dist/integrations/ruvector/hopfield-memory.js +238 -0
  73. package/dist/integrations/ruvector/index.d.ts +13 -2
  74. package/dist/integrations/ruvector/index.js +46 -2
  75. package/dist/integrations/ruvector/mincut-wrapper.d.ts +7 -0
  76. package/dist/integrations/ruvector/mincut-wrapper.js +54 -2
  77. package/dist/integrations/ruvector/reservoir-replay.d.ts +172 -0
  78. package/dist/integrations/ruvector/reservoir-replay.js +335 -0
  79. package/dist/integrations/ruvector/solver-adapter.d.ts +93 -0
  80. package/dist/integrations/ruvector/solver-adapter.js +299 -0
  81. package/dist/integrations/ruvector/sona-persistence.d.ts +33 -0
  82. package/dist/integrations/ruvector/sona-persistence.js +47 -0
  83. package/dist/integrations/ruvector/spectral-sparsifier.d.ts +154 -0
  84. package/dist/integrations/ruvector/spectral-sparsifier.js +389 -0
  85. package/dist/integrations/ruvector/temporal-causality.d.ts +63 -0
  86. package/dist/integrations/ruvector/temporal-causality.js +317 -0
  87. package/dist/learning/pattern-promotion.d.ts +63 -0
  88. package/dist/learning/pattern-promotion.js +235 -1
  89. package/dist/learning/pattern-store.d.ts +2 -0
  90. package/dist/learning/pattern-store.js +187 -1
  91. package/dist/learning/sqlite-persistence.d.ts +2 -0
  92. package/dist/learning/sqlite-persistence.js +4 -0
  93. package/dist/mcp/bundle.js +506 -427
  94. package/dist/shared/utils/index.d.ts +1 -0
  95. package/dist/shared/utils/index.js +1 -0
  96. package/dist/shared/utils/xorshift128.d.ts +24 -0
  97. package/dist/shared/utils/xorshift128.js +50 -0
  98. package/package.json +1 -1
@@ -0,0 +1,351 @@
1
/**
 * Agentic QE v3 - E-prop Online Learning (ADR-087 Milestone 4, R11)
 *
 * Eligibility propagation for online learning with 12 bytes/synapse,
 * no backprop required. Uses eligibility traces and feedback alignment
 * (Lillicrap et al. 2016) to avoid the weight transport problem.
 *
 * Algorithm: dw = eta * eligibility * reward
 * Memory budget: 4B weight + 4B trace + 4B feedback = 12 bytes/synapse
 */
import { secureRandom } from '../../shared/utils/crypto-random.js';
/**
 * Default network configuration. Caller overrides are spread on top of this
 * object in the EpropNetwork constructor (which copies it), so freezing is
 * safe and prevents any consumer from mutating the shared module-level
 * defaults in place.
 */
const DEFAULT_EPROP_CONFIG = Object.freeze({
    inputSize: 2,
    hiddenSize: 16,
    outputSize: 1,
    learningRate: 0.01,
    eligibilityDecay: 0.95,
    feedbackAlignment: true,
});
// ============================================================================
// E-prop Network
// ============================================================================
23
/**
 * E-prop neural network with online eligibility-trace learning.
 *
 * Architecture: input -> hidden (tanh) -> output (sigmoid or softmax)
 *
 * Each synapse stores exactly 12 bytes:
 * - weight: Float32 (4 bytes)
 * - trace: Float32 (4 bytes) — eligibility trace
 * - feedback: Float32 (4 bytes) — random feedback weight (fixed)
 */
export class EpropNetwork {
    config;
    // Weights (Float32 = 4 bytes each)
    inputHiddenWeights; // inputSize * hiddenSize
    hiddenOutputWeights; // hiddenSize * outputSize
    // Eligibility traces (Float32 = 4 bytes each)
    inputHiddenTraces;
    hiddenOutputTraces;
    // Feedback alignment weights — fixed random, never updated (Float32 = 4 bytes each)
    feedbackWeights; // outputSize * hiddenSize
    // Activations (transient, not counted in memory budget)
    lastInput;
    lastHidden;
    lastHiddenRaw; // pre-activation sums, kept for debugging/inspection
    lastOutput;
    // Stats
    totalSteps = 0;
    totalReward = 0;
    rewardHistory = []; // bounded to the last 1000 rewards (see updateOnline)
    /**
     * @param {object} [config] - Partial config; omitted fields fall back to
     *   DEFAULT_EPROP_CONFIG.
     * @throws {Error} If any merged config value is out of range.
     */
    constructor(config) {
        this.config = { ...DEFAULT_EPROP_CONFIG, ...config };
        this.validateConfig(this.config);
        const { inputSize, hiddenSize, outputSize } = this.config;
        // Allocate weights with Xavier/Glorot initialization
        this.inputHiddenWeights = this.xavierInit(inputSize, hiddenSize);
        this.hiddenOutputWeights = this.xavierInit(hiddenSize, outputSize);
        // Allocate traces (start at zero)
        this.inputHiddenTraces = new Float32Array(inputSize * hiddenSize);
        this.hiddenOutputTraces = new Float32Array(hiddenSize * outputSize);
        // Allocate fixed random feedback weights (never updated after init)
        this.feedbackWeights = this.randomInit(outputSize, hiddenSize);
        // Transient activations
        this.lastInput = new Float32Array(inputSize);
        this.lastHidden = new Float32Array(hiddenSize);
        this.lastHiddenRaw = new Float32Array(hiddenSize);
        this.lastOutput = new Float32Array(outputSize);
    }
    // ==========================================================================
    // Forward Pass
    // ==========================================================================
    /**
     * Compute output from input. Stores activations for eligibility updates.
     *
     * input -> (inputHidden weights) -> tanh -> (hiddenOutput weights) -> output
     *
     * Output activation depends on outputSize:
     * - outputSize === 1: sigmoid (for binary tasks)
     * - outputSize > 1: softmax (for classification)
     *
     * Side effects: updates eligibility traces and increments totalSteps.
     *
     * @param {Float32Array|number[]} input - Length must equal config.inputSize.
     * @returns {Float32Array} Copy of the output activations.
     * @throws {Error} If input length does not match config.inputSize.
     */
    forward(input) {
        const { inputSize, hiddenSize, outputSize } = this.config;
        if (input.length !== inputSize) {
            throw new Error(`Input size mismatch: expected ${inputSize}, got ${input.length}`);
        }
        // Store input for trace update
        this.lastInput.set(input);
        // Hidden layer: h = tanh(W_ih^T * x)
        for (let j = 0; j < hiddenSize; j++) {
            let sum = 0;
            for (let i = 0; i < inputSize; i++) {
                sum += input[i] * this.inputHiddenWeights[i * hiddenSize + j];
            }
            this.lastHiddenRaw[j] = sum;
            this.lastHidden[j] = Math.tanh(sum);
        }
        // Output layer: o = W_ho^T * h (raw logits)
        const rawOutput = new Float32Array(outputSize);
        for (let k = 0; k < outputSize; k++) {
            let sum = 0;
            for (let j = 0; j < hiddenSize; j++) {
                sum += this.lastHidden[j] * this.hiddenOutputWeights[j * outputSize + k];
            }
            rawOutput[k] = sum;
        }
        // Apply output activation
        if (outputSize === 1) {
            this.lastOutput[0] = this.sigmoid(rawOutput[0]);
        }
        else {
            const softmaxResult = this.softmax(rawOutput);
            this.lastOutput.set(softmaxResult);
        }
        // Update eligibility traces after forward pass
        this.updateTraces();
        this.totalSteps++;
        // Return a defensive copy so callers cannot mutate internal state
        return new Float32Array(this.lastOutput);
    }
    // ==========================================================================
    // Online Learning
    // ==========================================================================
    /**
     * Update weights using eligibility traces and reward/error signal.
     *
     * Hidden->output layer:
     *   dw[j][k] = learningRate * e_ho[j][k] * reward
     *
     * Input->hidden layer (feedback alignment):
     *   The learning signal for hidden neuron j is computed by projecting
     *   the scalar reward through feedback weights (or transposed output
     *   weights if feedbackAlignment is false).
     *   dw[i][j] = learningRate * e_ih[i][j] * L[j]
     *   where L[j] = sum_k(B[k][j] * reward)    [feedback alignment]
     *         L[j] = sum_k(W_ho[j][k] * reward) [weight transport]
     *
     * @param {number} reward - Scalar reward/error signal for the last step.
     */
    updateOnline(reward) {
        const { inputSize, hiddenSize, outputSize, learningRate, feedbackAlignment } = this.config;
        // Update hidden->output weights: dw = eta * trace * reward
        for (let j = 0; j < hiddenSize; j++) {
            for (let k = 0; k < outputSize; k++) {
                const idx = j * outputSize + k;
                this.hiddenOutputWeights[idx] +=
                    learningRate * this.hiddenOutputTraces[idx] * reward;
            }
        }
        // Compute per-hidden-neuron learning signal via feedback alignment
        const learningSignal = new Float32Array(hiddenSize);
        if (feedbackAlignment) {
            for (let j = 0; j < hiddenSize; j++) {
                let signal = 0;
                for (let k = 0; k < outputSize; k++) {
                    signal += this.feedbackWeights[k * hiddenSize + j];
                }
                learningSignal[j] = signal * reward;
            }
        }
        else {
            // Weight transport: use transposed output weights
            for (let j = 0; j < hiddenSize; j++) {
                let signal = 0;
                for (let k = 0; k < outputSize; k++) {
                    signal += this.hiddenOutputWeights[j * outputSize + k];
                }
                learningSignal[j] = signal * reward;
            }
        }
        // Update input->hidden weights: dw = eta * trace * learningSignal
        for (let i = 0; i < inputSize; i++) {
            for (let j = 0; j < hiddenSize; j++) {
                const idx = i * hiddenSize + j;
                this.inputHiddenWeights[idx] +=
                    learningRate * this.inputHiddenTraces[idx] * learningSignal[j];
            }
        }
        // Track reward (history bounded to 1000 entries for avgReward)
        this.totalReward += reward;
        this.rewardHistory.push(reward);
        if (this.rewardHistory.length > 1000) {
            this.rewardHistory.shift();
        }
    }
    // ==========================================================================
    // Trace Management
    // ==========================================================================
    /**
     * Update eligibility traces after a forward pass.
     *
     * Traces capture purely local information (Hebbian-like):
     *   Hidden->output: e[j][k] = decay * e[j][k] + h[j]
     *   Input->hidden:  e[i][j] = decay * e[i][j] + x[i] * dtanh(raw[j])
     *
     * The feedback/learning signal is applied separately in updateOnline().
     */
    updateTraces() {
        const { inputSize, hiddenSize, outputSize, eligibilityDecay } = this.config;
        // Hidden->output traces: e[j][k] = decay * e[j][k] + h[j]
        for (let j = 0; j < hiddenSize; j++) {
            for (let k = 0; k < outputSize; k++) {
                const idx = j * outputSize + k;
                this.hiddenOutputTraces[idx] =
                    eligibilityDecay * this.hiddenOutputTraces[idx] + this.lastHidden[j];
            }
        }
        // Input->hidden traces: e[i][j] = decay * e[i][j] + x[i] * dtanh(raw[j])
        for (let i = 0; i < inputSize; i++) {
            for (let j = 0; j < hiddenSize; j++) {
                const idx = i * hiddenSize + j;
                // dtanh(x) = 1 - tanh(x)^2, computed from the stored activation
                const tanhDerivative = 1 - this.lastHidden[j] * this.lastHidden[j];
                this.inputHiddenTraces[idx] =
                    eligibilityDecay * this.inputHiddenTraces[idx] +
                        this.lastInput[i] * tanhDerivative;
            }
        }
    }
    /**
     * Reset all eligibility traces to zero (between episodes).
     */
    resetTraces() {
        this.inputHiddenTraces.fill(0);
        this.hiddenOutputTraces.fill(0);
    }
    // ==========================================================================
    // Statistics
    // ==========================================================================
    /**
     * Get network statistics including memory budget verification.
     *
     * @returns {{totalSteps: number, totalReward: number, avgReward: number,
     *   synapseCount: number, synapsCount: number, memoryBytes: number}}
     *   avgReward is over the (bounded) reward history, 0 when empty.
     */
    getStats() {
        const { inputSize, hiddenSize, outputSize } = this.config;
        const synapseCount = inputSize * hiddenSize + // input->hidden
            hiddenSize * outputSize; // hidden->output
        return {
            totalSteps: this.totalSteps,
            totalReward: this.totalReward,
            avgReward: this.rewardHistory.length > 0
                ? this.rewardHistory.reduce((a, b) => a + b, 0) / this.rewardHistory.length
                : 0,
            synapseCount,
            // Deprecated misspelled key kept for backward compatibility with
            // existing consumers; prefer `synapseCount`.
            synapsCount: synapseCount,
            memoryBytes: synapseCount * 12, // 4B weight + 4B trace + 4B feedback
        };
    }
    // ==========================================================================
    // Weight Import / Export
    // ==========================================================================
    /**
     * Export current weights (without traces — those are transient).
     *
     * @returns {{inputHidden: Float32Array, hiddenOutput: Float32Array}} Copies.
     */
    exportWeights() {
        return {
            inputHidden: new Float32Array(this.inputHiddenWeights),
            hiddenOutput: new Float32Array(this.hiddenOutputWeights),
        };
    }
    /**
     * Import weights. Resets traces since the network state has changed.
     *
     * @param {{inputHidden: Float32Array, hiddenOutput: Float32Array}} weights
     * @throws {Error} If either array length does not match the configured sizes.
     */
    importWeights(weights) {
        const { inputSize, hiddenSize, outputSize } = this.config;
        if (weights.inputHidden.length !== inputSize * hiddenSize) {
            throw new Error(`inputHidden size mismatch: expected ${inputSize * hiddenSize}, got ${weights.inputHidden.length}`);
        }
        if (weights.hiddenOutput.length !== hiddenSize * outputSize) {
            throw new Error(`hiddenOutput size mismatch: expected ${hiddenSize * outputSize}, got ${weights.hiddenOutput.length}`);
        }
        this.inputHiddenWeights.set(weights.inputHidden);
        this.hiddenOutputWeights.set(weights.hiddenOutput);
        this.resetTraces();
    }
    // ==========================================================================
    // Internals
    // ==========================================================================
    /** Get the current config (shallow copy, read-only for the caller). */
    getConfig() {
        return { ...this.config };
    }
    /** Get the raw eligibility trace values (copies) for testing/debugging. */
    getTraces() {
        return {
            inputHidden: new Float32Array(this.inputHiddenTraces),
            hiddenOutput: new Float32Array(this.hiddenOutputTraces),
        };
    }
    // ==========================================================================
    // Private Helpers
    // ==========================================================================
    /** Validate merged config; throws a descriptive Error on any bad value. */
    validateConfig(config) {
        if (config.inputSize <= 0) {
            throw new Error(`inputSize must be positive, got ${config.inputSize}`);
        }
        if (config.hiddenSize <= 0) {
            throw new Error(`hiddenSize must be positive, got ${config.hiddenSize}`);
        }
        if (config.outputSize <= 0) {
            throw new Error(`outputSize must be positive, got ${config.outputSize}`);
        }
        if (config.learningRate <= 0) {
            throw new Error(`learningRate must be positive, got ${config.learningRate}`);
        }
        if (config.eligibilityDecay < 0 || config.eligibilityDecay > 1) {
            throw new Error(`eligibilityDecay must be in [0, 1], got ${config.eligibilityDecay}`);
        }
    }
    /** Xavier/Glorot uniform initialization in [-limit, limit]. */
    xavierInit(fanIn, fanOut) {
        const limit = Math.sqrt(6 / (fanIn + fanOut));
        const size = fanIn * fanOut;
        const arr = new Float32Array(size);
        for (let i = 0; i < size; i++) {
            arr[i] = (secureRandom() * 2 - 1) * limit;
        }
        return arr;
    }
    /** Random uniform initialization in [-0.5, 0.5]. */
    randomInit(rows, cols) {
        const size = rows * cols;
        const arr = new Float32Array(size);
        for (let i = 0; i < size; i++) {
            arr[i] = secureRandom() - 0.5;
        }
        return arr;
    }
    /** Sigmoid activation */
    sigmoid(x) {
        return 1 / (1 + Math.exp(-x));
    }
    /** Softmax activation (max-subtracted for numerical stability). */
    softmax(logits) {
        // Loop instead of Math.max(...spread): avoids pushing every logit
        // onto the call stack for large arrays, same result.
        let max = -Infinity;
        for (let i = 0; i < logits.length; i++) {
            if (logits[i] > max) {
                max = logits[i];
            }
        }
        const exps = new Float32Array(logits.length);
        let sum = 0;
        for (let i = 0; i < logits.length; i++) {
            exps[i] = Math.exp(logits[i] - max);
            sum += exps[i];
        }
        for (let i = 0; i < exps.length; i++) {
            exps[i] /= sum;
        }
        return exps;
    }
}
342
// ============================================================================
// Factory
// ============================================================================
/**
 * Create a new E-prop network with the given configuration.
 *
 * Thin convenience wrapper around the EpropNetwork constructor.
 */
export function createEpropNetwork(config) {
    const network = new EpropNetwork(config);
    return network;
}
//# sourceMappingURL=eprop-learner.js.map
@@ -189,6 +189,118 @@ export interface RuVectorFeatureFlags {
189
189
  * @default true
190
190
  */
191
191
  useReasoningQEC: boolean;
192
+ /**
193
+ * Enable HDC Pattern Fingerprinting (R1, ADR-087)
194
+ * Uses 10,000-bit binary hypervectors with XOR binding for O(1) compositional
195
+ * pattern fingerprinting and nanosecond Hamming-distance similarity.
196
+ * TypeScript fallback; WASM upgrade path via @ruvector/hdc-wasm.
197
+ * @default true
198
+ */
199
+ useHDCFingerprinting: boolean;
200
+ /**
201
+ * Enable CUSUM Drift Detection (R2, ADR-087)
202
+ * Adds statistical change-point detection to the coherence gate using
203
+ * two-sided Cumulative Sum (CUSUM) algorithm. Replaces heuristic thresholds
204
+ * with statistically rigorous drift detection per gate type.
205
+ * @default true
206
+ */
207
+ useCusumDriftDetection: boolean;
208
+ /**
209
+ * Enable Delta Event Sourcing (R3, ADR-087)
210
+ * Tracks pattern version history as delta events in SQLite. Enables rollback
211
+ * to any previous pattern state and incremental sync between agents.
212
+ * @default true
213
+ */
214
+ useDeltaEventSourcing: boolean;
215
+ /**
216
+ * Enable EWC++ Regularization (ADR-087)
217
+ * Activates Elastic Weight Consolidation++ Fisher Information Matrix
218
+ * computation in domain coordinators. Prevents catastrophic forgetting
219
+ * when learning new domains. Requires useSONAThreeLoop to be enabled.
220
+ * @default true
221
+ */
222
+ useEwcPlusPlusRegularization: boolean;
223
+ /**
224
+ * Enable GraphMAE Self-Supervised Embeddings (R4, ADR-087)
225
+ * Zero-label graph learning via masked graph autoencoders. Produces embeddings
226
+ * from code dependency graph structure without labeled training data.
227
+ * Consumer: coordinator-gnn.ts generateGraphMAEEmbeddings()
228
+ * Activation: enable after verifying 1K-node embedding quality in your graph
229
+ * @default false
230
+ */
231
+ useGraphMAEEmbeddings: boolean;
232
+ /**
233
+ * Enable Modern Hopfield Memory (R5, ADR-087)
234
+ * Exponential-capacity associative memory for exact pattern recall.
235
+ * Complements HNSW approximate search with content-addressable exact retrieval.
236
+ * Consumer: pattern-store.ts store()/search() exact recall path
237
+ * Activation: enable after verifying recall accuracy at your pattern count
238
+ * Note: at beta=8 with normalized patterns, softmax recall is equivalent
239
+ * to a single Hopfield fixed-point iteration (Ramsauer 2020, Theorem 3)
240
+ * @default false
241
+ */
242
+ useHopfieldMemory: boolean;
243
+ /**
244
+ * Enable Cold-Tier GNN Training (R6, ADR-087)
245
+ * LRU-cached mini-batch GNN training for graphs exceeding hotsetSize.
246
+ * FileBackedGraph available for true disk-backed larger-than-RAM graphs.
247
+ * Consumer: coordinator-gnn.ts trainWithColdTier()
248
+ * Activation: enable when pattern graph exceeds 10K nodes
249
+ * @default false
250
+ */
251
+ useColdTierGNN: boolean;
252
+ /**
253
+ * Enable Meta-Learning Enhancements (R7, ADR-087)
254
+ * Adds DecayingBeta, PlateauDetector, ParetoFront, and CuriosityBonus
255
+ * to the cross-domain transfer engine for adaptive exploration.
256
+ * Consumer: domain-transfer.ts DomainTransferEngine
257
+ * Activation: enable after verifying plateau detection on transfer history
258
+ * @default false
259
+ */
260
+ useMetaLearningEnhancements: boolean;
261
+ /**
262
+ * Enable Sublinear Solver (R8, ADR-087)
263
+ * O(log n) PageRank for graph-based pattern importance scoring.
264
+ * TypeScript power-iteration fallback; native @ruvector/solver-node optional.
265
+ * Consumer: pattern-promotion.ts (future integration)
266
+ * Activation: enable after bootstrapping a pattern citation graph
267
+ * @default false
268
+ */
269
+ useSublinearSolver: boolean;
270
+ /**
271
+ * Enable Spectral Graph Sparsification (R9, ADR-087)
272
+ * Effective resistance sampling to compress graphs while preserving
273
+ * Laplacian spectral properties. Reduces cost of graph operations.
274
+ * Consumer: coherence checks, mincut analysis (future integration)
275
+ * Activation: enable when graph edge count exceeds 10K
276
+ * @default false
277
+ */
278
+ useSpectralSparsification: boolean;
279
+ /**
280
+ * Enable Reservoir Replay with Coherence Gating (R10, ADR-087)
281
+ * Fixed-size replay buffer with coherence-gated admission and
282
+ * tier-weighted sampling. Uses CUSUM for drift-aware threshold.
283
+ * Consumer: experience-replay.ts (future integration)
284
+ * Activation: enable after verifying admission quality on your workload
285
+ * @default false
286
+ */
287
+ useReservoirReplay: boolean;
288
+ /**
289
+ * Enable E-prop Online Learning (R11, ADR-087)
290
+ * Eligibility propagation with 12 bytes/synapse, no backprop required.
291
+ * Registers as RL algorithm #10 in the suite.
292
+ * Consumer: rl-suite algorithms/eprop.ts
293
+ * @default false
294
+ */
295
+ useEpropOnlineLearning: boolean;
296
+ /**
297
+ * Enable Granger Causality for Test Failure Prediction (R12, ADR-087)
298
+ * Discovers causal chains in test execution history using VAR + F-test.
299
+ * Complements STDP (real-time) with batch historical analysis.
300
+ * Consumer: defect-intelligence domain
301
+ * @default false
302
+ */
303
+ useGrangerCausality: boolean;
192
304
  }
193
305
  /**
194
306
  * Default feature flags — stable features (R1–R3, EWC++) enabled by default; experimental ADR-087 features (R4–R12) disabled by default
@@ -341,6 +453,71 @@ export declare function isCoherenceActionGateEnabled(): boolean;
341
453
  * @returns true if useReasoningQEC flag is set
342
454
  */
343
455
  export declare function isReasoningQECEnabled(): boolean;
456
+ /**
457
+ * Check if HDC Pattern Fingerprinting is enabled (R1, ADR-087)
458
+ * @returns true if useHDCFingerprinting flag is set
459
+ */
460
+ export declare function isHDCFingerprintingEnabled(): boolean;
461
+ /**
462
+ * Check if CUSUM Drift Detection is enabled (R2, ADR-087)
463
+ * @returns true if useCusumDriftDetection flag is set
464
+ */
465
+ export declare function isCusumDriftDetectionEnabled(): boolean;
466
+ /**
467
+ * Check if Delta Event Sourcing is enabled (R3, ADR-087)
468
+ * @returns true if useDeltaEventSourcing flag is set
469
+ */
470
+ export declare function isDeltaEventSourcingEnabled(): boolean;
471
+ /**
472
+ * Check if EWC++ Regularization is enabled (ADR-087)
473
+ * @returns true if useEwcPlusPlusRegularization flag is set
474
+ */
475
+ export declare function isEwcPlusPlusEnabled(): boolean;
476
+ /**
477
+ * Check if GraphMAE embeddings are enabled
478
+ * @returns true if useGraphMAEEmbeddings flag is set
479
+ */
480
+ export declare function isGraphMAEEnabled(): boolean;
481
+ /**
482
+ * Check if Hopfield memory is enabled
483
+ * @returns true if useHopfieldMemory flag is set
484
+ */
485
+ export declare function isHopfieldMemoryEnabled(): boolean;
486
+ /**
487
+ * Check if Cold-Tier GNN training is enabled
488
+ * @returns true if useColdTierGNN flag is set
489
+ */
490
+ export declare function isColdTierGNNEnabled(): boolean;
491
+ /**
492
+ * Check if Meta-Learning Enhancements are enabled (R7, ADR-087)
493
+ * @returns true if useMetaLearningEnhancements flag is set
494
+ */
495
+ export declare function isMetaLearningEnabled(): boolean;
496
+ /**
497
+ * Check if Sublinear Solver is enabled (R8, ADR-087)
498
+ * @returns true if useSublinearSolver flag is set
499
+ */
500
+ export declare function isSublinearSolverEnabled(): boolean;
501
+ /**
502
+ * Check if Spectral Sparsification is enabled (R9, ADR-087)
503
+ * @returns true if useSpectralSparsification flag is set
504
+ */
505
+ export declare function isSpectralSparsificationEnabled(): boolean;
506
+ /**
507
+ * Check if Reservoir Replay is enabled (R10, ADR-087)
508
+ * @returns true if useReservoirReplay flag is set
509
+ */
510
+ export declare function isReservoirReplayEnabled(): boolean;
511
+ /**
512
+ * Check if E-prop Online Learning is enabled (R11, ADR-087)
513
+ * @returns true if useEpropOnlineLearning flag is set
514
+ */
515
+ export declare function isEpropOnlineLearningEnabled(): boolean;
516
+ /**
517
+ * Check if Granger Causality is enabled (R12, ADR-087)
518
+ * @returns true if useGrangerCausality flag is set
519
+ */
520
+ export declare function isGrangerCausalityEnabled(): boolean;
344
521
  /**
345
522
  * Initialize feature flags from environment variables
346
523
  *