agentic-qe 3.8.11 → 3.8.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/.claude/skills/qe-code-intelligence/SKILL.md +29 -20
  2. package/.claude/skills/qe-code-intelligence/evals/qe-code-intelligence.yaml +3 -3
  3. package/.claude/skills/qe-quality-assessment/SKILL.md +1 -1
  4. package/.claude/skills/qe-test-generation/SKILL.md +1 -1
  5. package/.claude/skills/skills-manifest.json +1 -1
  6. package/CHANGELOG.md +45 -0
  7. package/README.md +9 -0
  8. package/assets/skills/qe-code-intelligence/SKILL.md +29 -20
  9. package/assets/skills/qe-code-intelligence/evals/qe-code-intelligence.yaml +3 -3
  10. package/assets/skills/qe-quality-assessment/SKILL.md +1 -1
  11. package/assets/skills/qe-test-generation/SKILL.md +1 -1
  12. package/dist/cli/bundle.js +1162 -1046
  13. package/dist/cli/commands/code.js +149 -11
  14. package/dist/cli/commands/init.js +3 -2
  15. package/dist/cli/commands/ruvector-commands.js +17 -0
  16. package/dist/cli/handlers/init-handler.d.ts +1 -0
  17. package/dist/cli/handlers/init-handler.js +15 -10
  18. package/dist/cli/utils/file-discovery.d.ts +1 -0
  19. package/dist/cli/utils/file-discovery.js +1 -1
  20. package/dist/domains/code-intelligence/coordinator-gnn.d.ts +21 -0
  21. package/dist/domains/code-intelligence/coordinator-gnn.js +102 -0
  22. package/dist/domains/contract-testing/coordinator.js +13 -0
  23. package/dist/domains/coverage-analysis/coordinator.js +5 -0
  24. package/dist/domains/defect-intelligence/coordinator.d.ts +1 -0
  25. package/dist/domains/defect-intelligence/coordinator.js +43 -0
  26. package/dist/domains/quality-assessment/coordinator.js +26 -0
  27. package/dist/domains/test-generation/coordinator.js +14 -0
  28. package/dist/init/orchestrator.js +1 -0
  29. package/dist/init/phases/08-mcp.js +4 -4
  30. package/dist/init/phases/phase-interface.d.ts +3 -1
  31. package/dist/integrations/agentic-flow/reasoning-bank/experience-replay.d.ts +11 -0
  32. package/dist/integrations/agentic-flow/reasoning-bank/experience-replay.js +44 -1
  33. package/dist/integrations/rl-suite/algorithms/eprop.d.ts +79 -0
  34. package/dist/integrations/rl-suite/algorithms/eprop.js +284 -0
  35. package/dist/integrations/rl-suite/algorithms/index.d.ts +2 -1
  36. package/dist/integrations/rl-suite/algorithms/index.js +2 -1
  37. package/dist/integrations/rl-suite/index.d.ts +2 -2
  38. package/dist/integrations/rl-suite/index.js +2 -2
  39. package/dist/integrations/rl-suite/interfaces.d.ts +3 -3
  40. package/dist/integrations/rl-suite/interfaces.js +1 -1
  41. package/dist/integrations/rl-suite/orchestrator.d.ts +2 -2
  42. package/dist/integrations/rl-suite/orchestrator.js +3 -2
  43. package/dist/integrations/rl-suite/reward-signals.d.ts +1 -1
  44. package/dist/integrations/rl-suite/reward-signals.js +1 -1
  45. package/dist/integrations/ruvector/coherence-gate-cohomology.d.ts +41 -0
  46. package/dist/integrations/ruvector/coherence-gate-cohomology.js +47 -0
  47. package/dist/integrations/ruvector/coherence-gate-core.d.ts +200 -0
  48. package/dist/integrations/ruvector/coherence-gate-core.js +294 -0
  49. package/dist/integrations/ruvector/coherence-gate-energy.d.ts +136 -0
  50. package/dist/integrations/ruvector/coherence-gate-energy.js +373 -0
  51. package/dist/integrations/ruvector/coherence-gate-vector.d.ts +38 -0
  52. package/dist/integrations/ruvector/coherence-gate-vector.js +76 -0
  53. package/dist/integrations/ruvector/coherence-gate.d.ts +10 -311
  54. package/dist/integrations/ruvector/coherence-gate.js +10 -652
  55. package/dist/integrations/ruvector/cold-tier-trainer.d.ts +103 -0
  56. package/dist/integrations/ruvector/cold-tier-trainer.js +377 -0
  57. package/dist/integrations/ruvector/cusum-detector.d.ts +70 -0
  58. package/dist/integrations/ruvector/cusum-detector.js +142 -0
  59. package/dist/integrations/ruvector/delta-tracker.d.ts +122 -0
  60. package/dist/integrations/ruvector/delta-tracker.js +311 -0
  61. package/dist/integrations/ruvector/domain-transfer.d.ts +79 -1
  62. package/dist/integrations/ruvector/domain-transfer.js +158 -2
  63. package/dist/integrations/ruvector/eprop-learner.d.ts +135 -0
  64. package/dist/integrations/ruvector/eprop-learner.js +351 -0
  65. package/dist/integrations/ruvector/feature-flags.d.ts +177 -0
  66. package/dist/integrations/ruvector/feature-flags.js +145 -0
  67. package/dist/integrations/ruvector/graphmae-encoder.d.ts +88 -0
  68. package/dist/integrations/ruvector/graphmae-encoder.js +360 -0
  69. package/dist/integrations/ruvector/hdc-fingerprint.d.ts +127 -0
  70. package/dist/integrations/ruvector/hdc-fingerprint.js +222 -0
  71. package/dist/integrations/ruvector/hopfield-memory.d.ts +97 -0
  72. package/dist/integrations/ruvector/hopfield-memory.js +238 -0
  73. package/dist/integrations/ruvector/index.d.ts +13 -2
  74. package/dist/integrations/ruvector/index.js +46 -2
  75. package/dist/integrations/ruvector/mincut-wrapper.d.ts +7 -0
  76. package/dist/integrations/ruvector/mincut-wrapper.js +54 -2
  77. package/dist/integrations/ruvector/reservoir-replay.d.ts +172 -0
  78. package/dist/integrations/ruvector/reservoir-replay.js +335 -0
  79. package/dist/integrations/ruvector/solver-adapter.d.ts +93 -0
  80. package/dist/integrations/ruvector/solver-adapter.js +299 -0
  81. package/dist/integrations/ruvector/sona-persistence.d.ts +33 -0
  82. package/dist/integrations/ruvector/sona-persistence.js +47 -0
  83. package/dist/integrations/ruvector/spectral-sparsifier.d.ts +154 -0
  84. package/dist/integrations/ruvector/spectral-sparsifier.js +389 -0
  85. package/dist/integrations/ruvector/temporal-causality.d.ts +63 -0
  86. package/dist/integrations/ruvector/temporal-causality.js +317 -0
  87. package/dist/learning/pattern-promotion.d.ts +63 -0
  88. package/dist/learning/pattern-promotion.js +235 -1
  89. package/dist/learning/pattern-store.d.ts +2 -0
  90. package/dist/learning/pattern-store.js +187 -1
  91. package/dist/learning/sqlite-persistence.d.ts +2 -0
  92. package/dist/learning/sqlite-persistence.js +4 -0
  93. package/dist/mcp/bundle.js +506 -427
  94. package/dist/shared/utils/index.d.ts +1 -0
  95. package/dist/shared/utils/index.js +1 -0
  96. package/dist/shared/utils/xorshift128.d.ts +24 -0
  97. package/dist/shared/utils/xorshift128.js +50 -0
  98. package/package.json +1 -1
@@ -36,6 +36,23 @@ const DEFAULT_FEATURE_FLAGS = {
36
36
  useDAGAttention: true,
37
37
  useCoherenceActionGate: true,
38
38
  useReasoningQEC: true,
39
+ // Phase 5 (ADR-087) — enabled by default, opt-out
40
+ useHDCFingerprinting: true,
41
+ useCusumDriftDetection: true,
42
+ useDeltaEventSourcing: true,
43
+ useEwcPlusPlusRegularization: true,
44
+ // Phase 5 Milestone 2 (ADR-087) — verified, default true (opt-out)
45
+ useGraphMAEEmbeddings: true,
46
+ useHopfieldMemory: true,
47
+ useColdTierGNN: true,
48
+ // Phase 5 Milestone 3 (ADR-087) — verified, default true (opt-out)
49
+ useMetaLearningEnhancements: true,
50
+ useSublinearSolver: true,
51
+ useSpectralSparsification: true,
52
+ useReservoirReplay: true,
53
+ // Phase 5 Milestone 4 (ADR-087) — verified, default true (opt-out)
54
+ useEpropOnlineLearning: true,
55
+ useGrangerCausality: true,
39
56
  };
40
57
  // ============================================================================
41
58
  // Internal State
@@ -244,6 +261,101 @@ export function isCoherenceActionGateEnabled() {
244
261
  export function isReasoningQECEnabled() {
245
262
  return currentFeatureFlags.useReasoningQEC;
246
263
  }
264
+ // Phase 5 (ADR-087) convenience functions
265
+ /**
266
+ * Check if HDC Pattern Fingerprinting is enabled (R1, ADR-087)
267
+ * @returns true if useHDCFingerprinting flag is set
268
+ */
269
+ export function isHDCFingerprintingEnabled() {
270
+ return currentFeatureFlags.useHDCFingerprinting;
271
+ }
272
+ /**
273
+ * Check if CUSUM Drift Detection is enabled (R2, ADR-087)
274
+ * @returns true if useCusumDriftDetection flag is set
275
+ */
276
+ export function isCusumDriftDetectionEnabled() {
277
+ return currentFeatureFlags.useCusumDriftDetection;
278
+ }
279
+ /**
280
+ * Check if Delta Event Sourcing is enabled (R3, ADR-087)
281
+ * @returns true if useDeltaEventSourcing flag is set
282
+ */
283
+ export function isDeltaEventSourcingEnabled() {
284
+ return currentFeatureFlags.useDeltaEventSourcing;
285
+ }
286
+ /**
287
+ * Check if EWC++ Regularization is enabled (ADR-087)
288
+ * @returns true if useEwcPlusPlusRegularization flag is set
289
+ */
290
+ export function isEwcPlusPlusEnabled() {
291
+ return currentFeatureFlags.useEwcPlusPlusRegularization;
292
+ }
293
// Phase 5 Milestone 2 (ADR-087) convenience accessors
/**
 * Whether GraphMAE embeddings are active.
 * @returns {boolean} the current value of the useGraphMAEEmbeddings flag
 */
export function isGraphMAEEnabled() {
    const { useGraphMAEEmbeddings } = currentFeatureFlags;
    return useGraphMAEEmbeddings;
}
/**
 * Whether Hopfield memory is active.
 * @returns {boolean} the current value of the useHopfieldMemory flag
 */
export function isHopfieldMemoryEnabled() {
    const { useHopfieldMemory } = currentFeatureFlags;
    return useHopfieldMemory;
}
/**
 * Whether Cold-Tier GNN training is active.
 * @returns {boolean} the current value of the useColdTierGNN flag
 */
export function isColdTierGNNEnabled() {
    const { useColdTierGNN } = currentFeatureFlags;
    return useColdTierGNN;
}
315
// Phase 5 Milestone 3 (ADR-087) convenience accessors
/**
 * Whether Meta-Learning Enhancements (R7, ADR-087) are active.
 * @returns {boolean} the current value of the useMetaLearningEnhancements flag
 */
export function isMetaLearningEnabled() {
    const { useMetaLearningEnhancements } = currentFeatureFlags;
    return useMetaLearningEnhancements;
}
/**
 * Whether the Sublinear Solver (R8, ADR-087) is active.
 * @returns {boolean} the current value of the useSublinearSolver flag
 */
export function isSublinearSolverEnabled() {
    const { useSublinearSolver } = currentFeatureFlags;
    return useSublinearSolver;
}
/**
 * Whether Spectral Sparsification (R9, ADR-087) is active.
 * @returns {boolean} the current value of the useSpectralSparsification flag
 */
export function isSpectralSparsificationEnabled() {
    const { useSpectralSparsification } = currentFeatureFlags;
    return useSpectralSparsification;
}
/**
 * Whether Reservoir Replay (R10, ADR-087) is active.
 * @returns {boolean} the current value of the useReservoirReplay flag
 */
export function isReservoirReplayEnabled() {
    const { useReservoirReplay } = currentFeatureFlags;
    return useReservoirReplay;
}
344
// Phase 5 Milestone 4 (ADR-087) convenience accessors
/**
 * Whether E-prop Online Learning (R11, ADR-087) is active.
 * @returns {boolean} the current value of the useEpropOnlineLearning flag
 */
export function isEpropOnlineLearningEnabled() {
    const { useEpropOnlineLearning } = currentFeatureFlags;
    return useEpropOnlineLearning;
}
/**
 * Whether Granger Causality (R12, ADR-087) is active.
 * @returns {boolean} the current value of the useGrangerCausality flag
 */
export function isGrangerCausalityEnabled() {
    const { useGrangerCausality } = currentFeatureFlags;
    return useGrangerCausality;
}
247
359
  // ============================================================================
248
360
  // Environment Variable Support
249
361
  // ============================================================================
@@ -321,6 +433,39 @@ export function initFeatureFlagsFromEnv() {
321
433
  if (process.env.RUVECTOR_USE_REASONING_QEC !== undefined) {
322
434
  envFlags.useReasoningQEC = process.env.RUVECTOR_USE_REASONING_QEC === 'true';
323
435
  }
436
+ // Phase 5 (ADR-087) env vars
437
+ if (process.env.RUVECTOR_USE_HDC_FINGERPRINTING !== undefined) {
438
+ envFlags.useHDCFingerprinting = process.env.RUVECTOR_USE_HDC_FINGERPRINTING === 'true';
439
+ }
440
+ if (process.env.RUVECTOR_USE_CUSUM_DRIFT_DETECTION !== undefined) {
441
+ envFlags.useCusumDriftDetection = process.env.RUVECTOR_USE_CUSUM_DRIFT_DETECTION === 'true';
442
+ }
443
+ if (process.env.RUVECTOR_USE_DELTA_EVENT_SOURCING !== undefined) {
444
+ envFlags.useDeltaEventSourcing = process.env.RUVECTOR_USE_DELTA_EVENT_SOURCING === 'true';
445
+ }
446
+ if (process.env.RUVECTOR_USE_EWC_PLUS_PLUS !== undefined) {
447
+ envFlags.useEwcPlusPlusRegularization = process.env.RUVECTOR_USE_EWC_PLUS_PLUS === 'true';
448
+ }
449
+ // Phase 5 Milestone 3 (ADR-087) env vars
450
+ if (process.env.RUVECTOR_USE_META_LEARNING !== undefined) {
451
+ envFlags.useMetaLearningEnhancements = process.env.RUVECTOR_USE_META_LEARNING === 'true';
452
+ }
453
+ if (process.env.RUVECTOR_USE_SUBLINEAR_SOLVER !== undefined) {
454
+ envFlags.useSublinearSolver = process.env.RUVECTOR_USE_SUBLINEAR_SOLVER === 'true';
455
+ }
456
+ if (process.env.RUVECTOR_USE_SPECTRAL_SPARSIFICATION !== undefined) {
457
+ envFlags.useSpectralSparsification = process.env.RUVECTOR_USE_SPECTRAL_SPARSIFICATION === 'true';
458
+ }
459
+ if (process.env.RUVECTOR_USE_RESERVOIR_REPLAY !== undefined) {
460
+ envFlags.useReservoirReplay = process.env.RUVECTOR_USE_RESERVOIR_REPLAY === 'true';
461
+ }
462
+ // Phase 5 Milestone 4 (ADR-087) env vars
463
+ if (process.env.RUVECTOR_USE_EPROP_ONLINE_LEARNING !== undefined) {
464
+ envFlags.useEpropOnlineLearning = process.env.RUVECTOR_USE_EPROP_ONLINE_LEARNING === 'true';
465
+ }
466
+ if (process.env.RUVECTOR_USE_GRANGER_CAUSALITY !== undefined) {
467
+ envFlags.useGrangerCausality = process.env.RUVECTOR_USE_GRANGER_CAUSALITY === 'true';
468
+ }
324
469
  setRuVectorFeatureFlags(envFlags);
325
470
  }
326
471
  // ============================================================================
@@ -0,0 +1,88 @@
1
+ /**
2
+ * R4: GraphMAE Self-Supervised Graph Learning
3
+ *
4
+ * Masked graph autoencoders for zero-label graph embeddings.
5
+ * TypeScript implementation (fallback). NAPI upgrade via @ruvector/gnn when graphmae is exposed.
6
+ * @module integrations/ruvector/graphmae-encoder
7
+ */
8
/** Configuration for the GraphMAE encoder. All fields have defaults (see below). */
export interface GraphMAEConfig {
    /** Embedding output dimension. Must be > 0 (validated by the constructor). Default: 128 */
    embeddingDim: number;
    /** Fraction of nodes to mask during training. Must be in [0, 1] (validated by the constructor). Default: 0.5 */
    maskRatio: number;
    /** Learning rate for reconstruction (SPSA step size; decays per epoch). Default: 0.001 */
    learningRate: number;
    /** Number of attention heads in GAT encoder. Default: 4 */
    numHeads: number;
    /** SCE loss exponent (gamma). Default: 2 */
    gamma: number;
}
21
/** A graph with node features and adjacency lists. */
export interface QEGraph {
    /** Node feature vectors (node_id -> feature vector). All vectors are expected to share one dimension. */
    nodes: Map<string, Float32Array>;
    /** Adjacency list (node_id -> list of neighbor node_ids). Ids not present in `nodes` are ignored during encoding. */
    edges: Map<string, string[]>;
}
28
/** Result of a GraphMAE training run. */
export interface GraphMAEResult {
    /** Node embeddings (node_id -> embedding vector of length embeddingDim). */
    embeddings: Map<string, Float32Array>;
    /** Final-epoch average SCE reconstruction loss over masked nodes (should decrease over epochs). */
    loss: number;
    /** Per-epoch loss values for convergence analysis. */
    lossHistory: number[];
    /** Number of training epochs completed (echo of the requested epoch count). */
    epochs: number;
}
39
/**
 * GraphMAE (Graph Masked Autoencoder) encoder for self-supervised graph embedding.
 * Masks random nodes during training, learns to reconstruct via GAT-style encoder.
 * NOTE: all public methods check the useGraphMAEEmbeddings feature flag first and
 * throw when it is disabled.
 */
export declare class GraphMAEEncoder {
    private readonly config;
    private readonly rng;
    /** Per-head attention projection weights: W_query, W_key per head */
    private headWeights;
    /** Output projection: embeddingDim x (inputDim * numHeads) — set on first encode */
    private Wout;
    /** Decoder projection: inputDim x embeddingDim — projects embeddings back to feature space */
    private Wdec;
    /** Learnable mask token vector (inputDim) — set on first mask */
    private maskToken;
    /** Cached input dimension from the first graph encountered */
    private inputDim;
    /** @throws Error when embeddingDim <= 0 or maskRatio is outside [0, 1] */
    constructor(config?: Partial<GraphMAEConfig>);
    /** Mask a fraction of nodes by replacing features with a learnable mask token. */
    maskNodes(graph: QEGraph): {
        masked: QEGraph;
        maskedIds: Set<string>;
        maskToken: Float32Array;
    };
    /** Single-layer GAT-style message-passing to produce embeddingDim vectors. */
    encode(graph: QEGraph): Map<string, Float32Array>;
    /** SCE loss: (1 - cos_sim^gamma) / gamma, averaged over masked nodes. */
    reconstructionLoss(original: Map<string, Float32Array>, reconstructed: Map<string, Float32Array>, maskedIds: Set<string>): number;
    /** Decode embeddings back to input feature space: decoded = Wdec * embedding. */
    decode(embeddings: Map<string, Float32Array>): Map<string, Float32Array>;
    /** Full training loop: mask -> encode -> decode -> loss -> SPSA optimization. */
    train(graph: QEGraph, epochs: number): GraphMAEResult;
    /** Inference-only: encode the full graph without masking. Returns embeddings. */
    embed(graph: QEGraph): Map<string, Float32Array>;
    /** Initialize weight matrices on first use, based on the graph's feature dimension. */
    private ensureInitialized;
    /** Compute reconstruction loss with a pre-determined mask (no re-masking). */
    private computeFixedMaskLoss;
    /** Collect all trainable weight arrays for SPSA perturbation. */
    private collectWeightArrays;
    /**
     * SPSA gradient-free optimization step (Spall 1992).
     * Perturbs ALL weights simultaneously with random +/-1, evaluates loss at both,
     * estimates gradient, updates weights. 2 forward passes per step regardless of param count.
     */
    private spsaStep;
}
86
/** Create a GraphMAEEncoder with the given (optional, partial) configuration. */
export declare function createGraphMAEEncoder(config?: Partial<GraphMAEConfig>): GraphMAEEncoder;
88
+ //# sourceMappingURL=graphmae-encoder.d.ts.map
@@ -0,0 +1,360 @@
1
+ /**
2
+ * R4: GraphMAE Self-Supervised Graph Learning
3
+ *
4
+ * Masked graph autoencoders for zero-label graph embeddings.
5
+ * TypeScript implementation (fallback). NAPI upgrade via @ruvector/gnn when graphmae is exposed.
6
+ * @module integrations/ruvector/graphmae-encoder
7
+ */
8
+ import { getRuVectorFeatureFlags } from './feature-flags.js';
9
+ import { Xorshift128 } from '../../shared/utils/xorshift128.js';
10
// Constants & Helpers
// Fallback values used by the GraphMAEEncoder constructor when the
// corresponding GraphMAEConfig field is not supplied.
const DEFAULT_EMBEDDING_DIM = 128;
const DEFAULT_MASK_RATIO = 0.5;
const DEFAULT_LEARNING_RATE = 0.001;
const DEFAULT_NUM_HEADS = 4;
const DEFAULT_GAMMA = 2;
16
/** Inner product of two Float32Arrays, summed over the shorter length. */
function dot(a, b) {
    const len = Math.min(a.length, b.length);
    let acc = 0;
    for (let i = 0; i < len; i++) {
        acc += a[i] * b[i];
    }
    return acc;
}
25
/** Euclidean (L2) norm of a Float32Array. */
function norm(v) {
    let sumSq = 0;
    for (let i = 0; i < v.length; i++) {
        sumSq += v[i] * v[i];
    }
    return Math.sqrt(sumSq);
}
29
/** Cosine similarity of two vectors; returns 0 when either has zero magnitude. */
function cosineSimilarity(a, b) {
    // Dot product over the shared prefix, norms over each full vector
    const shared = Math.min(a.length, b.length);
    let ab = 0;
    for (let i = 0; i < shared; i++) {
        ab += a[i] * b[i];
    }
    let aa = 0;
    for (let i = 0; i < a.length; i++) {
        aa += a[i] * a[i];
    }
    let bb = 0;
    for (let i = 0; i < b.length; i++) {
        bb += b[i] * b[i];
    }
    const na = Math.sqrt(aa);
    const nb = Math.sqrt(bb);
    return na === 0 || nb === 0 ? 0 : ab / (na * nb);
}
37
/** Xavier/Glorot-uniform initialization of a rows x cols matrix, stored as a flat Float32Array. */
function xavierInit(rows, cols, rng) {
    const scale = Math.sqrt(6 / (rows + cols));
    const weights = new Float32Array(rows * cols);
    for (let i = 0; i < weights.length; i++) {
        // rng.nextFloat() in [0, 1) mapped to [-scale, scale)
        weights[i] = (rng.nextFloat() * 2 - 1) * scale;
    }
    return weights;
}
46
/** Dense matrix-vector product: W is row-major flat storage; returns a Float32Array of `rows` entries. */
function matVecMul(W, x, rows, cols) {
    const out = new Float32Array(rows);
    for (let r = 0; r < rows; r++) {
        const base = r * cols;
        let acc = 0;
        for (let c = 0; c < cols; c++) {
            acc += W[base + c] * x[c];
        }
        out[r] = acc;
    }
    return out;
}
59
/**
 * Numerically stable softmax over an array of numbers.
 * Subtracts the max before exponentiating; when the exponentials sum to 0
 * falls back to a uniform distribution (and an empty input yields []).
 * @param {number[]} values - raw scores
 * @returns {number[]} normalized probabilities
 */
function softmax(values) {
    // Fix: Math.max(...values) throws RangeError once `values` exceeds the
    // engine's argument-count limit (large neighbor lists); loop instead.
    let maxVal = -Infinity;
    for (const v of values) {
        if (v > maxVal) {
            maxVal = v;
        }
    }
    const exps = values.map((v) => Math.exp(v - maxVal));
    let sum = 0;
    for (const e of exps) {
        sum += e;
    }
    return sum === 0 ? exps.map(() => 1 / values.length) : exps.map((e) => e / sum);
}
66
/** Guard: throw unless the GraphMAE feature flag is on; `method` names the caller for the error text. */
function assertGraphMAEEnabled(method) {
    const { useGraphMAEEmbeddings } = getRuVectorFeatureFlags();
    if (useGraphMAEEmbeddings) {
        return;
    }
    throw new Error(`GraphMAE is disabled (useGraphMAEEmbeddings=false). Enable the feature flag to use ${method}.`);
}
72
// GraphMAEEncoder
/**
 * GraphMAE (Graph Masked Autoencoder) encoder for self-supervised graph embedding.
 * Masks random nodes during training, learns to reconstruct via GAT-style encoder.
 * Training is gradient-free (SPSA), so only forward passes are needed.
 * All public methods throw when the useGraphMAEEmbeddings feature flag is off.
 */
export class GraphMAEEncoder {
    config;
    rng;
    /** Per-head attention projection weights: W_query, W_key per head */
    headWeights = [];
    /** Output projection: embeddingDim x (inputDim * numHeads) — set on first encode */
    Wout = null;
    /** Decoder projection: inputDim x embeddingDim — projects embeddings back to feature space */
    Wdec = null;
    /** Learnable mask token vector (inputDim) — set on first mask */
    maskToken = null;
    /** Cached input dimension from the first graph encountered */
    inputDim = 0;
    /**
     * @param {Partial<GraphMAEConfig>} [config] - overrides for the defaults
     * @throws {Error} when embeddingDim <= 0 or maskRatio is outside [0, 1]
     */
    constructor(config) {
        this.config = {
            embeddingDim: config?.embeddingDim ?? DEFAULT_EMBEDDING_DIM,
            maskRatio: config?.maskRatio ?? DEFAULT_MASK_RATIO,
            learningRate: config?.learningRate ?? DEFAULT_LEARNING_RATE,
            numHeads: config?.numHeads ?? DEFAULT_NUM_HEADS,
            gamma: config?.gamma ?? DEFAULT_GAMMA,
        };
        if (this.config.embeddingDim <= 0) {
            throw new Error(`embeddingDim must be positive, got ${this.config.embeddingDim}`);
        }
        if (this.config.maskRatio < 0 || this.config.maskRatio > 1) {
            throw new Error(`maskRatio must be in [0, 1], got ${this.config.maskRatio}`);
        }
        // Fixed seed keeps initialization and masking deterministic across runs
        this.rng = new Xorshift128(0xdeadbeef);
    }
    /** Mask a fraction of nodes by replacing features with a learnable mask token. */
    maskNodes(graph) {
        assertGraphMAEEnabled('maskNodes');
        const nodeIds = Array.from(graph.nodes.keys());
        if (nodeIds.length === 0) {
            return {
                masked: { nodes: new Map(), edges: new Map(graph.edges) },
                maskedIds: new Set(),
                maskToken: new Float32Array(0),
            };
        }
        this.ensureInitialized(graph);
        // Always mask at least one node, even for very small maskRatio values
        const numToMask = Math.max(1, Math.round(nodeIds.length * this.config.maskRatio));
        const maskedIds = new Set();
        // Fisher-Yates partial shuffle to select maskedIds
        const shuffled = [...nodeIds];
        for (let i = shuffled.length - 1; i > 0 && maskedIds.size < numToMask; i--) {
            const j = Math.floor(this.rng.nextFloat() * (i + 1));
            [shuffled[i], shuffled[j]] = [shuffled[j], shuffled[i]];
            maskedIds.add(shuffled[i]);
        }
        // If we haven't selected enough yet, add from the front
        for (let i = 0; maskedIds.size < numToMask && i < shuffled.length; i++) {
            maskedIds.add(shuffled[i]);
        }
        const maskedNodes = new Map();
        for (const [id, features] of graph.nodes) {
            maskedNodes.set(id, maskedIds.has(id) ? new Float32Array(this.maskToken) : features);
        }
        return {
            masked: { nodes: maskedNodes, edges: new Map(graph.edges) },
            maskedIds,
            maskToken: new Float32Array(this.maskToken),
        };
    }
    /** Single-layer GAT-style message-passing to produce embeddingDim vectors. */
    encode(graph) {
        assertGraphMAEEnabled('encode');
        if (graph.nodes.size === 0) {
            return new Map();
        }
        this.ensureInitialized(graph);
        const { numHeads, embeddingDim } = this.config;
        const headDim = Math.max(1, Math.floor(this.inputDim / numHeads));
        const embeddings = new Map();
        for (const [nodeId, features] of graph.nodes) {
            const neighbors = graph.edges.get(nodeId) ?? [];
            // Edges pointing at unknown node ids are ignored
            const validNeighbors = neighbors.filter((n) => graph.nodes.has(n));
            if (validNeighbors.length === 0) {
                // Isolated node: replicate features across heads and project
                const selfConcat = new Float32Array(this.inputDim * numHeads);
                for (let h = 0; h < numHeads; h++) {
                    selfConcat.set(features, h * this.inputDim);
                }
                embeddings.set(nodeId, matVecMul(this.Wout, selfConcat, embeddingDim, this.inputDim * numHeads));
                continue;
            }
            // Multi-head attention aggregation
            const headOutputs = [];
            for (let h = 0; h < numHeads; h++) {
                const { Wq, Wk } = this.headWeights[h];
                const query = matVecMul(Wq, features, headDim, this.inputDim);
                // Compute attention scores for neighbors (scaled dot-product)
                const scores = [];
                const neighborFeats = [];
                for (const nId of validNeighbors) {
                    const nFeat = graph.nodes.get(nId);
                    const key = matVecMul(Wk, nFeat, headDim, this.inputDim);
                    scores.push(dot(query, key) / Math.sqrt(headDim));
                    neighborFeats.push(nFeat);
                }
                const attnWeights = softmax(scores);
                // Weighted sum of neighbor features
                const aggregated = new Float32Array(this.inputDim);
                for (let n = 0; n < validNeighbors.length; n++) {
                    const w = attnWeights[n];
                    const nFeat = neighborFeats[n];
                    for (let d = 0; d < this.inputDim; d++) {
                        aggregated[d] += w * nFeat[d];
                    }
                }
                // Combine with self features (residual)
                for (let d = 0; d < this.inputDim; d++) {
                    aggregated[d] = (aggregated[d] + features[d]) * 0.5;
                }
                headOutputs.push(aggregated);
            }
            // Concatenate head outputs and project
            const concat = new Float32Array(this.inputDim * numHeads);
            for (let h = 0; h < numHeads; h++) {
                concat.set(headOutputs[h], h * this.inputDim);
            }
            embeddings.set(nodeId, matVecMul(this.Wout, concat, embeddingDim, this.inputDim * numHeads));
        }
        return embeddings;
    }
    /** SCE loss: (1 - cos_sim^gamma) / gamma, averaged over masked nodes. */
    reconstructionLoss(original, reconstructed, maskedIds) {
        assertGraphMAEEnabled('reconstructionLoss');
        if (maskedIds.size === 0)
            return 0;
        const gamma = this.config.gamma;
        let totalLoss = 0;
        let count = 0;
        for (const id of maskedIds) {
            const orig = original.get(id);
            const recon = reconstructed.get(id);
            // Nodes missing from either map are skipped, not counted
            if (!orig || !recon)
                continue;
            const sim = cosineSimilarity(orig, recon);
            totalLoss += (1 - Math.pow(sim, gamma)) / gamma;
            count++;
        }
        return count === 0 ? 0 : totalLoss / count;
    }
    /** Decode embeddings back to input feature space: decoded = Wdec * embedding. */
    decode(embeddings) {
        assertGraphMAEEnabled('decode');
        if (!this.Wdec || this.inputDim === 0)
            return new Map();
        const decoded = new Map();
        for (const [nodeId, emb] of embeddings) {
            decoded.set(nodeId, matVecMul(this.Wdec, emb, this.inputDim, this.config.embeddingDim));
        }
        return decoded;
    }
    /**
     * Full training loop: mask -> encode -> decode -> loss -> SPSA optimization.
     * @param {QEGraph} graph - the graph to train on
     * @param {number} epochs - number of SPSA epochs; <= 0 performs no training
     * @returns {GraphMAEResult} final embeddings plus loss history
     */
    train(graph, epochs) {
        assertGraphMAEEnabled('train');
        if (graph.nodes.size === 0) {
            return { embeddings: new Map(), loss: 0, lossHistory: [], epochs: 0 };
        }
        this.ensureInitialized(graph);
        const originalFeatures = graph.nodes;
        const lossHistory = [];
        for (let epoch = 0; epoch < epochs; epoch++) {
            // Generate mask ONCE per epoch (fixed within SPSA steps)
            const { maskedIds } = this.maskNodes(graph);
            // SPSA step with decaying step size and perturbation
            const stepSize = this.config.learningRate / (1 + epoch * 0.1);
            const perturbSize = this.config.learningRate * 0.5 / Math.pow(1 + epoch, 0.101);
            const epochLoss = this.spsaStep(graph, originalFeatures, maskedIds, stepSize, perturbSize);
            lossHistory.push(epochLoss);
        }
        const finalEmbeddings = this.encode(graph);
        return {
            embeddings: finalEmbeddings,
            // Fix: with epochs <= 0 lossHistory is empty; report 0 rather than
            // undefined (GraphMAEResult declares loss as a number)
            loss: lossHistory.length > 0 ? lossHistory[lossHistory.length - 1] : 0,
            lossHistory,
            epochs,
        };
    }
    /** Inference-only: encode the full graph without masking. Returns embeddings. */
    embed(graph) {
        assertGraphMAEEnabled('embed');
        if (graph.nodes.size === 0) {
            return new Map();
        }
        return this.encode(graph);
    }
    /** Initialize weight matrices on first use, based on the graph's feature dimension. */
    ensureInitialized(graph) {
        const firstNode = graph.nodes.values().next().value;
        if (!firstNode)
            return;
        const dim = firstNode.length;
        // Already initialized for this dimension — nothing to do.
        // NOTE: a changed dimension re-initializes ALL weights (training state is lost).
        if (this.inputDim === dim && this.headWeights.length > 0)
            return;
        this.inputDim = dim;
        const { numHeads, embeddingDim } = this.config;
        const headDim = Math.max(1, Math.floor(dim / numHeads));
        this.headWeights = [];
        for (let h = 0; h < numHeads; h++) {
            this.headWeights.push({
                Wq: xavierInit(headDim, dim, this.rng),
                Wk: xavierInit(headDim, dim, this.rng),
            });
        }
        this.Wout = xavierInit(embeddingDim, dim * numHeads, this.rng);
        this.Wdec = xavierInit(dim, embeddingDim, this.rng);
        this.maskToken = new Float32Array(dim);
        for (let i = 0; i < dim; i++) {
            this.maskToken[i] = (this.rng.nextFloat() * 2 - 1) * 0.1;
        }
    }
    /** Compute reconstruction loss with a pre-determined mask (no re-masking). */
    computeFixedMaskLoss(graph, originalFeatures, maskedIds) {
        const maskedNodes = new Map();
        for (const [id, features] of graph.nodes) {
            maskedNodes.set(id, maskedIds.has(id) ? new Float32Array(this.maskToken) : features);
        }
        const maskedGraph = { nodes: maskedNodes, edges: graph.edges };
        const encodedMasked = this.encode(maskedGraph);
        const decodedMasked = this.decode(encodedMasked);
        return this.reconstructionLoss(originalFeatures, decodedMasked, maskedIds);
    }
    /** Collect all trainable weight arrays for SPSA perturbation. */
    collectWeightArrays() {
        const arrays = [];
        for (const head of this.headWeights) {
            arrays.push(head.Wq);
            arrays.push(head.Wk);
        }
        if (this.Wout)
            arrays.push(this.Wout);
        if (this.Wdec)
            arrays.push(this.Wdec);
        return arrays;
    }
    /**
     * SPSA gradient-free optimization step (Spall 1992).
     * Perturbs ALL weights simultaneously with random +/-1, evaluates loss at both,
     * estimates gradient, updates weights. 2 forward passes per step regardless of param count.
     */
    spsaStep(graph, originalFeatures, maskedIds, stepSize, perturbSize) {
        const allWeights = this.collectWeightArrays();
        // Generate random perturbation vector: each element is +1 or -1
        const perturbations = allWeights.map((w) => {
            const p = new Float32Array(w.length);
            for (let i = 0; i < w.length; i++) {
                p[i] = this.rng.nextFloat() < 0.5 ? 1 : -1;
            }
            return p;
        });
        // Perturb weights +delta
        for (let k = 0; k < allWeights.length; k++) {
            for (let i = 0; i < allWeights[k].length; i++) {
                allWeights[k][i] += perturbSize * perturbations[k][i];
            }
        }
        const lossPlus = this.computeFixedMaskLoss(graph, originalFeatures, maskedIds);
        // Perturb weights -2*delta (to get -delta from original)
        for (let k = 0; k < allWeights.length; k++) {
            for (let i = 0; i < allWeights[k].length; i++) {
                allWeights[k][i] -= 2 * perturbSize * perturbations[k][i];
            }
        }
        const lossMinus = this.computeFixedMaskLoss(graph, originalFeatures, maskedIds);
        // Restore to original and apply gradient estimate
        for (let k = 0; k < allWeights.length; k++) {
            for (let i = 0; i < allWeights[k].length; i++) {
                allWeights[k][i] += perturbSize * perturbations[k][i]; // restore
                // perturbations are +/-1, so the division is a sign flip
                const gradEstimate = (lossPlus - lossMinus) / (2 * perturbSize * perturbations[k][i]);
                allWeights[k][i] -= stepSize * gradEstimate; // gradient descent step
            }
        }
        return (lossPlus + lossMinus) / 2;
    }
}
355
// Factory
/** Convenience factory: build a GraphMAEEncoder from an optional partial config. */
export function createGraphMAEEncoder(config) {
    const encoder = new GraphMAEEncoder(config);
    return encoder;
}
360
+ //# sourceMappingURL=graphmae-encoder.js.map