ruvector 0.2.13 → 0.2.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1030 @@
1
+ "use strict";
2
+ /**
3
+ * IntelligenceEngine - Full RuVector Intelligence Stack
4
+ *
5
+ * Integrates all RuVector capabilities for self-learning hooks:
6
+ * - VectorDB with HNSW for semantic memory (150x faster)
7
+ * - SONA for continual learning (Micro-LoRA, EWC++)
8
+ * - FastAgentDB for episode/trajectory storage
9
+ * - Attention mechanisms for pattern recognition
10
+ * - ReasoningBank for pattern clustering
11
+ *
12
+ * Replaces the simple Q-learning approach with real ML-powered intelligence.
13
+ */
14
+ Object.defineProperty(exports, "__esModule", { value: true });
15
+ exports.IntelligenceEngine = void 0;
16
+ exports.createIntelligenceEngine = createIntelligenceEngine;
17
+ exports.createHighPerformanceEngine = createHighPerformanceEngine;
18
+ exports.createLightweightEngine = createLightweightEngine;
19
+ const agentdb_fast_1 = require("./agentdb-fast");
20
+ const sona_wrapper_1 = require("./sona-wrapper");
21
+ const onnx_embedder_1 = require("./onnx-embedder");
22
+ const parallel_intelligence_1 = require("./parallel-intelligence");
23
+ // ============================================================================
24
+ // Lazy Loading
25
+ // ============================================================================
26
+ let VectorDB = null;
27
+ let vectorDbError = null;
28
+ function getVectorDB() {
29
+ if (VectorDB)
30
+ return VectorDB;
31
+ if (vectorDbError)
32
+ throw vectorDbError;
33
+ try {
34
+ const core = require('@ruvector/core');
35
+ VectorDB = core.VectorDb || core.VectorDB;
36
+ return VectorDB;
37
+ }
38
+ catch {
39
+ try {
40
+ const pkg = require('ruvector');
41
+ VectorDB = pkg.VectorDb || pkg.VectorDB;
42
+ return VectorDB;
43
+ }
44
+ catch (e) {
45
+ vectorDbError = new Error(`VectorDB not available: ${e.message}`);
46
+ throw vectorDbError;
47
+ }
48
+ }
49
+ }
50
+ let attentionModule = null;
51
+ let attentionError = null;
52
+ function getAttention() {
53
+ if (attentionModule)
54
+ return attentionModule;
55
+ if (attentionError)
56
+ return null; // Silently fail for optional module
57
+ try {
58
+ attentionModule = require('@ruvector/attention');
59
+ return attentionModule;
60
+ }
61
+ catch (e) {
62
+ attentionError = e;
63
+ return null;
64
+ }
65
+ }
66
+ // ============================================================================
67
+ // Intelligence Engine
68
+ // ============================================================================
69
+ /**
70
+ * Full-stack intelligence engine using all RuVector capabilities
71
+ */
72
+ class IntelligenceEngine {
73
+ constructor(config = {}) {
74
+ this.vectorDb = null;
75
+ this.sona = null;
76
+ this.attention = null;
77
+ this.onnxEmbedder = null;
78
+ this.onnxReady = false;
79
+ this.parallel = null;
80
+ // In-memory data structures
81
+ this.memories = new Map();
82
+ this.routingPatterns = new Map(); // state -> action -> value
83
+ this.errorPatterns = new Map(); // error -> fixes
84
+ this.coEditPatterns = new Map(); // file -> related files -> count
85
+ this.agentMappings = new Map(); // extension/dir -> agent
86
+ this.workerTriggerMappings = new Map(); // trigger -> agents
87
+ // Runtime state
88
+ this.currentTrajectoryId = null;
89
+ this.sessionStart = Date.now();
90
+ this.learningEnabled = true;
91
+ this.episodeBatchQueue = [];
92
+ // If ONNX is enabled, use 384 dimensions (MiniLM default)
93
+ const useOnnx = !!(config.enableOnnx && (0, onnx_embedder_1.isOnnxAvailable)());
94
+ const embeddingDim = useOnnx ? 384 : (config.embeddingDim ?? 256);
95
+ this.config = {
96
+ embeddingDim,
97
+ maxMemories: config.maxMemories ?? 100000,
98
+ maxEpisodes: config.maxEpisodes ?? 50000,
99
+ enableSona: config.enableSona ?? true,
100
+ enableAttention: config.enableAttention ?? true,
101
+ enableOnnx: useOnnx,
102
+ sonaConfig: config.sonaConfig ?? {},
103
+ storagePath: config.storagePath ?? '',
104
+ learningRate: config.learningRate ?? 0.1,
105
+ parallelConfig: config.parallelConfig ?? {},
106
+ };
107
+ // Initialize parallel workers (auto-enabled for MCP, disabled for CLI)
108
+ this.parallel = (0, parallel_intelligence_1.getParallelIntelligence)(this.config.parallelConfig);
109
+ this.initParallel();
110
+ // Initialize FastAgentDB for episode storage
111
+ this.agentDb = new agentdb_fast_1.FastAgentDB(this.config.embeddingDim, this.config.maxEpisodes);
112
+ // Initialize ONNX embedder if enabled
113
+ if (this.config.enableOnnx) {
114
+ this.onnxEmbedder = new onnx_embedder_1.OnnxEmbedder();
115
+ // Initialize async (don't block constructor)
116
+ this.initOnnx();
117
+ }
118
+ // Initialize SONA if enabled and available
119
+ if (this.config.enableSona && (0, sona_wrapper_1.isSonaAvailable)()) {
120
+ try {
121
+ this.sona = sona_wrapper_1.SonaEngine.withConfig({
122
+ hiddenDim: this.config.embeddingDim,
123
+ embeddingDim: this.config.embeddingDim,
124
+ microLoraRank: 2, // Fast adaptations
125
+ baseLoraRank: 8,
126
+ patternClusters: 100,
127
+ trajectoryCapacity: 10000,
128
+ ...this.config.sonaConfig,
129
+ });
130
+ }
131
+ catch (e) {
132
+ console.warn('SONA initialization failed, using fallback learning');
133
+ }
134
+ }
135
+ // Initialize attention if enabled (fallback if ONNX not available)
136
+ if (this.config.enableAttention && !this.config.enableOnnx) {
137
+ this.attention = getAttention();
138
+ }
139
+ // Initialize VectorDB for memory
140
+ this.initVectorDb();
141
+ }
142
+ async initOnnx() {
143
+ if (!this.onnxEmbedder)
144
+ return;
145
+ try {
146
+ await this.onnxEmbedder.init();
147
+ this.onnxReady = true;
148
+ }
149
+ catch (e) {
150
+ console.warn('ONNX initialization failed, using fallback embeddings');
151
+ this.onnxReady = false;
152
+ }
153
+ }
154
+ async initVectorDb() {
155
+ try {
156
+ const VDB = getVectorDB();
157
+ this.vectorDb = new VDB({
158
+ dimensions: this.config.embeddingDim,
159
+ distanceMetric: 'Cosine',
160
+ });
161
+ }
162
+ catch {
163
+ // VectorDB not available, use fallback
164
+ }
165
+ }
166
+ async initParallel() {
167
+ if (this.parallel) {
168
+ try {
169
+ await this.parallel.init();
170
+ }
171
+ catch {
172
+ // Parallel not available, use sequential
173
+ this.parallel = null;
174
+ }
175
+ }
176
+ }
177
+ // =========================================================================
178
+ // Embedding Generation
179
+ // =========================================================================
180
+ /**
181
+ * Generate embedding using ONNX, attention, or hash (in order of preference)
182
+ */
183
+ embed(text) {
184
+ const dim = this.config.embeddingDim;
185
+ // Try ONNX semantic embeddings first (best quality)
186
+ if (this.onnxReady && this.onnxEmbedder) {
187
+ try {
188
+ // Note: This is sync wrapper for async ONNX
189
+ // For full async, use embedAsync
190
+ return this.hashEmbed(text, dim); // Fallback for sync context
191
+ }
192
+ catch {
193
+ // Fall through
194
+ }
195
+ }
196
+ // Try to use attention-based embedding
197
+ if (this.attention?.DotProductAttention) {
198
+ try {
199
+ return this.attentionEmbed(text, dim);
200
+ }
201
+ catch {
202
+ // Fall through to hash embedding
203
+ }
204
+ }
205
+ // Improved positional hash embedding
206
+ return this.hashEmbed(text, dim);
207
+ }
208
+ /**
209
+ * Async embedding with ONNX support (recommended for semantic quality)
210
+ */
211
+ async embedAsync(text) {
212
+ // Try ONNX first (best semantic quality)
213
+ if (this.onnxEmbedder) {
214
+ try {
215
+ if (!this.onnxReady) {
216
+ await this.onnxEmbedder.init();
217
+ this.onnxReady = true;
218
+ }
219
+ return await this.onnxEmbedder.embed(text);
220
+ }
221
+ catch {
222
+ // Fall through to sync methods
223
+ }
224
+ }
225
+ // Fall back to sync embedding
226
+ return this.embed(text);
227
+ }
228
+ /**
229
+ * Attention-based embedding using Flash or Multi-head attention
230
+ */
231
+ attentionEmbed(text, dim) {
232
+ const tokens = this.tokenize(text);
233
+ const tokenEmbeddings = tokens.map(t => this.tokenEmbed(t, dim));
234
+ if (tokenEmbeddings.length === 0) {
235
+ return new Array(dim).fill(0);
236
+ }
237
+ try {
238
+ // Try FlashAttention first (fastest)
239
+ if (this.attention?.FlashAttention) {
240
+ const flash = new this.attention.FlashAttention(dim);
241
+ const query = new Float32Array(this.meanPool(tokenEmbeddings));
242
+ const keys = tokenEmbeddings.map(e => new Float32Array(e));
243
+ const values = tokenEmbeddings.map(e => new Float32Array(e));
244
+ const result = flash.forward(query, keys, values);
245
+ return Array.from(result);
246
+ }
247
+ // Try MultiHeadAttention (better quality)
248
+ if (this.attention?.MultiHeadAttention) {
249
+ const numHeads = Math.min(8, Math.floor(dim / 32)); // 8 heads max
250
+ const mha = new this.attention.MultiHeadAttention(dim, numHeads);
251
+ const query = new Float32Array(this.meanPool(tokenEmbeddings));
252
+ const keys = tokenEmbeddings.map(e => new Float32Array(e));
253
+ const values = tokenEmbeddings.map(e => new Float32Array(e));
254
+ const result = mha.forward(query, keys, values);
255
+ return Array.from(result);
256
+ }
257
+ // Fall back to DotProductAttention
258
+ if (this.attention?.DotProductAttention) {
259
+ const attn = new this.attention.DotProductAttention();
260
+ const query = this.meanPool(tokenEmbeddings);
261
+ const result = attn.forward(new Float32Array(query), tokenEmbeddings.map(e => new Float32Array(e)), tokenEmbeddings.map(e => new Float32Array(e)));
262
+ return Array.from(result);
263
+ }
264
+ }
265
+ catch {
266
+ // Fall through to hash embedding
267
+ }
268
+ // Ultimate fallback
269
+ return this.hashEmbed(text, dim);
270
+ }
271
+ /**
272
+ * Improved hash-based embedding with positional encoding
273
+ */
274
+ hashEmbed(text, dim) {
275
+ const embedding = new Array(dim).fill(0);
276
+ const tokens = this.tokenize(text);
277
+ for (let t = 0; t < tokens.length; t++) {
278
+ const token = tokens[t];
279
+ const posWeight = 1 / (1 + t * 0.1); // Positional decay
280
+ for (let i = 0; i < token.length; i++) {
281
+ const charCode = token.charCodeAt(i);
282
+ // Multiple hash functions for better distribution
283
+ const h1 = (charCode * 31 + i * 17 + t * 7) % dim;
284
+ const h2 = (charCode * 37 + i * 23 + t * 11) % dim;
285
+ const h3 = (charCode * 41 + i * 29 + t * 13) % dim;
286
+ embedding[h1] += posWeight;
287
+ embedding[h2] += posWeight * 0.5;
288
+ embedding[h3] += posWeight * 0.25;
289
+ }
290
+ }
291
+ // L2 normalize
292
+ const norm = Math.sqrt(embedding.reduce((a, b) => a + b * b, 0));
293
+ if (norm > 0) {
294
+ for (let i = 0; i < dim; i++)
295
+ embedding[i] /= norm;
296
+ }
297
+ return embedding;
298
+ }
299
+ tokenize(text) {
300
+ return text.toLowerCase()
301
+ .replace(/[^\w\s]/g, ' ')
302
+ .split(/\s+/)
303
+ .filter(t => t.length > 0);
304
+ }
305
+ tokenEmbed(token, dim) {
306
+ const embedding = new Array(dim).fill(0);
307
+ for (let i = 0; i < token.length; i++) {
308
+ const idx = (token.charCodeAt(i) * 31 + i * 17) % dim;
309
+ embedding[idx] += 1;
310
+ }
311
+ const norm = Math.sqrt(embedding.reduce((a, b) => a + b * b, 0));
312
+ if (norm > 0)
313
+ for (let i = 0; i < dim; i++)
314
+ embedding[i] /= norm;
315
+ return embedding;
316
+ }
317
+ meanPool(embeddings) {
318
+ if (embeddings.length === 0)
319
+ return [];
320
+ const dim = embeddings[0].length;
321
+ const result = new Array(dim).fill(0);
322
+ for (const emb of embeddings) {
323
+ for (let i = 0; i < dim; i++)
324
+ result[i] += emb[i];
325
+ }
326
+ for (let i = 0; i < dim; i++)
327
+ result[i] /= embeddings.length;
328
+ return result;
329
+ }
330
+ // =========================================================================
331
+ // Memory Operations
332
+ // =========================================================================
333
+ /**
334
+ * Store content in vector memory (uses ONNX if available)
335
+ */
336
+ async remember(content, type = 'general') {
337
+ const id = `mem-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
338
+ // Use async ONNX embeddings if available for better semantic quality
339
+ const embedding = await this.embedAsync(content);
340
+ const entry = {
341
+ id,
342
+ content,
343
+ type,
344
+ embedding,
345
+ created: new Date().toISOString(),
346
+ accessed: 0,
347
+ };
348
+ this.memories.set(id, entry);
349
+ // Index in VectorDB if available
350
+ if (this.vectorDb) {
351
+ try {
352
+ await this.vectorDb.insert({
353
+ id,
354
+ vector: new Float32Array(embedding),
355
+ metadata: JSON.stringify({ content, type, created: entry.created }),
356
+ });
357
+ }
358
+ catch {
359
+ // Ignore indexing errors
360
+ }
361
+ }
362
+ return entry;
363
+ }
364
+ /**
365
+ * Semantic search of memories (uses ONNX if available)
366
+ */
367
+ async recall(query, topK = 5) {
368
+ // Use async ONNX embeddings if available for better semantic quality
369
+ const queryEmbed = await this.embedAsync(query);
370
+ // Try VectorDB search first (HNSW - 150x faster)
371
+ if (this.vectorDb) {
372
+ try {
373
+ const results = await this.vectorDb.search({
374
+ vector: new Float32Array(queryEmbed),
375
+ k: topK,
376
+ });
377
+ return results.map((r) => {
378
+ const entry = this.memories.get(r.id);
379
+ if (entry) {
380
+ entry.accessed++;
381
+ entry.score = 1 - r.score; // Convert distance to similarity
382
+ }
383
+ return entry;
384
+ }).filter((e) => e !== null);
385
+ }
386
+ catch {
387
+ // Fall through to brute force
388
+ }
389
+ }
390
+ // Fallback: brute-force cosine similarity
391
+ const scored = Array.from(this.memories.values()).map(m => ({
392
+ ...m,
393
+ score: this.cosineSimilarity(queryEmbed, m.embedding),
394
+ }));
395
+ return scored
396
+ .sort((a, b) => (b.score || 0) - (a.score || 0))
397
+ .slice(0, topK);
398
+ }
399
+ cosineSimilarity(a, b) {
400
+ if (a.length !== b.length)
401
+ return 0;
402
+ let dot = 0, normA = 0, normB = 0;
403
+ for (let i = 0; i < a.length; i++) {
404
+ dot += a[i] * b[i];
405
+ normA += a[i] * a[i];
406
+ normB += b[i] * b[i];
407
+ }
408
+ const denom = Math.sqrt(normA) * Math.sqrt(normB);
409
+ return denom > 0 ? dot / denom : 0;
410
+ }
411
+ // =========================================================================
412
+ // Agent Routing with SONA
413
+ // =========================================================================
414
+ /**
415
+ * Route a task to the best agent using learned patterns
416
+ */
417
+ async route(task, file) {
418
+ const ext = file ? this.getExtension(file) : '';
419
+ const state = this.getState(task, ext);
420
+ // Use async ONNX embeddings for semantic routing (critical fix)
421
+ const taskEmbed = await this.embedAsync(task + ' ' + (file || ''));
422
+ // Apply SONA micro-LoRA transformation if available
423
+ let adaptedEmbed = taskEmbed;
424
+ if (this.sona) {
425
+ try {
426
+ adaptedEmbed = this.sona.applyMicroLora(taskEmbed);
427
+ }
428
+ catch {
429
+ // Use original embedding
430
+ }
431
+ }
432
+ // Find similar patterns using ReasoningBank
433
+ let patterns = [];
434
+ if (this.sona) {
435
+ try {
436
+ patterns = this.sona.findPatterns(adaptedEmbed, 5);
437
+ }
438
+ catch {
439
+ // No patterns
440
+ }
441
+ }
442
+ // Default agent mappings
443
+ const defaults = {
444
+ '.rs': 'rust-developer',
445
+ '.ts': 'typescript-developer',
446
+ '.tsx': 'react-developer',
447
+ '.js': 'javascript-developer',
448
+ '.jsx': 'react-developer',
449
+ '.py': 'python-developer',
450
+ '.go': 'go-developer',
451
+ '.sql': 'database-specialist',
452
+ '.md': 'documentation-specialist',
453
+ '.yml': 'devops-engineer',
454
+ '.yaml': 'devops-engineer',
455
+ '.json': 'coder',
456
+ '.toml': 'coder',
457
+ };
458
+ // Check learned patterns first
459
+ const statePatterns = this.routingPatterns.get(state);
460
+ let bestAgent = defaults[ext] || 'coder';
461
+ let bestScore = 0.5;
462
+ let reason = 'default mapping';
463
+ if (statePatterns && statePatterns.size > 0) {
464
+ for (const [agent, score] of statePatterns) {
465
+ if (score > bestScore) {
466
+ bestAgent = agent;
467
+ bestScore = score;
468
+ reason = 'learned from patterns';
469
+ }
470
+ }
471
+ }
472
+ // Check custom agent mappings
473
+ if (this.agentMappings.has(ext)) {
474
+ const mapped = this.agentMappings.get(ext);
475
+ if (bestScore < 0.8) {
476
+ bestAgent = mapped;
477
+ bestScore = 0.8;
478
+ reason = 'custom mapping';
479
+ }
480
+ }
481
+ // Boost confidence if SONA patterns match
482
+ if (patterns.length > 0 && patterns[0].avgQuality > 0.7) {
483
+ bestScore = Math.min(1.0, bestScore + 0.1);
484
+ reason += ' + SONA pattern match';
485
+ }
486
+ return {
487
+ agent: bestAgent,
488
+ confidence: Math.min(1.0, bestScore),
489
+ reason,
490
+ patterns: patterns.length > 0 ? patterns : undefined,
491
+ alternates: this.getAlternates(statePatterns, bestAgent),
492
+ };
493
+ }
494
+ getExtension(file) {
495
+ const idx = file.lastIndexOf('.');
496
+ return idx >= 0 ? file.slice(idx).toLowerCase() : '';
497
+ }
498
+ getState(task, ext) {
499
+ const taskType = task.includes('fix') ? 'fix' :
500
+ task.includes('test') ? 'test' :
501
+ task.includes('refactor') ? 'refactor' :
502
+ task.includes('document') ? 'docs' : 'edit';
503
+ return `${taskType}:${ext || 'unknown'}`;
504
+ }
505
+ getAlternates(patterns, exclude) {
506
+ if (!patterns)
507
+ return [];
508
+ return Array.from(patterns.entries())
509
+ .filter(([a]) => a !== exclude)
510
+ .sort((a, b) => b[1] - a[1])
511
+ .slice(0, 3)
512
+ .map(([agent, confidence]) => ({ agent, confidence: Math.min(1.0, confidence) }));
513
+ }
514
+ // =========================================================================
515
+ // Trajectory Learning
516
+ // =========================================================================
517
+ /**
518
+ * Begin recording a trajectory (before edit/command)
519
+ */
520
+ beginTrajectory(context, file) {
521
+ const embed = this.embed(context + ' ' + (file || ''));
522
+ if (this.sona) {
523
+ try {
524
+ this.currentTrajectoryId = this.sona.beginTrajectory(embed);
525
+ if (file) {
526
+ this.sona.addContext(this.currentTrajectoryId, file);
527
+ }
528
+ }
529
+ catch {
530
+ this.currentTrajectoryId = null;
531
+ }
532
+ }
533
+ }
534
+ /**
535
+ * Add a step to the current trajectory
536
+ */
537
+ addTrajectoryStep(activations, reward) {
538
+ if (this.sona && this.currentTrajectoryId !== null) {
539
+ try {
540
+ const attentionWeights = new Array(activations.length).fill(1 / activations.length);
541
+ this.sona.addStep(this.currentTrajectoryId, activations, attentionWeights, reward);
542
+ }
543
+ catch {
544
+ // Ignore step errors
545
+ }
546
+ }
547
+ }
548
+ /**
549
+ * End the current trajectory with a quality score
550
+ */
551
+ endTrajectory(success, quality) {
552
+ const q = quality ?? (success ? 0.9 : 0.3);
553
+ if (this.sona && this.currentTrajectoryId !== null) {
554
+ try {
555
+ this.sona.endTrajectory(this.currentTrajectoryId, q);
556
+ }
557
+ catch {
558
+ // Ignore end errors
559
+ }
560
+ }
561
+ this.currentTrajectoryId = null;
562
+ }
563
+ /**
564
+ * Set the agent route for current trajectory
565
+ */
566
+ setTrajectoryRoute(agent) {
567
+ if (this.sona && this.currentTrajectoryId !== null) {
568
+ try {
569
+ this.sona.setRoute(this.currentTrajectoryId, agent);
570
+ }
571
+ catch {
572
+ // Ignore route errors
573
+ }
574
+ }
575
+ }
576
+ // =========================================================================
577
+ // Episode Learning (Q-learning compatible)
578
+ // =========================================================================
579
+ /**
580
+ * Record an episode for learning
581
+ */
582
+ async recordEpisode(state, action, reward, nextState, done, metadata) {
583
+ const stateEmbed = this.embed(state);
584
+ const nextStateEmbed = this.embed(nextState);
585
+ // Store in FastAgentDB
586
+ await this.agentDb.storeEpisode({
587
+ state: stateEmbed,
588
+ action,
589
+ reward,
590
+ nextState: nextStateEmbed,
591
+ done,
592
+ metadata,
593
+ });
594
+ // Update routing patterns (Q-learning style)
595
+ if (!this.routingPatterns.has(state)) {
596
+ this.routingPatterns.set(state, new Map());
597
+ }
598
+ const patterns = this.routingPatterns.get(state);
599
+ const oldValue = patterns.get(action) || 0.5;
600
+ const newValue = oldValue + this.config.learningRate * (reward - oldValue);
601
+ patterns.set(action, newValue);
602
+ }
603
+ /**
604
+ * Queue episode for batch processing (3-4x faster with workers)
605
+ */
606
+ queueEpisode(episode) {
607
+ this.episodeBatchQueue.push(episode);
608
+ }
609
+ /**
610
+ * Process queued episodes in parallel batch
611
+ */
612
+ async flushEpisodeBatch() {
613
+ if (this.episodeBatchQueue.length === 0)
614
+ return 0;
615
+ const count = this.episodeBatchQueue.length;
616
+ if (this.parallel) {
617
+ // Use parallel workers for batch processing
618
+ await this.parallel.recordEpisodesBatch(this.episodeBatchQueue);
619
+ }
620
+ else {
621
+ // Sequential fallback
622
+ for (const ep of this.episodeBatchQueue) {
623
+ await this.recordEpisode(ep.state, ep.action, ep.reward, ep.nextState, ep.done, ep.metadata);
624
+ }
625
+ }
626
+ this.episodeBatchQueue = [];
627
+ return count;
628
+ }
629
+ /**
630
+ * Learn from similar past episodes
631
+ */
632
+ async learnFromSimilar(state, k = 5) {
633
+ const stateEmbed = this.embed(state);
634
+ return this.agentDb.searchByState(stateEmbed, k);
635
+ }
636
+ // =========================================================================
637
+ // Worker-Agent Mappings
638
+ // =========================================================================
639
+ /**
640
+ * Register worker trigger to agent mappings
641
+ */
642
+ registerWorkerTrigger(trigger, priority, agents) {
643
+ this.workerTriggerMappings.set(trigger, { priority, agents });
644
+ }
645
+ /**
646
+ * Get agents for a worker trigger
647
+ */
648
+ getAgentsForTrigger(trigger) {
649
+ return this.workerTriggerMappings.get(trigger);
650
+ }
651
+ /**
652
+ * Route a task using worker trigger patterns first, then fall back to regular routing
653
+ */
654
+ async routeWithWorkers(task, file) {
655
+ // Check if task matches any worker trigger patterns
656
+ const taskLower = task.toLowerCase();
657
+ for (const [trigger, config] of this.workerTriggerMappings) {
658
+ if (taskLower.includes(trigger)) {
659
+ const primaryAgent = config.agents[0] || 'coder';
660
+ const alternates = config.agents.slice(1).map(a => ({ agent: a, confidence: 0.7 }));
661
+ return {
662
+ agent: primaryAgent,
663
+ confidence: config.priority === 'critical' ? 0.95 :
664
+ config.priority === 'high' ? 0.85 :
665
+ config.priority === 'medium' ? 0.75 : 0.65,
666
+ reason: `worker trigger: ${trigger}`,
667
+ alternates,
668
+ };
669
+ }
670
+ }
671
+ // Fall back to regular routing
672
+ return this.route(task, file);
673
+ }
674
+ /**
675
+ * Initialize default worker trigger mappings
676
+ */
677
+ initDefaultWorkerMappings() {
678
+ const defaults = [
679
+ ['ultralearn', 'high', ['researcher', 'coder']],
680
+ ['optimize', 'high', ['performance-analyzer']],
681
+ ['audit', 'critical', ['security-analyst', 'tester']],
682
+ ['map', 'medium', ['architect']],
683
+ ['security', 'critical', ['security-analyst']],
684
+ ['benchmark', 'low', ['performance-analyzer']],
685
+ ['document', 'medium', ['documenter']],
686
+ ['refactor', 'medium', ['coder', 'reviewer']],
687
+ ['testgaps', 'high', ['tester']],
688
+ ['deepdive', 'low', ['researcher']],
689
+ ['predict', 'medium', ['analyst']],
690
+ ['consolidate', 'low', ['architect']],
691
+ ];
692
+ for (const [trigger, priority, agents] of defaults) {
693
+ this.workerTriggerMappings.set(trigger, { priority, agents });
694
+ }
695
+ }
696
+ // =========================================================================
697
+ // Co-edit Pattern Learning
698
+ // =========================================================================
699
+ /**
700
+ * Record a co-edit pattern
701
+ */
702
+ recordCoEdit(file1, file2) {
703
+ if (!this.coEditPatterns.has(file1)) {
704
+ this.coEditPatterns.set(file1, new Map());
705
+ }
706
+ if (!this.coEditPatterns.has(file2)) {
707
+ this.coEditPatterns.set(file2, new Map());
708
+ }
709
+ const count1 = this.coEditPatterns.get(file1).get(file2) || 0;
710
+ this.coEditPatterns.get(file1).set(file2, count1 + 1);
711
+ const count2 = this.coEditPatterns.get(file2).get(file1) || 0;
712
+ this.coEditPatterns.get(file2).set(file1, count2 + 1);
713
+ }
714
+ /**
715
+ * Get likely next files to edit
716
+ */
717
+ getLikelyNextFiles(file, topK = 5) {
718
+ const related = this.coEditPatterns.get(file);
719
+ if (!related)
720
+ return [];
721
+ return Array.from(related.entries())
722
+ .sort((a, b) => b[1] - a[1])
723
+ .slice(0, topK)
724
+ .map(([f, count]) => ({ file: f, count }));
725
+ }
726
+ // =========================================================================
727
+ // Error Pattern Learning
728
+ // =========================================================================
729
+ /**
730
+ * Record an error pattern with fixes
731
+ */
732
+ recordErrorFix(errorPattern, fix) {
733
+ if (!this.errorPatterns.has(errorPattern)) {
734
+ this.errorPatterns.set(errorPattern, []);
735
+ }
736
+ const fixes = this.errorPatterns.get(errorPattern);
737
+ if (!fixes.includes(fix)) {
738
+ fixes.push(fix);
739
+ }
740
+ }
741
+ /**
742
+ * Get suggested fixes for an error
743
+ */
744
+ getSuggestedFixes(error) {
745
+ // Exact match
746
+ if (this.errorPatterns.has(error)) {
747
+ return this.errorPatterns.get(error);
748
+ }
749
+ // Fuzzy match by embedding similarity
750
+ const errorEmbed = this.embed(error);
751
+ const matches = [];
752
+ for (const [pattern, fixes] of this.errorPatterns) {
753
+ const patternEmbed = this.embed(pattern);
754
+ const similarity = this.cosineSimilarity(errorEmbed, patternEmbed);
755
+ if (similarity > 0.7) {
756
+ matches.push({ pattern, similarity, fixes });
757
+ }
758
+ }
759
+ if (matches.length === 0)
760
+ return [];
761
+ // Return fixes from most similar pattern
762
+ matches.sort((a, b) => b.similarity - a.similarity);
763
+ return matches[0].fixes;
764
+ }
765
+ // =========================================================================
766
+ // Tick / Background Learning
767
+ // =========================================================================
768
+ /**
769
+ * Run background learning cycle
770
+ */
771
+ tick() {
772
+ if (this.sona) {
773
+ try {
774
+ return this.sona.tick();
775
+ }
776
+ catch {
777
+ return null;
778
+ }
779
+ }
780
+ return null;
781
+ }
782
+ /**
783
+ * Force immediate learning
784
+ */
785
+ forceLearn() {
786
+ if (this.sona) {
787
+ try {
788
+ return this.sona.forceLearn();
789
+ }
790
+ catch {
791
+ return null;
792
+ }
793
+ }
794
+ return null;
795
+ }
796
+ // =========================================================================
797
+ // Statistics
798
+ // =========================================================================
799
+ /**
800
+ * Get comprehensive learning statistics
801
+ */
802
+ getStats() {
803
+ const agentDbStats = this.agentDb.getStats();
804
+ let sonaStats = null;
805
+ if (this.sona) {
806
+ try {
807
+ sonaStats = this.sona.getStats();
808
+ }
809
+ catch {
810
+ // No SONA stats
811
+ }
812
+ }
813
+ // Calculate average reward from patterns
814
+ let totalReward = 0;
815
+ let rewardCount = 0;
816
+ for (const patterns of this.routingPatterns.values()) {
817
+ for (const reward of patterns.values()) {
818
+ totalReward += reward;
819
+ rewardCount++;
820
+ }
821
+ }
822
+ const parallelStats = this.parallel?.getStats() ?? { enabled: false, workers: 0, busy: 0, queued: 0 };
823
+ return {
824
+ totalMemories: this.memories.size,
825
+ memoryDimensions: this.config.embeddingDim,
826
+ totalEpisodes: agentDbStats.episodeCount,
827
+ totalTrajectories: agentDbStats.trajectoryCount,
828
+ avgReward: rewardCount > 0 ? totalReward / rewardCount : 0,
829
+ sonaEnabled: this.sona !== null,
830
+ trajectoriesRecorded: sonaStats?.trajectoriesRecorded ?? 0,
831
+ patternsLearned: sonaStats?.patternsLearned ?? 0,
832
+ microLoraUpdates: sonaStats?.microLoraUpdates ?? 0,
833
+ baseLoraUpdates: sonaStats?.baseLoraUpdates ?? 0,
834
+ ewcConsolidations: sonaStats?.ewcConsolidations ?? 0,
835
+ routingPatterns: this.routingPatterns.size,
836
+ errorPatterns: this.errorPatterns.size,
837
+ coEditPatterns: this.coEditPatterns.size,
838
+ workerTriggers: this.workerTriggerMappings.size,
839
+ attentionEnabled: this.attention !== null,
840
+ onnxEnabled: this.onnxReady,
841
+ parallelEnabled: parallelStats.enabled,
842
+ parallelWorkers: parallelStats.workers,
843
+ parallelBusy: parallelStats.busy,
844
+ parallelQueued: parallelStats.queued,
845
+ };
846
+ }
847
+ // =========================================================================
848
+ // Persistence
849
+ // =========================================================================
850
+ /**
851
+ * Export all data for persistence
852
+ */
853
+ export() {
854
+ return {
855
+ version: '2.0.0',
856
+ exported: new Date().toISOString(),
857
+ config: this.config,
858
+ memories: Array.from(this.memories.values()),
859
+ routingPatterns: Object.fromEntries(Array.from(this.routingPatterns.entries()).map(([k, v]) => [
860
+ k,
861
+ Object.fromEntries(v),
862
+ ])),
863
+ errorPatterns: Object.fromEntries(this.errorPatterns),
864
+ coEditPatterns: Object.fromEntries(Array.from(this.coEditPatterns.entries()).map(([k, v]) => [
865
+ k,
866
+ Object.fromEntries(v),
867
+ ])),
868
+ agentMappings: Object.fromEntries(this.agentMappings),
869
+ workerTriggerMappings: Object.fromEntries(Array.from(this.workerTriggerMappings.entries()).map(([k, v]) => [k, v])),
870
+ stats: this.getStats(),
871
+ };
872
+ }
873
+ /**
874
+ * Import data from persistence
875
+ */
876
+ import(data, merge = false) {
877
+ if (!merge) {
878
+ this.memories.clear();
879
+ this.routingPatterns.clear();
880
+ this.errorPatterns.clear();
881
+ this.coEditPatterns.clear();
882
+ this.agentMappings.clear();
883
+ }
884
+ // Import memories
885
+ if (data.memories) {
886
+ for (const mem of data.memories) {
887
+ this.memories.set(mem.id, mem);
888
+ }
889
+ }
890
+ // Import routing patterns
891
+ if (data.routingPatterns) {
892
+ for (const [state, actions] of Object.entries(data.routingPatterns)) {
893
+ const map = new Map(Object.entries(actions));
894
+ if (merge && this.routingPatterns.has(state)) {
895
+ const existing = this.routingPatterns.get(state);
896
+ for (const [action, value] of map) {
897
+ existing.set(action, Math.max(existing.get(action) || 0, value));
898
+ }
899
+ }
900
+ else {
901
+ this.routingPatterns.set(state, map);
902
+ }
903
+ }
904
+ }
905
+ // Import error patterns
906
+ if (data.errorPatterns) {
907
+ for (const [pattern, fixes] of Object.entries(data.errorPatterns)) {
908
+ if (merge && this.errorPatterns.has(pattern)) {
909
+ const existing = this.errorPatterns.get(pattern);
910
+ for (const fix of fixes) {
911
+ if (!existing.includes(fix))
912
+ existing.push(fix);
913
+ }
914
+ }
915
+ else {
916
+ this.errorPatterns.set(pattern, fixes);
917
+ }
918
+ }
919
+ }
920
+ // Import co-edit patterns
921
+ if (data.coEditPatterns) {
922
+ for (const [file, related] of Object.entries(data.coEditPatterns)) {
923
+ const map = new Map(Object.entries(related));
924
+ if (merge && this.coEditPatterns.has(file)) {
925
+ const existing = this.coEditPatterns.get(file);
926
+ for (const [f, count] of map) {
927
+ existing.set(f, (existing.get(f) || 0) + count);
928
+ }
929
+ }
930
+ else {
931
+ this.coEditPatterns.set(file, map);
932
+ }
933
+ }
934
+ }
935
+ // Import agent mappings
936
+ if (data.agentMappings) {
937
+ for (const [ext, agent] of Object.entries(data.agentMappings)) {
938
+ this.agentMappings.set(ext, agent);
939
+ }
940
+ }
941
+ // Import worker trigger mappings
942
+ if (data.workerTriggerMappings) {
943
+ for (const [trigger, config] of Object.entries(data.workerTriggerMappings)) {
944
+ const typedConfig = config;
945
+ this.workerTriggerMappings.set(trigger, typedConfig);
946
+ }
947
+ }
948
+ }
949
+ /**
950
+ * Clear all data
951
+ */
952
+ clear() {
953
+ this.memories.clear();
954
+ this.routingPatterns.clear();
955
+ this.errorPatterns.clear();
956
+ this.coEditPatterns.clear();
957
+ this.agentMappings.clear();
958
+ this.workerTriggerMappings.clear();
959
+ this.agentDb.clear();
960
+ }
961
+ // =========================================================================
962
+ // Compatibility with existing Intelligence class
963
+ // =========================================================================
964
+ /** Legacy: patterns object */
965
+ get patterns() {
966
+ const result = {};
967
+ for (const [state, actions] of this.routingPatterns) {
968
+ result[state] = Object.fromEntries(actions);
969
+ }
970
+ return result;
971
+ }
972
+ /** Legacy: file_sequences array */
973
+ get file_sequences() {
974
+ const sequences = [];
975
+ for (const [file, related] of this.coEditPatterns) {
976
+ const sorted = Array.from(related.entries())
977
+ .sort((a, b) => b[1] - a[1])
978
+ .map(([f]) => f);
979
+ if (sorted.length > 0) {
980
+ sequences.push([file, ...sorted.slice(0, 3)]);
981
+ }
982
+ }
983
+ return sequences;
984
+ }
985
+ /** Legacy: errors object */
986
+ get errors() {
987
+ return Object.fromEntries(this.errorPatterns);
988
+ }
989
+ }
990
+ exports.IntelligenceEngine = IntelligenceEngine;
991
+ // ============================================================================
992
+ // Factory Functions
993
+ // ============================================================================
994
+ /**
995
+ * Create a new IntelligenceEngine with default settings
996
+ */
997
+ function createIntelligenceEngine(config) {
998
+ return new IntelligenceEngine(config);
999
+ }
1000
+ /**
1001
+ * Create a high-performance engine with all features enabled
1002
+ */
1003
+ function createHighPerformanceEngine() {
1004
+ return new IntelligenceEngine({
1005
+ embeddingDim: 512,
1006
+ maxMemories: 200000,
1007
+ maxEpisodes: 100000,
1008
+ enableSona: true,
1009
+ enableAttention: true,
1010
+ sonaConfig: {
1011
+ hiddenDim: 512,
1012
+ microLoraRank: 2,
1013
+ baseLoraRank: 16,
1014
+ patternClusters: 200,
1015
+ },
1016
+ });
1017
+ }
1018
+ /**
1019
+ * Create a lightweight engine for fast startup
1020
+ */
1021
+ function createLightweightEngine() {
1022
+ return new IntelligenceEngine({
1023
+ embeddingDim: 128,
1024
+ maxMemories: 10000,
1025
+ maxEpisodes: 5000,
1026
+ enableSona: false,
1027
+ enableAttention: false,
1028
+ });
1029
+ }
1030
+ exports.default = IntelligenceEngine;