@arcanea/guardian-evolution 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. package/dist/algorithms/a2c.d.ts +86 -0
  2. package/dist/algorithms/a2c.d.ts.map +1 -0
  3. package/dist/algorithms/a2c.js +361 -0
  4. package/dist/algorithms/a2c.js.map +1 -0
  5. package/dist/algorithms/curiosity.d.ts +82 -0
  6. package/dist/algorithms/curiosity.d.ts.map +1 -0
  7. package/dist/algorithms/curiosity.js +392 -0
  8. package/dist/algorithms/curiosity.js.map +1 -0
  9. package/dist/algorithms/decision-transformer.d.ts +82 -0
  10. package/dist/algorithms/decision-transformer.d.ts.map +1 -0
  11. package/dist/algorithms/decision-transformer.js +415 -0
  12. package/dist/algorithms/decision-transformer.js.map +1 -0
  13. package/dist/algorithms/dqn.d.ts +72 -0
  14. package/dist/algorithms/dqn.d.ts.map +1 -0
  15. package/dist/algorithms/dqn.js +303 -0
  16. package/dist/algorithms/dqn.js.map +1 -0
  17. package/dist/algorithms/index.d.ts +32 -0
  18. package/dist/algorithms/index.d.ts.map +1 -0
  19. package/dist/algorithms/index.js +74 -0
  20. package/dist/algorithms/index.js.map +1 -0
  21. package/dist/algorithms/ppo.d.ts +72 -0
  22. package/dist/algorithms/ppo.d.ts.map +1 -0
  23. package/dist/algorithms/ppo.js +331 -0
  24. package/dist/algorithms/ppo.js.map +1 -0
  25. package/dist/algorithms/q-learning.d.ts +77 -0
  26. package/dist/algorithms/q-learning.d.ts.map +1 -0
  27. package/dist/algorithms/q-learning.js +259 -0
  28. package/dist/algorithms/q-learning.js.map +1 -0
  29. package/dist/algorithms/sarsa.d.ts +82 -0
  30. package/dist/algorithms/sarsa.d.ts.map +1 -0
  31. package/dist/algorithms/sarsa.js +297 -0
  32. package/dist/algorithms/sarsa.js.map +1 -0
  33. package/dist/index.d.ts +118 -0
  34. package/dist/index.d.ts.map +1 -0
  35. package/dist/index.js +201 -0
  36. package/dist/index.js.map +1 -0
  37. package/dist/modes/balanced.d.ts +60 -0
  38. package/dist/modes/balanced.d.ts.map +1 -0
  39. package/dist/modes/balanced.js +234 -0
  40. package/dist/modes/balanced.js.map +1 -0
  41. package/dist/modes/batch.d.ts +82 -0
  42. package/dist/modes/batch.d.ts.map +1 -0
  43. package/dist/modes/batch.js +316 -0
  44. package/dist/modes/batch.js.map +1 -0
  45. package/dist/modes/edge.d.ts +85 -0
  46. package/dist/modes/edge.d.ts.map +1 -0
  47. package/dist/modes/edge.js +310 -0
  48. package/dist/modes/edge.js.map +1 -0
  49. package/dist/modes/index.d.ts +55 -0
  50. package/dist/modes/index.d.ts.map +1 -0
  51. package/dist/modes/index.js +83 -0
  52. package/dist/modes/index.js.map +1 -0
  53. package/dist/modes/real-time.d.ts +58 -0
  54. package/dist/modes/real-time.d.ts.map +1 -0
  55. package/dist/modes/real-time.js +196 -0
  56. package/dist/modes/real-time.js.map +1 -0
  57. package/dist/modes/research.d.ts +79 -0
  58. package/dist/modes/research.d.ts.map +1 -0
  59. package/dist/modes/research.js +389 -0
  60. package/dist/modes/research.js.map +1 -0
  61. package/dist/pattern-learner.d.ts +117 -0
  62. package/dist/pattern-learner.d.ts.map +1 -0
  63. package/dist/pattern-learner.js +603 -0
  64. package/dist/pattern-learner.js.map +1 -0
  65. package/dist/reasoning-bank.d.ts +259 -0
  66. package/dist/reasoning-bank.d.ts.map +1 -0
  67. package/dist/reasoning-bank.js +993 -0
  68. package/dist/reasoning-bank.js.map +1 -0
  69. package/dist/reasoningbank-adapter.d.ts +168 -0
  70. package/dist/reasoningbank-adapter.d.ts.map +1 -0
  71. package/dist/reasoningbank-adapter.js +463 -0
  72. package/dist/reasoningbank-adapter.js.map +1 -0
  73. package/dist/sona-integration.d.ts +168 -0
  74. package/dist/sona-integration.d.ts.map +1 -0
  75. package/dist/sona-integration.js +316 -0
  76. package/dist/sona-integration.js.map +1 -0
  77. package/dist/sona-manager.d.ts +147 -0
  78. package/dist/sona-manager.d.ts.map +1 -0
  79. package/dist/sona-manager.js +695 -0
  80. package/dist/sona-manager.js.map +1 -0
  81. package/dist/types.d.ts +431 -0
  82. package/dist/types.d.ts.map +1 -0
  83. package/dist/types.js +11 -0
  84. package/dist/types.js.map +1 -0
  85. package/package.json +47 -0
@@ -0,0 +1,695 @@
/**
 * SONA Manager - Self-Optimizing Neural Architecture
 *
 * Manages learning modes and provides adaptive optimization for agent tasks.
 *
 * Performance Targets:
 * - Adaptation: <0.05ms
 * - Pattern retrieval: <1ms
 * - Learning step: <10ms
 *
 * Supported Modes:
 * - real-time: Sub-millisecond adaptation (2200 ops/sec)
 * - balanced: General purpose (+25% quality)
 * - research: Deep exploration (+55% quality)
 * - edge: Resource-constrained (<5MB)
 * - batch: High-throughput processing
 */
18
+ import { RealTimeMode } from './modes/real-time.js';
19
+ import { BalancedMode } from './modes/balanced.js';
20
+ import { ResearchMode } from './modes/research.js';
21
+ import { EdgeMode } from './modes/edge.js';
22
+ import { BatchMode } from './modes/batch.js';
/**
 * Default mode configurations.
 *
 * Each entry tunes the learning pipeline for one operating profile:
 * - loraRank: LoRA adapter capacity (higher = more expressive, more memory)
 * - learningRate / batchSize: optimizer hyperparameters
 * - trajectoryCapacity: max trajectories retained before pruning
 * - patternClusters: granularity of the pattern store
 * - qualityThreshold: minimum trajectory quality admitted to learning
 * - maxLatencyMs / memoryBudgetMb: per-mode resource budgets
 * - ewcLambda: Elastic Weight Consolidation regularization strength
 */
const MODE_CONFIGS = {
    // Sub-millisecond adaptation: tiny adapter, strict 0.5ms latency budget.
    'real-time': {
        mode: 'real-time',
        loraRank: 2,
        learningRate: 0.001,
        batchSize: 32,
        trajectoryCapacity: 1000,
        patternClusters: 25,
        qualityThreshold: 0.7,
        maxLatencyMs: 0.5,
        memoryBudgetMb: 25,
        ewcLambda: 2000,
    },
    // General-purpose default profile.
    'balanced': {
        mode: 'balanced',
        loraRank: 4,
        learningRate: 0.002,
        batchSize: 32,
        trajectoryCapacity: 3000,
        patternClusters: 50,
        qualityThreshold: 0.5,
        maxLatencyMs: 18,
        memoryBudgetMb: 50,
        ewcLambda: 2000,
    },
    // Deep exploration: large adapter, permissive quality threshold.
    'research': {
        mode: 'research',
        loraRank: 16,
        learningRate: 0.002,
        batchSize: 64,
        trajectoryCapacity: 10000,
        patternClusters: 100,
        qualityThreshold: 0.2,
        maxLatencyMs: 100,
        memoryBudgetMb: 100,
        ewcLambda: 2500,
    },
    // Resource-constrained devices: smallest footprint (<5MB budget).
    'edge': {
        mode: 'edge',
        loraRank: 1,
        learningRate: 0.001,
        batchSize: 16,
        trajectoryCapacity: 200,
        patternClusters: 15,
        qualityThreshold: 0.8,
        maxLatencyMs: 1,
        memoryBudgetMb: 5,
        ewcLambda: 1500,
    },
    // High-throughput offline processing: large batches, relaxed latency.
    'batch': {
        mode: 'batch',
        loraRank: 8,
        learningRate: 0.002,
        batchSize: 128,
        trajectoryCapacity: 5000,
        patternClusters: 75,
        qualityThreshold: 0.4,
        maxLatencyMs: 50,
        memoryBudgetMb: 75,
        ewcLambda: 2000,
    },
};
/**
 * Mode-specific optimizations.
 *
 * Flags consumed by the mode implementations:
 * - enableSIMD: vectorized kernels where available
 * - useMicroLoRA: ultra-small adapter variant for latency-critical modes
 * - gradientCheckpointing: trade compute for memory during learning
 * - useHalfPrecision: fp16 storage/compute
 * - patternCaching: cache pattern-match results
 * - asyncUpdates: apply weight updates off the hot path
 */
const MODE_OPTIMIZATIONS = {
    // Everything tuned for minimum latency.
    'real-time': {
        enableSIMD: true,
        useMicroLoRA: true,
        gradientCheckpointing: false,
        useHalfPrecision: true,
        patternCaching: true,
        asyncUpdates: true,
    },
    // Full precision, synchronous updates for predictable quality.
    'balanced': {
        enableSIMD: true,
        useMicroLoRA: false,
        gradientCheckpointing: false,
        useHalfPrecision: false,
        patternCaching: true,
        asyncUpdates: false,
    },
    // Memory-heavy training: checkpointing on, full precision.
    'research': {
        enableSIMD: true,
        useMicroLoRA: false,
        gradientCheckpointing: true,
        useHalfPrecision: false,
        patternCaching: true,
        asyncUpdates: false,
    },
    // Minimal footprint: no SIMD assumption, no pattern cache.
    'edge': {
        enableSIMD: false,
        useMicroLoRA: true,
        gradientCheckpointing: false,
        useHalfPrecision: true,
        patternCaching: false,
        asyncUpdates: true,
    },
    // Throughput-oriented: every memory/latency trade-off enabled.
    'batch': {
        enableSIMD: true,
        useMicroLoRA: false,
        gradientCheckpointing: true,
        useHalfPrecision: true,
        patternCaching: true,
        asyncUpdates: true,
    },
};
/**
 * SONA Manager - Main orchestrator for neural learning.
 *
 * Owns trajectory capture, the pattern store, LoRA adapter weights, and EWC
 * state; delegates mode-specific work (learning, pattern search, LoRA
 * application) to the active mode implementation.
 *
 * Fixes vs. previous revision:
 * - checkLearningTrigger() no longer leaves the async learning cycle as an
 *   unhandled floating promise.
 * - applyAdaptations() falls back to the 'default' adapter when the requested
 *   domain has no weights yet.
 */
export class SONAManager {
    currentMode;
    config;
    optimizations;
    modeImpl;
    // trajectoryId -> trajectory record
    trajectories = new Map();
    // patternId -> learned pattern
    patterns = new Map();
    // domain -> LoRA adapter weights
    loraWeights = new Map();
    // Elastic Weight Consolidation state; null until initialize() is called.
    ewcState = null;
    eventListeners = new Set();
    stats;
    isInitialized = false;
    // Performance tracking — counters reset on each updateStats() window.
    operationCount = 0;
    totalLatencyMs = 0;
    learningCycles = 0;
    lastStatsUpdate = Date.now();
    /**
     * @param {string} [mode='balanced'] - One of the MODE_CONFIGS keys.
     */
    constructor(mode = 'balanced') {
        this.currentMode = mode;
        this.config = { ...MODE_CONFIGS[mode] };
        this.optimizations = { ...MODE_OPTIMIZATIONS[mode] };
        this.modeImpl = this.createModeImplementation(mode);
        this.stats = this.createInitialStats();
    }
    /**
     * Initialize the SONA manager. Idempotent: a second call is a no-op.
     */
    async initialize() {
        if (this.isInitialized)
            return;
        // Initialize mode implementation
        await this.modeImpl.initialize();
        // Fresh EWC state for continual learning: nothing consolidated yet.
        this.ewcState = {
            means: new Map(),
            fisher: new Map(),
            taskCount: 0,
            lastConsolidation: Date.now(),
        };
        this.isInitialized = true;
    }
    /**
     * Change the current learning mode. Cleans up the old mode
     * implementation, swaps configuration, and emits 'mode_changed'.
     */
    async setMode(mode) {
        if (mode === this.currentMode)
            return;
        const previousMode = this.currentMode;
        // Cleanup current mode before replacing it.
        await this.modeImpl.cleanup();
        this.currentMode = mode;
        this.config = { ...MODE_CONFIGS[mode] };
        this.optimizations = { ...MODE_OPTIMIZATIONS[mode] };
        this.modeImpl = this.createModeImplementation(mode);
        await this.modeImpl.initialize();
        this.emitEvent({
            type: 'mode_changed',
            fromMode: previousMode,
            toMode: mode,
        });
    }
    /**
     * Get current mode and configuration (shallow copies — callers cannot
     * mutate internal state through the result).
     */
    getConfig() {
        return {
            mode: this.currentMode,
            config: { ...this.config },
            optimizations: { ...this.optimizations },
        };
    }
    // ==========================================================================
    // Trajectory Management
    // ==========================================================================
    /**
     * Begin a new trajectory for a task.
     *
     * @param {string} context - Task context description.
     * @param {string} [domain='general'] - Domain tag for the trajectory.
     * @returns {string} The new trajectory's id.
     */
    beginTrajectory(context, domain = 'general') {
        const startTime = performance.now();
        const trajectoryId = `traj_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
        const trajectory = {
            trajectoryId,
            context,
            domain,
            steps: [],
            qualityScore: 0,
            isComplete: false,
            startTime: Date.now(),
        };
        this.trajectories.set(trajectoryId, trajectory);
        this.emitEvent({
            type: 'trajectory_started',
            trajectoryId,
            context,
        });
        this.trackLatency(performance.now() - startTime);
        return trajectoryId;
    }
    /**
     * Record a step in a trajectory. Silently ignored if the trajectory is
     * unknown or already complete.
     */
    recordStep(trajectoryId, action, reward, stateEmbedding, metadata) {
        const startTime = performance.now();
        const trajectory = this.trajectories.get(trajectoryId);
        if (!trajectory || trajectory.isComplete)
            return;
        const step = {
            stepId: `step_${trajectory.steps.length}`,
            timestamp: Date.now(),
            action,
            // stateBefore chains from the previous step's stateAfter; the
            // first step uses the provided embedding for both sides.
            stateBefore: trajectory.steps.length > 0
                ? trajectory.steps[trajectory.steps.length - 1].stateAfter
                : stateEmbedding,
            stateAfter: stateEmbedding,
            reward,
            metadata,
        };
        trajectory.steps.push(step);
        // Update running quality score
        trajectory.qualityScore = this.calculateQualityScore(trajectory);
        this.trackLatency(performance.now() - startTime);
    }
    /**
     * Complete a trajectory.
     *
     * @param {string} trajectoryId
     * @param {number} [finalQuality] - Overrides the computed quality score.
     * @returns {object|null} The completed trajectory, or null if unknown /
     *   already complete.
     */
    completeTrajectory(trajectoryId, finalQuality) {
        const startTime = performance.now();
        const trajectory = this.trajectories.get(trajectoryId);
        if (!trajectory || trajectory.isComplete)
            return null;
        trajectory.isComplete = true;
        trajectory.endTime = Date.now();
        trajectory.qualityScore = finalQuality !== undefined
            ? finalQuality
            : this.calculateQualityScore(trajectory);
        this.emitEvent({
            type: 'trajectory_completed',
            trajectoryId,
            qualityScore: trajectory.qualityScore,
        });
        // Check if we should trigger a background learning cycle.
        this.checkLearningTrigger();
        this.trackLatency(performance.now() - startTime);
        return trajectory;
    }
    /**
     * Get a trajectory by ID (undefined if unknown).
     */
    getTrajectory(trajectoryId) {
        return this.trajectories.get(trajectoryId);
    }
    // ==========================================================================
    // Pattern Matching
    // ==========================================================================
    /**
     * Find similar patterns for a given context (k=3 optimal).
     * Emits a 'pattern_matched' event per match and annotates each match
     * with the total lookup latency.
     */
    async findSimilarPatterns(embedding, k = 3) {
        const startTime = performance.now();
        const matches = await this.modeImpl.findPatterns(embedding, k, Array.from(this.patterns.values()));
        // Track pattern match events
        for (const match of matches) {
            this.emitEvent({
                type: 'pattern_matched',
                patternId: match.pattern.patternId,
                similarity: match.similarity,
            });
        }
        const latency = performance.now() - startTime;
        // Add latency to matches
        return matches.map(m => ({ ...m, latencyMs: latency }));
    }
    /**
     * Store a new pattern. The patternId and timestamps are assigned here.
     *
     * @returns {object} The stored pattern including its generated id.
     */
    storePattern(pattern) {
        const startTime = performance.now();
        const fullPattern = {
            ...pattern,
            patternId: `pat_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`,
            createdAt: Date.now(),
            updatedAt: Date.now(),
        };
        this.patterns.set(fullPattern.patternId, fullPattern);
        this.trackLatency(performance.now() - startTime);
        return fullPattern;
    }
    /**
     * Update pattern usage statistics after it was applied with the given
     * observed quality. No-op for unknown pattern ids.
     */
    updatePatternUsage(patternId, quality) {
        const pattern = this.patterns.get(patternId);
        if (!pattern)
            return;
        pattern.usageCount++;
        pattern.qualityHistory.push(quality);
        // Keep only last 100 quality scores
        if (pattern.qualityHistory.length > 100) {
            pattern.qualityHistory = pattern.qualityHistory.slice(-100);
        }
        // Success rate = mean of the retained quality history.
        pattern.successRate = pattern.qualityHistory.reduce((a, b) => a + b, 0) / pattern.qualityHistory.length;
        pattern.updatedAt = Date.now();
    }
    // ==========================================================================
    // Learning
    // ==========================================================================
    /**
     * Trigger a learning cycle over completed trajectories that meet the
     * mode's quality threshold. No-op when none qualify.
     */
    async triggerLearning(reason = 'manual') {
        const startTime = performance.now();
        const completedTrajectories = Array.from(this.trajectories.values())
            .filter(t => t.isComplete && t.qualityScore >= this.config.qualityThreshold);
        if (completedTrajectories.length === 0)
            return;
        this.emitEvent({
            type: 'learning_triggered',
            reason,
            trajectoryCount: completedTrajectories.length,
        });
        // Perform learning via mode implementation
        const improvementDelta = await this.modeImpl.learn(completedTrajectories, this.config, this.ewcState);
        this.learningCycles++;
        this.emitEvent({
            type: 'learning_completed',
            improvementDelta,
        });
        // Prune old trajectories if over capacity
        this.pruneTrajectories();
        this.trackLatency(performance.now() - startTime);
    }
    /**
     * Apply learned adaptations to processing.
     *
     * Falls back to the 'default' adapter when the requested domain has no
     * weights yet (previously this passed undefined to applyLoRA).
     */
    async applyAdaptations(input, domain) {
        const startTime = performance.now();
        const weights = (domain ? this.loraWeights.get(domain) : undefined)
            ?? this.loraWeights.get('default');
        // Apply adaptations via mode implementation
        const output = await this.modeImpl.applyLoRA(input, weights);
        const latency = performance.now() - startTime;
        // Verify performance target; research/batch modes are exempt.
        if (latency > 0.05 && this.currentMode !== 'research' && this.currentMode !== 'batch') {
            console.warn(`SONA adaptation exceeded target: ${latency.toFixed(3)}ms > 0.05ms`);
        }
        return output;
    }
    // ==========================================================================
    // LoRA Management
    // ==========================================================================
    /**
     * Get LoRA configuration for the current mode (alpha = 2 * rank).
     */
    getLoRAConfig() {
        return {
            rank: this.config.loraRank,
            alpha: this.config.loraRank * 2,
            dropout: 0.05,
            targetModules: ['q_proj', 'v_proj', 'k_proj', 'o_proj'],
            microLoRA: this.optimizations.useMicroLoRA,
        };
    }
    /**
     * Initialize LoRA weights for a domain and register them.
     *
     * A is seeded with small random values, B with zeros, so the initial
     * adapter delta (B @ A) is exactly zero — the standard LoRA init.
     */
    initializeLoRAWeights(domain = 'default') {
        const config = this.getLoRAConfig();
        const weights = {
            adapterId: `lora_${domain}_${Date.now()}`,
            A: new Map(),
            B: new Map(),
            createdAt: Date.now(),
            updatedAt: Date.now(),
            iterations: 0,
            domain,
        };
        for (const module of config.targetModules) {
            // A: (hidden_dim, rank) small random; B: (rank, hidden_dim) zero.
            const hiddenDim = 768; // Typical transformer hidden dim
            const A = new Float32Array(hiddenDim * config.rank);
            const B = new Float32Array(config.rank * hiddenDim);
            for (let i = 0; i < A.length; i++) {
                A[i] = (Math.random() - 0.5) * 0.02;
            }
            weights.A.set(module, A);
            weights.B.set(module, B);
        }
        this.loraWeights.set(domain, weights);
        return weights;
    }
    // ==========================================================================
    // EWC (Elastic Weight Consolidation)
    // ==========================================================================
    /**
     * Get EWC configuration derived from the current mode.
     */
    getEWCConfig() {
        return {
            lambda: this.config.ewcLambda,
            decay: 0.9,
            fisherSamples: 100,
            minFisher: 1e-8,
            online: true,
        };
    }
    /**
     * Consolidate EWC after learning a new task: decay all stored Fisher
     * information in place and bump the task counter.
     */
    consolidateEWC() {
        if (!this.ewcState)
            return;
        const config = this.getEWCConfig();
        // Decay Fisher information in place (online EWC).
        for (const fisher of this.ewcState.fisher.values()) {
            for (let i = 0; i < fisher.length; i++) {
                fisher[i] *= config.decay;
            }
        }
        this.ewcState.taskCount++;
        this.ewcState.lastConsolidation = Date.now();
    }
    // ==========================================================================
    // Statistics
    // ==========================================================================
    /**
     * Get current neural system statistics (recomputed on each call).
     */
    getStats() {
        this.updateStats();
        return { ...this.stats };
    }
    // ==========================================================================
    // Event System
    // ==========================================================================
    /**
     * Add an event listener.
     */
    addEventListener(listener) {
        this.eventListeners.add(listener);
    }
    /**
     * Remove an event listener.
     */
    removeEventListener(listener) {
        this.eventListeners.delete(listener);
    }
    // ==========================================================================
    // Cleanup
    // ==========================================================================
    /**
     * Cleanup resources and reset to the uninitialized state.
     */
    async cleanup() {
        await this.modeImpl.cleanup();
        this.trajectories.clear();
        this.patterns.clear();
        this.loraWeights.clear();
        this.eventListeners.clear();
        this.isInitialized = false;
    }
    // ==========================================================================
    // Private Methods
    // ==========================================================================
    // Instantiate the mode implementation; unknown modes fall back to balanced.
    createModeImplementation(mode) {
        switch (mode) {
            case 'real-time':
                return new RealTimeMode(MODE_CONFIGS[mode], MODE_OPTIMIZATIONS[mode]);
            case 'balanced':
                return new BalancedMode(MODE_CONFIGS[mode], MODE_OPTIMIZATIONS[mode]);
            case 'research':
                return new ResearchMode(MODE_CONFIGS[mode], MODE_OPTIMIZATIONS[mode]);
            case 'edge':
                return new EdgeMode(MODE_CONFIGS[mode], MODE_OPTIMIZATIONS[mode]);
            case 'batch':
                return new BatchMode(MODE_CONFIGS[mode], MODE_OPTIMIZATIONS[mode]);
            default:
                return new BalancedMode(MODE_CONFIGS['balanced'], MODE_OPTIMIZATIONS['balanced']);
        }
    }
    // Quality = 0.8 * mean step reward + 0.2 * length factor.
    calculateQualityScore(trajectory) {
        if (trajectory.steps.length === 0)
            return 0;
        // Average reward across steps
        const avgReward = trajectory.steps.reduce((sum, step) => sum + step.reward, 0) / trajectory.steps.length;
        // Discount factor for trajectory length (longer trajectories may accumulate errors)
        const lengthFactor = Math.min(1, 10 / trajectory.steps.length);
        return avgReward * 0.8 + lengthFactor * 0.2;
    }
    // Fire a background learning cycle when completed trajectories reach 80%
    // of capacity. triggerLearning is async and intentionally not awaited
    // here, but the promise must not float unhandled.
    checkLearningTrigger() {
        const completedCount = Array.from(this.trajectories.values())
            .filter(t => t.isComplete).length;
        const utilization = completedCount / this.config.trajectoryCapacity;
        if (utilization >= 0.8) {
            this.triggerLearning('capacity_threshold').catch((err) => {
                console.error('SONA background learning cycle failed:', err);
            });
        }
    }
    // Drop the lowest-quality completed trajectories until only half of the
    // configured capacity remains occupied by completed trajectories.
    pruneTrajectories() {
        const completed = Array.from(this.trajectories.entries())
            .filter(([_, t]) => t.isComplete)
            .sort((a, b) => a[1].qualityScore - b[1].qualityScore);
        const toRemove = completed.length - Math.floor(this.config.trajectoryCapacity * 0.5);
        if (toRemove > 0) {
            for (let i = 0; i < toRemove && i < completed.length; i++) {
                this.trajectories.delete(completed[i][0]);
            }
        }
    }
    // Accumulate per-operation latency for the current stats window.
    trackLatency(latencyMs) {
        this.operationCount++;
        this.totalLatencyMs += latencyMs;
    }
    // Deliver an event to all listeners; a throwing listener must not break
    // the others or the caller.
    emitEvent(event) {
        for (const listener of this.eventListeners) {
            try {
                listener(event);
            }
            catch (error) {
                console.error('Error in neural event listener:', error);
            }
        }
    }
    // Zeroed stats snapshot used before the first updateStats().
    createInitialStats() {
        return {
            trajectories: {
                total: 0,
                active: 0,
                completed: 0,
                utilization: 0,
            },
            performance: {
                avgQualityScore: 0,
                opsPerSecond: 0,
                learningCycles: 0,
                avgLatencyMs: 0,
            },
            patterns: {
                totalPatterns: 0,
                avgMatchTime: 0,
                cacheHitRate: 0,
                evolutionCount: 0,
            },
            memory: {
                usedMb: 0,
                budgetMb: this.config.memoryBudgetMb,
                trajectoryBytes: 0,
                patternBytes: 0,
            },
            config: {
                mode: this.currentMode,
                loraRank: this.config.loraRank,
                learningRate: this.config.learningRate,
                algorithm: 'ppo',
            },
        };
    }
    // Recompute the stats snapshot and reset the windowed latency counters.
    updateStats() {
        const now = Date.now();
        const elapsed = (now - this.lastStatsUpdate) / 1000;
        const trajectoryArray = Array.from(this.trajectories.values());
        const completed = trajectoryArray.filter(t => t.isComplete);
        this.stats = {
            trajectories: {
                total: trajectoryArray.length,
                active: trajectoryArray.filter(t => !t.isComplete).length,
                completed: completed.length,
                utilization: trajectoryArray.length / this.config.trajectoryCapacity,
            },
            performance: {
                avgQualityScore: completed.length > 0
                    ? completed.reduce((sum, t) => sum + t.qualityScore, 0) / completed.length
                    : 0,
                opsPerSecond: elapsed > 0 ? this.operationCount / elapsed : 0,
                learningCycles: this.learningCycles,
                avgLatencyMs: this.operationCount > 0
                    ? this.totalLatencyMs / this.operationCount
                    : 0,
            },
            patterns: {
                totalPatterns: this.patterns.size,
                avgMatchTime: 0, // Updated by mode implementation
                cacheHitRate: 0, // Updated by mode implementation
                evolutionCount: Array.from(this.patterns.values())
                    .reduce((sum, p) => sum + p.evolutionHistory.length, 0),
            },
            memory: {
                usedMb: this.estimateMemoryUsage(),
                budgetMb: this.config.memoryBudgetMb,
                trajectoryBytes: this.estimateTrajectoryBytes(),
                patternBytes: this.estimatePatternBytes(),
            },
            config: {
                mode: this.currentMode,
                loraRank: this.config.loraRank,
                learningRate: this.config.learningRate,
                algorithm: 'ppo',
            },
        };
        this.lastStatsUpdate = now;
        this.operationCount = 0;
        this.totalLatencyMs = 0;
    }
    // Rough total memory estimate in MB.
    estimateMemoryUsage() {
        return (this.estimateTrajectoryBytes() + this.estimatePatternBytes()) / (1024 * 1024);
    }
    // Rough byte estimate for all stored trajectories.
    estimateTrajectoryBytes() {
        let bytes = 0;
        for (const trajectory of this.trajectories.values()) {
            bytes += 200; // Base trajectory overhead
            bytes += trajectory.context.length * 2;
            bytes += trajectory.steps.length * (64 + 4 * 768 * 4); // Step overhead + embeddings
        }
        return bytes;
    }
    // Rough byte estimate for all stored patterns.
    estimatePatternBytes() {
        let bytes = 0;
        for (const pattern of this.patterns.values()) {
            bytes += 100; // Base pattern overhead
            bytes += pattern.name.length * 2;
            bytes += pattern.strategy.length * 2;
            bytes += pattern.embedding.byteLength;
            bytes += pattern.qualityHistory.length * 8;
            bytes += pattern.evolutionHistory.length * 100;
        }
        return bytes;
    }
}
/**
 * Factory function for creating SONA manager.
 *
 * @param {string} [mode='balanced'] - Learning mode key ('real-time',
 *   'balanced', 'research', 'edge', or 'batch').
 * @returns {SONAManager} A new manager; call initialize() before use.
 */
export function createSONAManager(mode = 'balanced') {
    return new SONAManager(mode);
}
/**
 * Get default configuration for a mode.
 *
 * @param {string} mode - Learning mode key.
 * @returns {object} A shallow copy of the mode's defaults, so callers
 *   cannot mutate the shared MODE_CONFIGS table.
 */
export function getModeConfig(mode) {
    return Object.assign({}, MODE_CONFIGS[mode]);
}
/**
 * Get optimizations for a mode.
 *
 * @param {string} mode - Learning mode key.
 * @returns {object} A shallow copy of the mode's optimization flags, so
 *   callers cannot mutate the shared MODE_OPTIMIZATIONS table.
 */
export function getModeOptimizations(mode) {
    return Object.assign({}, MODE_OPTIMIZATIONS[mode]);
}
695
+ //# sourceMappingURL=sona-manager.js.map