hedgequantx 2.5.36 → 2.5.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,783 @@
/**
 * AI Strategy Supervisor
 *
 * Observes, learns from, and optimizes the HQX Ultra Scalping strategy in real-time.
 *
 * FUNCTIONS:
 * 1. OBSERVE - Receive all market data, signals, and trades in real-time
 * 2. LEARN - Analyze winning/losing trades to identify patterns
 * 3. OPTIMIZE - Suggest and apply parameter improvements
 * 4. SUPERVISE - Monitor risk and intervene when necessary
 *
 * In CONSENSUS mode (2+ agents), ALL agents must agree before applying changes.
 */

const { analyzePerformance, getMarketAdvice, callAI } = require('./client');
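// NOTE (editor's annotation, not part of the package): only `callAI` is used in
// this file; `analyzePerformance` and `getMarketAdvice` are imported but never
// called here. From the call sites below, `callAI(agent, prompt, systemPrompt)`
// is assumed to resolve to a string (possibly with a JSON object embedded in
// free-form text) or a falsy value on failure.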

// Singleton supervisor state
let supervisorState = {
  active: false,
  agents: [],
  strategy: null,
  service: null,
  accountId: null,

  // Real-time data (synced with strategy)
  ticks: [],
  signals: [],
  trades: [],

  // Learning data
  winningPatterns: [],
  losingPatterns: [],

  // Performance tracking
  performance: {
    trades: 0,
    wins: 0,
    losses: 0,
    totalPnL: 0,
    maxDrawdown: 0,
    currentDrawdown: 0,
    peakPnL: 0,
    winStreak: 0,
    lossStreak: 0,
    maxWinStreak: 0,
    maxLossStreak: 0
  },

  // Optimization state
  optimizations: [],
  lastOptimizationTime: 0,
  optimizationInterval: 60000, // Analyze every 60 seconds

  // Current recommendations
  currentAdvice: {
    action: 'NORMAL',
    sizeMultiplier: 1.0,
    reason: 'Starting'
  },

  // Behavior history for graph (action over time)
  // Values: 0=PAUSE, 1=CAUTIOUS, 2=NORMAL, 3=AGGRESSIVE
  behaviorHistory: [],
  behaviorStartTime: null
};

// Analysis intervals
let analysisInterval = null;
let behaviorInterval = null; // 10-second behavior sampler (cleared on stop/re-init)

/**
 * Initialize supervisor with strategy and agents
 */
const initialize = (strategy, agents, service, accountId) => {
  const now = Date.now();

  supervisorState = {
    ...supervisorState,
    active: true,
    agents: agents || [],
    strategy,
    service,
    accountId,
    ticks: [],
    signals: [],
    trades: [],
    winningPatterns: [],
    losingPatterns: [],
    performance: {
      trades: 0,
      wins: 0,
      losses: 0,
      totalPnL: 0,
      maxDrawdown: 0,
      currentDrawdown: 0,
      peakPnL: 0,
      winStreak: 0,
      lossStreak: 0,
      maxWinStreak: 0,
      maxLossStreak: 0
    },
    optimizations: [],
    lastOptimizationTime: now,
    behaviorHistory: [{ timestamp: now, value: 2, action: 'NORMAL' }], // Start with NORMAL
    behaviorStartTime: now,
    currentAdvice: { action: 'NORMAL', sizeMultiplier: 1.0, reason: 'Starting' }
  };

  // Start continuous analysis loop
  if (analysisInterval) clearInterval(analysisInterval);
  analysisInterval = setInterval(analyzeAndOptimize, supervisorState.optimizationInterval);

  // Also record behavior every 10 seconds to keep the graph smooth
  if (behaviorInterval) clearInterval(behaviorInterval);
  behaviorInterval = setInterval(() => {
    if (supervisorState.active) {
      recordBehavior(supervisorState.currentAdvice.action);
    }
  }, 10000);

  return {
    success: true,
    agents: supervisorState.agents.length,
    mode: supervisorState.agents.length >= 2 ? 'CONSENSUS' : 'INDIVIDUAL'
  };
};

/**
 * Stop supervisor
 */
const stop = () => {
  if (analysisInterval) {
    clearInterval(analysisInterval);
    analysisInterval = null;
  }
  if (behaviorInterval) {
    clearInterval(behaviorInterval);
    behaviorInterval = null;
  }

  const summary = {
    ...supervisorState.performance,
    optimizationsApplied: supervisorState.optimizations.length,
    winningPatterns: supervisorState.winningPatterns.length,
    losingPatterns: supervisorState.losingPatterns.length
  };

  supervisorState.active = false;

  return summary;
};
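
// --- Illustrative usage sketch (editor's addition, not part of the package) ---
// How a host strategy might start and stop supervision. The agent objects only
// need the `id` and `name` fields read below plus whatever the './client'
// `callAI` implementation expects; the strategy stub and account id are
// placeholder assumptions.
const exampleStartStop = () => {
  const myStrategy = {
    // Optional hook used by applyOptimization() further down; this stub just logs.
    applyAIOptimization: (opt) => console.log('apply', opt.param, opt.direction, opt.amount)
  };
  const agents = [
    { id: 'a1', name: 'Agent One' },
    { id: 'a2', name: 'Agent Two' }
  ];

  const session = initialize(myStrategy, agents, /* service */ null, 'ACC-001');
  console.log(session.mode); // 'CONSENSUS' with 2+ agents, otherwise 'INDIVIDUAL'

  // ... feed ticks / signals / trade results while the strategy runs ...

  const summary = stop(); // final performance counters plus learning stats
  console.log(summary.totalPnL, summary.optimizationsApplied);
};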

/**
 * Feed tick data (called on every market tick)
 */
const feedTick = (tick) => {
  if (!supervisorState.active) return;

  supervisorState.ticks.push({
    ...tick,
    timestamp: Date.now()
  });

  // Keep last 5000 ticks for pattern analysis
  if (supervisorState.ticks.length > 5000) {
    supervisorState.ticks = supervisorState.ticks.slice(-5000);
  }
};

/**
 * Feed signal data (called when strategy generates a signal)
 */
const feedSignal = (signal) => {
  if (!supervisorState.active) return;

  const signalData = {
    ...signal,
    timestamp: Date.now(),
    ticksContext: supervisorState.ticks.slice(-50) // Last 50 ticks before signal
  };

  supervisorState.signals.push(signalData);

  // Keep last 100 signals
  if (supervisorState.signals.length > 100) {
    supervisorState.signals = supervisorState.signals.slice(-100);
  }
};

/**
 * Feed trade result (called when a trade completes)
 * This is where LEARNING happens
 */
const feedTradeResult = (trade) => {
  if (!supervisorState.active) return;

  const tradeData = {
    ...trade,
    timestamp: Date.now(),
    // Capture context at time of trade
    ticksBefore: supervisorState.ticks.slice(-100),
    signalUsed: supervisorState.signals[supervisorState.signals.length - 1] || null
  };

  supervisorState.trades.push(tradeData);

  // Update performance metrics
  const perf = supervisorState.performance;
  perf.trades++;
  perf.totalPnL += trade.pnl || 0;

  if (trade.pnl > 0) {
    perf.wins++;
    perf.winStreak++;
    perf.lossStreak = 0;
    perf.maxWinStreak = Math.max(perf.maxWinStreak, perf.winStreak);

    // Learn from winning trade
    learnFromTrade(tradeData, 'win');
  } else if (trade.pnl < 0) {
    perf.losses++;
    perf.lossStreak++;
    perf.winStreak = 0;
    perf.maxLossStreak = Math.max(perf.maxLossStreak, perf.lossStreak);

    // Learn from losing trade
    learnFromTrade(tradeData, 'loss');
  }

  // Update drawdown
  if (perf.totalPnL > perf.peakPnL) {
    perf.peakPnL = perf.totalPnL;
    perf.currentDrawdown = 0;
  } else {
    perf.currentDrawdown = perf.peakPnL - perf.totalPnL;
    perf.maxDrawdown = Math.max(perf.maxDrawdown, perf.currentDrawdown);
  }

  // Trigger immediate analysis after losing streaks
  if (perf.lossStreak >= 3) {
    analyzeAndOptimize();
  }
};
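
// --- Illustrative feed loop (editor's sketch, not part of the package) ---
// Shows the expected direction of data flow: every tick, every generated
// signal, and every completed trade is mirrored into the supervisor. Field
// names beyond the ones read above (price, volume, side/direction, qty, pnl,
// confidence, entry, stopLoss, takeProfit) are assumptions.
const exampleFeeds = () => {
  feedTick({ price: 18250.25, volume: 3 });

  feedSignal({
    side: 'BUY',
    confidence: 0.72,
    entry: 18250.25,
    stopLoss: 18245.0,
    takeProfit: 18262.5
  });

  // Once the position closes, report the result so the supervisor can learn.
  // Three consecutive losing results also trigger an immediate analyzeAndOptimize() pass.
  feedTradeResult({ side: 'BUY', qty: 1, price: 18250.25, pnl: 12.5 });
};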

/**
 * Learn from a completed trade
 * Extracts patterns from winning and losing trades
 */
const learnFromTrade = (trade, result) => {
  const pattern = {
    timestamp: trade.timestamp,
    result,
    pnl: trade.pnl,
    direction: trade.direction || trade.side,

    // Market context before trade
    priceAction: analyzePriceAction(trade.ticksBefore),
    volumeProfile: analyzeVolume(trade.ticksBefore),
    volatility: calculateVolatility(trade.ticksBefore),

    // Signal characteristics
    signalConfidence: trade.signalUsed?.confidence || null,
    entryPrice: trade.price || trade.signalUsed?.entry,
    stopLoss: trade.signalUsed?.stopLoss,
    takeProfit: trade.signalUsed?.takeProfit
  };

  if (result === 'win') {
    supervisorState.winningPatterns.push(pattern);
    // Keep last 50 winning patterns
    if (supervisorState.winningPatterns.length > 50) {
      supervisorState.winningPatterns = supervisorState.winningPatterns.slice(-50);
    }
  } else {
    supervisorState.losingPatterns.push(pattern);
    // Keep last 50 losing patterns
    if (supervisorState.losingPatterns.length > 50) {
      supervisorState.losingPatterns = supervisorState.losingPatterns.slice(-50);
    }
  }
};

/**
 * Analyze price action from ticks
 */
const analyzePriceAction = (ticks) => {
  if (!ticks || ticks.length < 2) return { trend: 'unknown', strength: 0 };

  const prices = ticks.map(t => t.price).filter(Boolean);
  if (prices.length < 2) return { trend: 'unknown', strength: 0 };

  const first = prices[0];
  const last = prices[prices.length - 1];
  const change = last - first;
  const range = Math.max(...prices) - Math.min(...prices);

  return {
    trend: change > 0 ? 'up' : change < 0 ? 'down' : 'flat',
    strength: range > 0 ? Math.abs(change) / range : 0,
    range,
    change
  };
};

/**
 * Analyze volume from ticks
 */
const analyzeVolume = (ticks) => {
  if (!ticks || ticks.length === 0) return { total: 0, avg: 0, trend: 'unknown' };

  const volumes = ticks.map(t => t.volume || 0);
  const total = volumes.reduce((a, b) => a + b, 0);
  const avg = total / volumes.length;

  // Compare first half vs second half
  const mid = Math.floor(volumes.length / 2);
  const firstHalf = volumes.slice(0, mid).reduce((a, b) => a + b, 0);
  const secondHalf = volumes.slice(mid).reduce((a, b) => a + b, 0);

  return {
    total,
    avg,
    trend: secondHalf > firstHalf * 1.2 ? 'increasing' : secondHalf < firstHalf * 0.8 ? 'decreasing' : 'stable'
  };
};

/**
 * Calculate volatility from ticks
 */
const calculateVolatility = (ticks) => {
  if (!ticks || ticks.length < 2) return 0;

  const prices = ticks.map(t => t.price).filter(Boolean);
  if (prices.length < 2) return 0;

  const returns = [];
  for (let i = 1; i < prices.length; i++) {
    returns.push((prices[i] - prices[i - 1]) / prices[i - 1]);
  }

  const mean = returns.reduce((a, b) => a + b, 0) / returns.length;
  const variance = returns.reduce((sum, r) => sum + Math.pow(r - mean, 2), 0) / returns.length;

  return Math.sqrt(variance);
};
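
// --- Worked example (editor's addition): what the helpers above return ---
// calculateVolatility is the population standard deviation of tick-to-tick
// simple returns. For prices 100 -> 101 -> 100 the returns are +1.00% and
// about -0.99%, so the result is roughly 0.00995 (about 1% per tick).
const exampleContextHelpers = () => {
  const ticks = [{ price: 100 }, { price: 101 }, { price: 100 }];
  console.log(calculateVolatility(ticks).toFixed(5)); // "0.00995"
  console.log(analyzePriceAction(ticks)); // { trend: 'flat', strength: 0, range: 1, change: 0 }
};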

/**
 * Main analysis and optimization loop
 * Called periodically and after significant events
 */
const analyzeAndOptimize = async () => {
  if (!supervisorState.active || supervisorState.agents.length === 0) return;

  const perf = supervisorState.performance;

  // Skip if not enough data
  if (perf.trades < 3) return;

  // Prepare performance data for AI analysis
  const performanceData = {
    trades: perf.trades,
    wins: perf.wins,
    losses: perf.losses,
    winRate: perf.trades > 0 ? perf.wins / perf.trades : 0,
    pnl: perf.totalPnL,
    maxDrawdown: perf.maxDrawdown,
    currentDrawdown: perf.currentDrawdown,
    winStreak: perf.winStreak,
    lossStreak: perf.lossStreak,
    maxWinStreak: perf.maxWinStreak,
    maxLossStreak: perf.maxLossStreak,

    // Calculate averages
    avgWin: perf.wins > 0 ?
      supervisorState.trades.filter(t => t.pnl > 0).reduce((s, t) => s + t.pnl, 0) / perf.wins : 0,
    avgLoss: perf.losses > 0 ?
      Math.abs(supervisorState.trades.filter(t => t.pnl < 0).reduce((s, t) => s + t.pnl, 0) / perf.losses) : 0,

    // Recent trades for context
    recentTrades: supervisorState.trades.slice(-10).map(t => ({
      side: t.direction || t.side,
      qty: t.qty,
      price: t.price,
      pnl: t.pnl
    })),

    // Pattern summaries
    winningPatternCount: supervisorState.winningPatterns.length,
    losingPatternCount: supervisorState.losingPatterns.length,

    // Common characteristics of losing trades
    losingTradeAnalysis: analyzeLosingPatterns()
  };

  // Get optimization suggestions from all agents
  const suggestions = [];

  for (const agent of supervisorState.agents) {
    try {
      const suggestion = await getOptimizationFromAgent(agent, performanceData);
      if (suggestion) {
        suggestions.push({
          agentId: agent.id,
          agentName: agent.name,
          ...suggestion
        });
      }
    } catch (e) {
      // Silent fail for individual agent
    }
  }

  if (suggestions.length === 0) return;

  // Process suggestions based on mode
  const isConsensus = supervisorState.agents.length >= 2;

  if (isConsensus) {
    // CONSENSUS MODE: All agents must agree
    const consensusResult = buildConsensus(suggestions);

    if (consensusResult.isUnanimous && consensusResult.optimizations.length > 0) {
      // Apply unanimous optimizations
      for (const opt of consensusResult.optimizations) {
        applyOptimization(opt);
      }
    }

    // Update current advice based on consensus
    if (consensusResult.action) {
      supervisorState.currentAdvice = {
        action: consensusResult.action,
        sizeMultiplier: consensusResult.sizeMultiplier || 1.0,
        reason: consensusResult.reason || 'Consensus recommendation'
      };
      recordBehavior(consensusResult.action);
    }
  } else {
    // INDIVIDUAL MODE: Apply single agent's suggestions
    const suggestion = suggestions[0];

    if (suggestion.optimizations) {
      for (const opt of suggestion.optimizations) {
        applyOptimization(opt);
      }
    }

    if (suggestion.action) {
      supervisorState.currentAdvice = {
        action: suggestion.action,
        sizeMultiplier: suggestion.sizeMultiplier || 1.0,
        reason: suggestion.reason || 'Agent recommendation'
      };
      recordBehavior(suggestion.action);
    }
  }

  supervisorState.lastOptimizationTime = Date.now();
};

/**
 * Record behavior for graph visualization
 * Converts action to numeric value: PAUSE=0, CAUTIOUS=1, NORMAL=2, AGGRESSIVE=3
 */
const recordBehavior = (action) => {
  const actionToValue = {
    'PAUSE': 0,
    'CAUTIOUS': 1,
    'NORMAL': 2,
    'AGGRESSIVE': 3
  };

  const value = actionToValue[action] ?? 2; // Default to NORMAL
  const now = Date.now();

  supervisorState.behaviorHistory.push({
    timestamp: now,
    value,
    action
  });

  // Keep last 200 data points
  if (supervisorState.behaviorHistory.length > 200) {
    supervisorState.behaviorHistory = supervisorState.behaviorHistory.slice(-200);
  }
};

/**
 * Analyze patterns in losing trades
 */
const analyzeLosingPatterns = () => {
  const patterns = supervisorState.losingPatterns;
  if (patterns.length === 0) return null;

  // Find common characteristics
  const trends = patterns.map(p => p.priceAction?.trend).filter(Boolean);
  const volatilities = patterns.map(p => p.volatility).filter(Boolean);
  const confidences = patterns.map(p => p.signalConfidence).filter(Boolean);

  const trendCounts = {};
  for (const t of trends) {
    trendCounts[t] = (trendCounts[t] || 0) + 1;
  }

  const avgVolatility = volatilities.length > 0 ?
    volatilities.reduce((a, b) => a + b, 0) / volatilities.length : 0;

  const avgConfidence = confidences.length > 0 ?
    confidences.reduce((a, b) => a + b, 0) / confidences.length : 0;

  return {
    commonTrend: Object.entries(trendCounts).sort((a, b) => b[1] - a[1])[0]?.[0] || 'unknown',
    avgVolatility,
    avgConfidence,
    count: patterns.length
  };
};

/**
 * Get optimization suggestion from a single agent
 */
const getOptimizationFromAgent = async (agent, performanceData) => {
  const systemPrompt = `You are an AI supervisor for HQX Ultra Scalping, a professional futures trading strategy.

The strategy uses:
- Order flow analysis (delta, absorption, imbalance)
- Statistical models (z-score, standard deviation)
- Dynamic risk management (Kelly criterion)

ANALYZE the performance data and LEARN from the losing trades.
Suggest SPECIFIC optimizations to improve win rate and reduce losses.

Respond in JSON:
{
  "assessment": "brief assessment",
  "action": "AGGRESSIVE|NORMAL|CAUTIOUS|PAUSE",
  "sizeMultiplier": 0.5-1.5,
  "optimizations": [
    {"param": "name", "direction": "increase|decrease", "amount": "10%", "reason": "why"}
  ],
  "learnings": "what we learned from losing trades",
  "confidence": 0-100
}`;

  const prompt = `STRATEGY PERFORMANCE ANALYSIS

Stats:
- Trades: ${performanceData.trades} (${performanceData.wins}W / ${performanceData.losses}L)
- Win Rate: ${(performanceData.winRate * 100).toFixed(1)}%
- P&L: $${performanceData.pnl.toFixed(2)}
- Max Drawdown: $${performanceData.maxDrawdown.toFixed(2)}
- Current Streak: ${performanceData.winStreak > 0 ? performanceData.winStreak + ' wins' : performanceData.lossStreak + ' losses'}

Losing Trade Analysis:
${performanceData.losingTradeAnalysis ? `
- Common trend at entry: ${performanceData.losingTradeAnalysis.commonTrend}
- Avg volatility: ${(performanceData.losingTradeAnalysis.avgVolatility * 100).toFixed(3)}%
- Avg signal confidence: ${(performanceData.losingTradeAnalysis.avgConfidence * 100).toFixed(1)}%
- Total losing patterns: ${performanceData.losingTradeAnalysis.count}
` : 'Not enough data'}

Recent Trades:
${performanceData.recentTrades.map(t => `${t.side} @ ${t.price} → $${t.pnl?.toFixed(2)}`).join('\n')}

What should we LEARN and OPTIMIZE?`;

  try {
    const response = await callAI(agent, prompt, systemPrompt);
    if (!response) return null;

    const jsonMatch = response.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      return JSON.parse(jsonMatch[0]);
    }
    return null;
  } catch (e) {
    return null;
  }
};
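
// --- Illustrative agent reply (editor's sketch, values invented) ---
// getOptimizationFromAgent() tolerates prose around the JSON because the regex
// above captures everything from the first "{" to the last "}". The parameter
// name "minConfidence" is hypothetical, not defined by this module.
const exampleAgentReply = () => {
  const response = `Here is my analysis:
{
  "assessment": "Losses cluster in down-trending, high-volatility entries",
  "action": "CAUTIOUS",
  "sizeMultiplier": 0.8,
  "optimizations": [
    { "param": "minConfidence", "direction": "increase", "amount": "10%", "reason": "losing signals had low confidence" }
  ],
  "learnings": "avoid counter-trend entries during volatility spikes",
  "confidence": 72
}`;
  const parsed = JSON.parse(response.match(/\{[\s\S]*\}/)[0]);
  console.log(parsed.action, parsed.sizeMultiplier); // "CAUTIOUS" 0.8
};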

/**
 * Build consensus from multiple agent suggestions
 */
const buildConsensus = (suggestions) => {
  if (suggestions.length === 0) return { isUnanimous: false };

  // Check action consensus
  const actions = suggestions.map(s => s.action).filter(Boolean);
  const allSameAction = actions.length === suggestions.length &&
    actions.every(a => a === actions[0]);

  // Check optimization consensus
  const allOptimizations = suggestions.flatMap(s => s.optimizations || []);
  const paramGroups = {};

  for (const opt of allOptimizations) {
    if (!opt.param) continue;
    const key = `${opt.param}_${opt.direction}`;
    if (!paramGroups[key]) {
      paramGroups[key] = { ...opt, count: 0 };
    }
    paramGroups[key].count++;
  }

  // Find unanimous optimizations (all agents agree)
  const unanimousOptimizations = Object.values(paramGroups)
    .filter(g => g.count === suggestions.length)
    .map(g => ({
      param: g.param,
      direction: g.direction,
      amount: g.amount,
      reason: `Unanimous (${suggestions.length} agents)`
    }));

  // Average size multiplier
  const multipliers = suggestions.map(s => s.sizeMultiplier || 1.0);
  const avgMultiplier = multipliers.reduce((a, b) => a + b, 0) / multipliers.length;

  // Average confidence
  const confidences = suggestions.map(s => s.confidence || 50);
  const avgConfidence = Math.round(confidences.reduce((a, b) => a + b, 0) / confidences.length);

  return {
    isUnanimous: allSameAction && unanimousOptimizations.length > 0,
    action: allSameAction ? actions[0] : 'CAUTIOUS',
    sizeMultiplier: allSameAction ? avgMultiplier : 0.5,
    optimizations: unanimousOptimizations,
    confidence: avgConfidence,
    reason: allSameAction ?
      `${suggestions.length} agents agree` :
      'Agents disagree - being cautious',
    votes: actions.reduce((acc, a) => { acc[a] = (acc[a] || 0) + 1; return acc; }, {})
  };
};
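
// --- Worked consensus example (editor's addition, data invented) ---
// With two agents that both vote CAUTIOUS and both ask to raise the same
// (hypothetical) "minConfidence" parameter, the result is unanimous: the action
// passes through, multipliers are averaged (0.9), and the shared optimization
// is kept with the first agent's amount. If the votes differed, buildConsensus
// would fall back to CAUTIOUS with a 0.5 multiplier and isUnanimous=false.
const exampleConsensus = () => {
  const result = buildConsensus([
    { action: 'CAUTIOUS', sizeMultiplier: 0.8, confidence: 70,
      optimizations: [{ param: 'minConfidence', direction: 'increase', amount: '10%' }] },
    { action: 'CAUTIOUS', sizeMultiplier: 1.0, confidence: 80,
      optimizations: [{ param: 'minConfidence', direction: 'increase', amount: '15%' }] }
  ]);
  console.log(result.isUnanimous, result.sizeMultiplier, result.confidence); // true 0.9 75
  console.log(result.optimizations[0].reason); // "Unanimous (2 agents)"
};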

/**
 * Apply an optimization to the strategy
 */
const applyOptimization = (optimization) => {
  const strategy = supervisorState.strategy;
  if (!strategy) return false;

  // Record the optimization
  supervisorState.optimizations.push({
    timestamp: Date.now(),
    ...optimization
  });

  // Try to apply to strategy if it supports it
  try {
    if (typeof strategy.applyAIOptimization === 'function') {
      strategy.applyAIOptimization(optimization);
      return true;
    }

    if (typeof strategy.setParameter === 'function') {
      strategy.setParameter(optimization.param, optimization.direction, optimization.amount);
      return true;
    }
  } catch (e) {
    // Strategy doesn't support this optimization
  }

  return false;
};
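
// --- Strategy-side hook sketch (editor's addition, not part of the package) ---
// The strategy object is not defined in this file; this is one minimal way a
// strategy could honor applyAIOptimization, assuming `amount` arrives as a
// percentage string like "10%" and `params` holds numeric tunables.
const exampleStrategyStub = () => {
  const params = { minConfidence: 0.6 }; // hypothetical tunable
  return {
    applyAIOptimization: ({ param, direction, amount }) => {
      if (!(param in params)) return;                       // ignore unknown parameters
      const pct = parseFloat(String(amount)) / 100 || 0.1;   // "10%" -> 0.10, default 10%
      const sign = direction === 'increase' ? 1 : -1;
      params[param] *= 1 + sign * pct;
    }
  };
};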

/**
 * Get current advice for the strategy
 * Called before each trade decision
 */
const getCurrentAdvice = () => {
  if (!supervisorState.active) {
    return { action: 'NORMAL', sizeMultiplier: 1.0, reason: 'No supervision' };
  }

  return supervisorState.currentAdvice;
};

/**
 * Get supervision status
 */
const getStatus = () => {
  return {
    active: supervisorState.active,
    agents: supervisorState.agents.length,
    mode: supervisorState.agents.length >= 2 ? 'CONSENSUS' : 'INDIVIDUAL',
    performance: supervisorState.performance,
    currentAdvice: supervisorState.currentAdvice,
    optimizationsApplied: supervisorState.optimizations.length,
    patternsLearned: {
      winning: supervisorState.winningPatterns.length,
      losing: supervisorState.losingPatterns.length
    },
    lastOptimization: supervisorState.lastOptimizationTime
  };
};

/**
 * Check whether the strategy should proceed with a trade, based on AI advice
 */
const shouldTrade = () => {
  if (!supervisorState.active) return { proceed: true, multiplier: 1.0 };

  const advice = supervisorState.currentAdvice;

  if (advice.action === 'PAUSE') {
    return { proceed: false, reason: advice.reason };
  }

  return {
    proceed: true,
    multiplier: advice.sizeMultiplier || 1.0,
    action: advice.action
  };
};
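
// --- Illustrative entry gate (editor's sketch, not part of the package) ---
// How a strategy might consult the supervisor right before sending an order.
// `placeOrder` and `baseQty` are assumed names from the host strategy; the
// multiplier range (0.5-1.5) comes from the JSON schema the agents are given.
const exampleEntryGate = (signal, baseQty, placeOrder) => {
  const gate = shouldTrade();
  if (!gate.proceed) {
    console.log('Supervisor paused trading:', gate.reason);
    return;
  }
  // Scale position size by the supervisor's multiplier.
  const qty = Math.max(1, Math.round(baseQty * gate.multiplier));
  placeOrder({ side: signal.side, qty });
};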

/**
 * Get behavior history for graph visualization
 * Returns an array of numeric values (0-3) representing agent behavior over time
 *
 * @param {number} maxPoints - Maximum data points to return
 * @returns {Object} { values: number[], actions: string[], startTime: number, duration: number }
 */
const getBehaviorHistory = (maxPoints = 50) => {
  if (!supervisorState.active || supervisorState.behaviorHistory.length === 0) {
    return { values: [], actions: [], startTime: null };
  }

  let history = [...supervisorState.behaviorHistory];

  // Downsample if too many points
  if (history.length > maxPoints) {
    const step = Math.ceil(history.length / maxPoints);
    history = history.filter((_, i) => i % step === 0);
  }

  // If too few points, interpolate to make a smooth curve
  if (history.length < 10 && history.length > 1) {
    const interpolated = [];
    for (let i = 0; i < history.length - 1; i++) {
      interpolated.push(history[i]);
      // Add an intermediate point between each pair of samples
      const curr = history[i].value;
      const next = history[i + 1].value;
      const mid = (curr + next) / 2;
      interpolated.push({ value: mid, action: 'interpolated' });
    }
    interpolated.push(history[history.length - 1]);
    history = interpolated;
  }

  return {
    values: history.map(h => h.value),
    actions: history.map(h => h.action),
    startTime: supervisorState.behaviorStartTime,
    duration: Date.now() - supervisorState.behaviorStartTime
  };
};

/**
 * Get learning statistics for display
 */
const getLearningStats = () => {
  return {
    patternsLearned: {
      winning: supervisorState.winningPatterns.length,
      losing: supervisorState.losingPatterns.length,
      total: supervisorState.winningPatterns.length + supervisorState.losingPatterns.length
    },
    optimizations: supervisorState.optimizations.length,
    tradesAnalyzed: supervisorState.trades.length,
    ticksProcessed: supervisorState.ticks.length,
    signalsObserved: supervisorState.signals.length
  };
};

module.exports = {
  initialize,
  stop,
  feedTick,
  feedSignal,
  feedTradeResult,
  getCurrentAdvice,
  shouldTrade,
  getStatus,
  analyzeAndOptimize,
  getBehaviorHistory,
  getLearningStats
};
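
// --- Illustrative dashboard poll (editor's sketch, not part of the package) ---
// A UI layer could poll these read-only getters on a timer; none of them mutate
// supervisor state. Behavior values map back to 0=PAUSE, 1=CAUTIOUS, 2=NORMAL,
// 3=AGGRESSIVE.
const exampleDashboardPoll = () => {
  const status = getStatus();
  const behavior = getBehaviorHistory(50);
  const learning = getLearningStats();
  console.log(`${status.mode} | advice=${status.currentAdvice.action}`);
  console.log('behavior curve:', behavior.values.join(','));
  console.log('patterns learned:', learning.patternsLearned.total);
};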