bulltrackers-module 1.0.211 → 1.0.212

@@ -0,0 +1,397 @@
+ /**
+  * @fileoverview Mathematics Layer
+  * Core mathematical functions, statistics, and signal primitives.
+  */
+
+ const { DataExtractor } = require('./extractors');
+ const { SCHEMAS } = require('./profiling');
+
+ class MathPrimitives {
+   static average(values) {
+     if (!values || !values.length) return 0;
+     return values.reduce((a, b) => a + b, 0) / values.length;
+   }
+
+   static median(values) {
+     if (!values || !values.length) return 0;
+     const sorted = [...values].sort((a, b) => a - b);
+     const mid = Math.floor(sorted.length / 2);
+     return sorted.length % 2 === 0
+       ? (sorted[mid - 1] + sorted[mid]) / 2
+       : sorted[mid];
+   }
+
+   static standardDeviation(values) {
+     if (!values || !values.length) return 0;
+     const avg = this.average(values);
+     const squareDiffs = values.map(val => Math.pow((val || 0) - avg, 2));
+     return Math.sqrt(this.average(squareDiffs));
+   }
+
+   static bucketBinary(value, threshold = 0) {
+     return value > threshold ? 'winner' : 'loser';
+   }
+
+   static calculateHitProbability(currentPrice, barrierPrice, volatility, days, drift = 0) {
+     if (currentPrice <= 0 || barrierPrice <= 0 || volatility <= 0 || days <= 0) return 0;
+     if (currentPrice === barrierPrice) return 1.0; // Barrier sits at the current price: already touched.
+
+     const t = days / 365.0;
+     const sigma = volatility;
+     const mu = drift;
+     const b = Math.log(barrierPrice / currentPrice);
+     const nu = mu - 0.5 * Math.pow(sigma, 2);
+     const sqrtT = Math.sqrt(t);
+     const sigmaSqrtT = sigma * sqrtT;
+
+     // Abramowitz-Stegun polynomial approximation of the standard normal CDF.
+     const normCDF = (x) => {
+       const k = 1 / (1 + 0.2316419 * Math.abs(x));
+       const d = 0.3989423 * Math.exp(-x * x / 2);
+       const prob = d * k * (0.3193815 + k * (-0.3565638 + k * (1.781478 + k * (-1.821256 + k * 1.330274))));
+       return x > 0 ? 1 - prob : prob;
+     };
+
+     // Barrier-touch probability for a GBM path within t years (reflection-principle form).
+     const probability = normCDF((-Math.abs(b) - nu * t) / sigmaSqrtT) +
+       Math.exp((2 * nu * Math.abs(b)) / (sigma * sigma)) * normCDF((-Math.abs(b) + nu * t) / sigmaSqrtT);
+
+     return Math.min(Math.max(probability, 0), 1);
+   }
+
+   static simulateGBM(currentPrice, volatility, days, simulations = 1000, drift = 0) {
+     if (currentPrice <= 0 || volatility <= 0 || days <= 0) return new Float32Array(0);
+
+     const t = days / 365.0;
+     const sigma = volatility;
+     const mu = drift;
+     const driftTerm = (mu - 0.5 * sigma * sigma) * t;
+     const volTerm = sigma * Math.sqrt(t);
+
+     const results = new Float32Array(simulations);
+
+     for (let i = 0; i < simulations; i++) {
+       const u1 = Math.random();
+       const u2 = Math.random();
+       const z = Math.sqrt(-2.0 * Math.log(u1)) * Math.cos(2.0 * Math.PI * u2);
+       results[i] = currentPrice * Math.exp(driftTerm + volTerm * z);
+     }
+     return results;
+   }
+
+   static simulatePopulationBreakdown(pricePaths, userProfiles) {
+     if (!pricePaths.length || !userProfiles.length) return 0;
+
+     let totalBreakdownEvents = 0;
+     const totalSims = pricePaths.length;
+     const totalUsers = userProfiles.length;
+
+     for (let i = 0; i < totalSims; i++) {
+       const simPrice = pricePaths[i];
+       let capitulatedUsersInScenario = 0;
+
+       for (let j = 0; j < totalUsers; j++) {
+         const user = userProfiles[j];
+         const hypotheticalPnL = ((simPrice - user.entryPrice) / user.entryPrice) * 100;
+
+         if (hypotheticalPnL < user.thresholdPct) {
+           capitulatedUsersInScenario++;
+         }
+       }
+
+       totalBreakdownEvents += (capitulatedUsersInScenario / totalUsers);
+     }
+
+     return totalBreakdownEvents / totalSims;
+   }
+ }
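
For orientation, a minimal usage sketch of the helpers above (illustrative only, not part of the package diff; the `./math` require path and the sample numbers are assumptions, since the hunk header does not name the file):

// Illustrative sketch; './math' is an assumed path.
const { MathPrimitives } = require('./math');

const pnls = [2.5, -1.0, 4.2, -0.5, 1.1];
console.log(MathPrimitives.average(pnls));            // 1.26
console.log(MathPrimitives.median(pnls));             // 1.1
console.log(MathPrimitives.standardDeviation(pnls));  // ≈ 1.92 (population standard deviation)

// Probability that a $100 asset touches $120 within 30 days at 80% annualised volatility.
console.log(MathPrimitives.calculateHitProbability(100, 120, 0.8, 30));

// 1,000 Monte Carlo terminal prices for the same asset and horizon.
const paths = MathPrimitives.simulateGBM(100, 0.8, 30, 1000);
console.log(paths.length); // 1000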
+
+ class FinancialEngineering {
+   /**
+    * Calculates the Sortino Ratio based on a series of trade returns.
+    * Uses Downside Deviation (risk of loss) rather than Standard Deviation (volatility).
+    * @param {number[]} returns - Array of PnL percentages from trades.
+    * @param {number} targetReturn - Minimum acceptable return (default 0).
+    */
+   static sortinoRatio(returns, targetReturn = 0) {
+     if (!returns || returns.length < 2) return 0;
+
+     const avgReturn = MathPrimitives.average(returns);
+
+     // Calculate Downside Deviation (only negative deviations from target)
+     const downsideDiffs = returns.map(r => Math.min(0, r - targetReturn));
+     const downsideVariance = downsideDiffs.reduce((sum, d) => sum + (d * d), 0) / returns.length;
+     const downsideDev = Math.sqrt(downsideVariance);
+
+     if (downsideDev === 0) return 0; // No downside risk found
+     return (avgReturn - targetReturn) / downsideDev;
+   }
+
+   /**
+    * Calculates the Kelly Criterion (Optimal Leverage Fraction).
+    * f* = (bp - q) / b
+    * @param {number} winRatio - Win Rate (0-100).
+    * @param {number} avgWinPct - Average Win %.
+    * @param {number} avgLossPct - Average Loss % (sign is ignored; only the magnitude is used).
+    */
+   static kellyCriterion(winRatio, avgWinPct, avgLossPct) {
+     const p = winRatio / 100;
+     const q = 1 - p;
+     const lossMag = Math.abs(avgLossPct);
+
+     if (lossMag === 0) return 0; // No recorded losses: payoff odds are undefined, avoid division by zero
+
+     const b = avgWinPct / lossMag; // Payoff odds (b to 1)
+
+     // Kelly Formula: (bp - q) / b
+     const f = (b * p - q) / b;
+
+     return Math.max(0, f); // Clamp negative Kelly to 0 (do not bet)
+   }
+ }
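
A small worked example of the two formulas above, under the same assumption about the require path:

// Illustrative sketch; './math' is an assumed path.
const { FinancialEngineering } = require('./math');

// Kelly: 55% win rate, average win +4%, average loss -2%, so b = 2.
// f* = (bp - q) / b = (2 * 0.55 - 0.45) / 2 = 0.325 (risk about 32.5% of bankroll).
console.log(FinancialEngineering.kellyCriterion(55, 4, -2)); // 0.325

// Sortino: only the -1 and -2 returns contribute to downside deviation.
// avg = 1.2, downside deviation = 1.0, so the ratio is 1.2.
console.log(FinancialEngineering.sortinoRatio([3, -1, 2, -2, 4], 0)); // 1.2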
+
+ class TimeSeriesAnalysis {
+   /**
+    * Calculates the Hurst Exponent via Rescaled Range (R/S) Analysis.
+    * H = 0.5: Random Walk (Gambler).
+    * H > 0.5: Persistent/Trending (Momentum).
+    * H < 0.5: Anti-Persistent/Mean Reverting (Oscillator).
+    * @param {number[]} series - Time series data (e.g. cumulative PnL or prices).
+    */
+   static hurstExponent(series) {
+     if (!series || series.length < 10) return 0.5; // Insufficient data
+
+     // Create logarithmic differences (returns); skip non-positive ratios
+     // (e.g. a cumulative PnL series crossing zero), which have no log return.
+     const logReturns = [];
+     for (let i = 1; i < series.length; i++) {
+       const ratio = series[i] / series[i - 1];
+       if (ratio > 0 && Number.isFinite(ratio)) logReturns.push(Math.log(ratio));
+     }
+     if (logReturns.length < 2) return 0.5;
+
+     // Simplified R/S calculation over full range
+     const mean = MathPrimitives.average(logReturns);
+     const stdDev = MathPrimitives.standardDeviation(logReturns);
+
+     if (stdDev === 0) return 0.5;
+
+     // Calculate Deviations from mean
+     const deviations = logReturns.map(r => r - mean);
+
+     // Calculate Cumulative Deviations (Cumulative Range)
+     let sum = 0;
+     const cumulativeDeviations = deviations.map(d => sum += d);
+
+     const maxDev = Math.max(...cumulativeDeviations);
+     const minDev = Math.min(...cumulativeDeviations);
+     const range = maxDev - minDev;
+
+     const rs = range / stdDev;
+     const n = logReturns.length;
+
+     // H = log(R/S) / log(n)
+     // Note: This is a point-estimate simplification of the full regression method
+     // but sufficient for behavioral classification.
+     const hurst = Math.log(rs) / Math.log(n);
+
+     return Math.min(1, Math.max(0, hurst));
+   }
+ }
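
A sketch of how the Hurst classification behaves on synthetic series (illustrative only; the series and the require path are assumptions):

// Illustrative sketch; './math' is an assumed path.
const { TimeSeriesAnalysis } = require('./math');

// Persistent series: returns keep growing, so cumulative deviations span a wide range.
const trending = [100];
for (let i = 1; i < 50; i++) trending.push(trending[i - 1] * (1 + 0.001 * i));

// Anti-persistent series: returns alternate +1% / -1%, so deviations keep cancelling.
const choppy = [100];
for (let i = 1; i < 50; i++) choppy.push(choppy[i - 1] * (i % 2 ? 1.01 : 0.99));

console.log(TimeSeriesAnalysis.hurstExponent(trending)); // roughly 0.8, well above 0.5
console.log(TimeSeriesAnalysis.hurstExponent(choppy));   // near 0, well below 0.5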
+
+ class SignalPrimitives {
+   static getMetric(dependencies, calcName, ticker, fieldName, fallback = 0) {
+     if (!dependencies || !dependencies[calcName]) return fallback;
+     const tickerData = dependencies[calcName][ticker];
+     if (!tickerData) return fallback;
+
+     const val = tickerData[fieldName];
+     return (typeof val === 'number') ? val : fallback;
+   }
+
+   static getUnionKeys(dependencies, calcNames) {
+     const keys = new Set();
+     if (!dependencies) return [];
+     for (const name of calcNames) {
+       const resultObj = dependencies[name];
+       if (resultObj && typeof resultObj === 'object') {
+         Object.keys(resultObj).forEach(k => keys.add(k));
+       }
+     }
+     return Array.from(keys);
+   }
+
+   static normalizeTanh(value, scale = 10, sensitivity = 10.0) {
+     if (value === 0) return 0;
+     return Math.tanh(value / sensitivity) * scale;
+   }
+
+   static normalizeZScore(value, mean, stdDev) {
+     if (!stdDev || stdDev === 0) return 0;
+     return (value - mean) / stdDev;
+   }
+
+   static divergence(valueA, valueB) {
+     return (valueA || 0) - (valueB || 0);
+   }
+
+   static getPreviousState(previousComputed, calcName, ticker, fieldName = null) {
+     if (!previousComputed || !previousComputed[calcName]) return null;
+     const tickerData = previousComputed[calcName][ticker];
+     if (!tickerData) return null;
+
+     if (fieldName) {
+       return tickerData[fieldName];
+     }
+     return tickerData;
+   }
+ }
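
An illustrative sketch of the dependency lookup helpers; the shape of the `dependencies` object (calculation name, then ticker, then numeric fields) is inferred from how `getMetric` and `getUnionKeys` read it and is not documented in the diff:

// Illustrative sketch; './math' is an assumed path and the data is mocked.
const { SignalPrimitives } = require('./math');

const dependencies = {
  sentiment: { AAPL: { score: 4.2 }, TSLA: { score: -1.3 } },
  momentum:  { AAPL: { value: 0.8 }, MSFT: { value: 0.1 } }
};

console.log(SignalPrimitives.getMetric(dependencies, 'sentiment', 'AAPL', 'score'));    // 4.2
console.log(SignalPrimitives.getMetric(dependencies, 'sentiment', 'NVDA', 'score', 0)); // 0 (fallback)
console.log(SignalPrimitives.getUnionKeys(dependencies, ['sentiment', 'momentum']));    // ['AAPL', 'TSLA', 'MSFT']
console.log(SignalPrimitives.normalizeTanh(25));       // ≈ 9.87, saturating toward the ±10 scale
console.log(SignalPrimitives.normalizeZScore(12, 10, 2)); // 1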
+
+ class Aggregators {
+   static bucketUsersByPnlPerAsset(usersData, tickerMap) {
+     const buckets = new Map();
+     for (const [userId, portfolio] of Object.entries(usersData)) {
+       const userType = portfolio.PublicPositions ? SCHEMAS.USER_TYPES.SPECULATOR : SCHEMAS.USER_TYPES.NORMAL;
+       const positions = DataExtractor.getPositions(portfolio, userType);
+
+       for (const pos of positions) {
+         const id = DataExtractor.getInstrumentId(pos);
+         const pnl = DataExtractor.getNetProfit(pos);
+         if (!id || pnl === 0) continue;
+
+         const ticker = tickerMap[id];
+         if (!ticker) continue;
+
+         if (!buckets.has(ticker)) buckets.set(ticker, { winners: [], losers: [] });
+         const b = buckets.get(ticker);
+
+         if (pnl > 0) b.winners.push(userId);
+         else b.losers.push(userId);
+       }
+     }
+     return Object.fromEntries(buckets);
+   }
+
+   static getWeightedSentiment(positions) {
+     if (!positions || positions.length === 0) return 0;
+
+     let totalWeightedPnL = 0;
+     let totalWeight = 0;
+
+     for (const pos of positions) {
+       const pnl = DataExtractor.getNetProfit(pos);
+       const weight = DataExtractor.getPositionWeight(pos);
+
+       if (weight > 0) {
+         totalWeightedPnL += (pnl * weight);
+         totalWeight += weight;
+       }
+     }
+
+     if (totalWeight === 0) return 0;
+     return totalWeightedPnL / totalWeight;
+   }
+ }
+
+ class TimeSeries {
+   static updateEMAState(value, state, alpha = 0.1) {
+     const mean = state ? (state.mean || 0) : 0;
+     const variance = state ? (state.variance || 1) : 1;
+
+     if (value === undefined || value === null || isNaN(value)) {
+       return { mean, variance };
+     }
+
+     const diff = value - mean;
+     const newMean = mean + (alpha * diff);
+     const newVariance = (1 - alpha) * (variance + (alpha * diff * diff));
+
+     return { mean: newMean, variance: newVariance };
+   }
+
+   static pearsonCorrelation(x, y) {
+     if (!x || !y || x.length !== y.length || x.length === 0) return 0;
+
+     const n = x.length;
+     let sumX = 0, sumY = 0, sumXY = 0, sumX2 = 0, sumY2 = 0;
+
+     for (let i = 0; i < n; i++) {
+       sumX += x[i];
+       sumY += y[i];
+       sumXY += x[i] * y[i];
+       sumX2 += x[i] * x[i];
+       sumY2 += y[i] * y[i];
+     }
+
+     const numerator = (n * sumXY) - (sumX * sumY);
+     const denominator = Math.sqrt(((n * sumX2) - (sumX * sumX)) * ((n * sumY2) - (sumY * sumY)));
+
+     return (denominator === 0) ? 0 : numerator / denominator;
+   }
+ }
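
A short usage sketch for the streaming EMA state and the Pearson correlation (illustrative only; the require path and inputs are assumptions):

// Illustrative sketch; './math' is an assumed path.
const { TimeSeries } = require('./math');

// Exponentially weighted mean/variance, updated one observation at a time.
let state = null;
for (const v of [10, 12, 11, 15, 14]) {
  state = TimeSeries.updateEMAState(v, state, 0.2);
}
console.log(state); // { mean, variance } drifting from the 0/1 defaults toward the recent values

// A perfectly linear relationship gives a correlation of 1.
console.log(TimeSeries.pearsonCorrelation([1, 2, 3, 4], [2, 4, 6, 8])); // 1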
+
+ class DistributionAnalytics {
+   static computeKDE(data, bandwidth, steps = 60) {
+     if (!data || data.length === 0) return [];
+
+     let min = Infinity, max = -Infinity;
+     for (const p of data) {
+       if (p.value < min) min = p.value;
+       if (p.value > max) max = p.value;
+     }
+
+     min -= bandwidth * 3;
+     max += bandwidth * 3;
+     const stepSize = (max - min) / steps;
+     const curve = [];
+
+     for (let i = 0; i <= steps; i++) {
+       const x = min + (i * stepSize);
+       let density = 0;
+       for (const p of data) {
+         const diff = (x - p.value);
+         if (Math.abs(diff) > bandwidth * 3) continue;
+
+         const u = diff / bandwidth;
+         const k = 0.39894228 * Math.exp(-0.5 * u * u);
+         density += (p.weight * k) / bandwidth;
+       }
+       if (density > 0) curve.push({ price: x, density });
+     }
+     return curve;
+   }
+
+   static integrateProfile(curve, startPrice, endPrice) {
+     if (!curve || !Array.isArray(curve)) return 0; // Fix for potential crash
+     let sum = 0;
+     for (let i = 0; i < curve.length - 1; i++) {
+       const p1 = curve[i];
+       const p2 = curve[i + 1];
+       if (p1.price >= startPrice && p2.price <= endPrice) {
+         sum += (p2.price - p1.price) * ((p1.density + p2.density) / 2);
+       }
+     }
+     return sum;
+   }
+
+   static linearRegression(xValues, yValues) {
+     const n = xValues.length;
+     if (n !== yValues.length || n < 2) return { slope: 0, r2: 0 };
+
+     let sumX = 0, sumY = 0, sumXY = 0, sumXX = 0, sumYY = 0;
+     for (let i = 0; i < n; i++) {
+       sumX += xValues[i];
+       sumY += yValues[i];
+       sumXY += xValues[i] * yValues[i];
+       sumXX += xValues[i] * xValues[i];
+       sumYY += yValues[i] * yValues[i];
+     }
+
+     const slopeDenom = (n * sumXX) - (sumX * sumX);
+     if (slopeDenom === 0) return { slope: 0, r2: 0 };
+
+     const slope = ((n * sumXY) - (sumX * sumY)) / slopeDenom;
+
+     // Coefficient of determination (square of the Pearson correlation).
+     const corrDenom = Math.sqrt(slopeDenom * ((n * sumYY) - (sumY * sumY)));
+     const r2 = corrDenom === 0 ? 0 : Math.pow(((n * sumXY) - (sumX * sumY)) / corrDenom, 2);
+
+     return { slope, r2, n };
+   }
+ }
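
An illustrative sketch of the KDE and regression helpers with mocked observations (the require path and data are assumptions):

// Illustrative sketch; './math' is an assumed path.
const { DistributionAnalytics } = require('./math');

// Weighted price observations smoothed into a density curve with a Gaussian kernel.
const observations = [
  { value: 100, weight: 1 },
  { value: 101, weight: 2 },
  { value: 105, weight: 1 }
];
const curve = DistributionAnalytics.computeKDE(observations, 1.5);

// Approximate share of the density mass sitting between 99 and 103.
console.log(DistributionAnalytics.integrateProfile(curve, 99, 103));

// A nearly perfect linear relationship: slope ≈ 1.94, r2 ≈ 0.996.
console.log(DistributionAnalytics.linearRegression([1, 2, 3, 4], [2.1, 3.9, 6.2, 7.8]));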
+
+ module.exports = { MathPrimitives, SignalPrimitives, Aggregators, TimeSeries, DistributionAnalytics, FinancialEngineering, TimeSeriesAnalysis };
@@ -0,0 +1,287 @@
+ /**
+  * @fileoverview Profiling Layer - Intelligence Engine (V5)
+  * Encapsulates advanced behavioral profiling, psychological scoring, and classification schemas.
+  */
+
+ const SCHEMAS = {
+   USER_TYPES: { NORMAL: 'normal', SPECULATOR: 'speculator' },
+   STYLES: {
+     INVESTOR: 'Investor',
+     SWING_TRADER: 'Swing Trader',
+     DAY_TRADER: 'Day Trader',
+     SCALPER: 'Scalper'
+   },
+   LABELS: {
+     ELITE: 'Elite',
+     SMART: 'Smart Money',
+     NEUTRAL: 'Neutral',
+     DUMB: 'Dumb Money',
+     GAMBLER: 'Gambler'
+   }
+ };
+
+ // ========================================================================
+ // 1. BEHAVIORAL ANALYTICS ENGINES
+ // ========================================================================
+
+ class CognitiveBiases {
+   /**
+    * Anchoring Bias Detector.
+    * Checks if the user holds "dead money" positions that are hovering near breakeven
+    * for extended periods, refusing to close them.
+    * @param {Array} openPositions - Current holdings. Needs OpenDateTime (Speculators).
+    * @param {number} thresholdPct - +/- % range around 0 PnL (e.g. 2%).
+    * @param {number} minDaysHeld - Minimum days held to qualify as "Anchored".
+    */
+   static calculateAnchoringScore(openPositions, thresholdPct = 2.0, minDaysHeld = 14) {
+     if (!openPositions || openPositions.length === 0) return 0;
+
+     let anchoredCount = 0;
+     let validPositions = 0;
+     const now = Date.now();
+     const msPerDay = 86400000;
+
+     for (const pos of openPositions) {
+       // Only applicable if we have OpenDateTime (Speculator Schema)
+       if (pos.OpenDateTime) {
+         validPositions++;
+         const ageDays = (now - new Date(pos.OpenDateTime).getTime()) / msPerDay;
+
+         // Is the trade old AND hovering near 0% PnL?
+         if (ageDays > minDaysHeld && Math.abs(pos.NetProfit) < thresholdPct) {
+           anchoredCount++;
+         }
+       }
+     }
+
+     return validPositions > 0 ? (anchoredCount / validPositions) : 0;
+   }
+
+   /**
+    * Disposition Effect (Loss Aversion in Time Domain).
+    * Calculates ratio of Avg Hold Time (Losers) / Avg Hold Time (Winners).
+    * Value > 1.0 means they hold losers longer than winners (Bad).
+    */
+   static calculateDispositionEffect(historyTrades) {
+     let winDur = 0, winCount = 0;
+     let lossDur = 0, lossCount = 0;
+
+     for (const t of historyTrades) {
+       if (!t.OpenDateTime || !t.CloseDateTime) continue;
+
+       const dur = (new Date(t.CloseDateTime) - new Date(t.OpenDateTime)) / 3600000; // Hours
+
+       if (t.NetProfit > 0) {
+         winDur += dur;
+         winCount++;
+       } else if (t.NetProfit < 0) {
+         lossDur += dur;
+         lossCount++;
+       }
+     }
+
+     const avgWinHold = winCount > 0 ? winDur / winCount : 0;
+     const avgLossHold = lossCount > 0 ? lossDur / lossCount : 0;
+
+     if (avgWinHold === 0) return 2.0; // Infinite bias if they never hold winners
+     return avgLossHold / avgWinHold;
+   }
+
+   /**
+    * Prospect Theory Utility Function (Kahneman/Tversky).
+    * Models the psychological "utility" (pain/pleasure) of a return.
+    * Losses hurt approx 2.25x more than gains feel good.
+    * @param {number} pnl - Net Profit %.
+    */
+   static prospectUtility(pnl, lambda = 2.25, alpha = 0.88) {
+     if (pnl >= 0) {
+       return Math.pow(pnl, alpha);
+     } else {
+       return -lambda * Math.pow(Math.abs(pnl), alpha);
+     }
+   }
+ }
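
A worked illustration of the bias metrics above (illustrative only; the `./profiling` require path is an assumption and the trade objects mock only the fields these methods read):

// Illustrative sketch; './profiling' is an assumed path.
const { CognitiveBiases } = require('./profiling');

// Losses hurt roughly 2.25x more: a +5% gain and a -5% loss are far from symmetric in utility.
console.log(CognitiveBiases.prospectUtility(5));   // ≈ 4.12
console.log(CognitiveBiases.prospectUtility(-5));  // ≈ -9.28

// Holding losers twice as long as winners gives a disposition ratio of 2.
const trades = [
  { OpenDateTime: '2024-01-01', CloseDateTime: '2024-01-03', NetProfit: 4 },  // winner, held 48h
  { OpenDateTime: '2024-01-01', CloseDateTime: '2024-01-05', NetProfit: -3 }  // loser, held 96h
];
console.log(CognitiveBiases.calculateDispositionEffect(trades)); // 2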
+
+ class SkillAttribution {
+   /**
+    * Calculates Selection Skill (Alpha) by comparing User PnL vs Asset Benchmark.
+    * Note: Since individual asset performance histories are not readily available in the
+    * user context, the 'Insights' global growth is used as a daily benchmark proxy.
+    * @param {Array} userPositions - Current open positions.
+    * @param {Object|Array} dailyInsights - Map of InstrumentID -> Insight Data, or the raw Insights array (entries contain 'growth').
+    */
+   static calculateSelectionAlpha(userPositions, dailyInsights) {
+     if (!userPositions || !userPositions.length) return 0;
+
+     let totalAlpha = 0;
+     let count = 0;
+
+     for (const pos of userPositions) {
+       const instrumentId = pos.InstrumentID;
+       // Schema 5 (Insights) is an array; when it is passed as the raw array, look the instrument up here.
+       let insight = null;
+       if (Array.isArray(dailyInsights)) {
+         insight = dailyInsights.find(i => i.instrumentId === instrumentId);
+       }
+
+       if (insight && typeof insight.growth === 'number') {
+         // Daily user PnL is not stored explicitly, so NetProfit stands in as the performance state.
+         // Rough heuristic given the schema limits: positive PnL against negative asset growth
+         // (bucking the trend) reads as high alpha. Simplified here to the raw difference.
+         const diff = pos.NetProfit - insight.growth;
+         totalAlpha += diff;
+         count++;
+       }
+     }
+     return count > 0 ? totalAlpha / count : 0;
+   }
+ }
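
An illustrative sketch of the alpha heuristic with mocked positions and insights (only the fields the method reads are mocked; the require path is assumed):

// Illustrative sketch; './profiling' is an assumed path.
const { SkillAttribution } = require('./profiling');

const positions = [
  { InstrumentID: 1001, NetProfit: 6 },  // up 6% while the asset itself is up 2%: +4 alpha
  { InstrumentID: 1002, NetProfit: -1 }  // down 1% while the asset is down 5%: +4 alpha
];
const insights = [
  { instrumentId: 1001, growth: 2 },
  { instrumentId: 1002, growth: -5 }
];

console.log(SkillAttribution.calculateSelectionAlpha(positions, insights)); // 4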
+
+ class ExecutionAnalytics {
+   static calculateEfficiency(price, priceHistory, date, direction, windowDays = 7) {
+     if (!priceHistory || priceHistory.length === 0) return 0.5;
+
+     const targetDate = new Date(date);
+     const start = new Date(targetDate);
+     start.setDate(start.getDate() - windowDays);
+     const end = new Date(targetDate);
+     end.setDate(end.getDate() + windowDays);
+
+     const pricesInWindow = priceHistory
+       .filter(p => { const d = new Date(p.date); return d >= start && d <= end; })
+       .map(p => p.price);
+     if (pricesInWindow.length === 0) return 0.5;
+
+     const minP = Math.min(...pricesInWindow);
+     const maxP = Math.max(...pricesInWindow);
+     const range = maxP - minP;
+     if (range === 0) return 0.5;
+
+     const location = (price - minP) / range;
+     if (direction === 'Buy') { return 1 - location; } else { return location; }
+   }
+
+   static calculateLossTolerance(realizedPnL, maxDrawdown) {
+     if (maxDrawdown === 0) return 1;
+     return (realizedPnL - maxDrawdown) / Math.abs(maxDrawdown);
+   }
+ }
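
A worked example of the entry-efficiency calculation (illustrative only; the dates, prices, and require path are assumptions):

// Illustrative sketch; './profiling' is an assumed path.
const { ExecutionAnalytics } = require('./profiling');

// Buying at 100 when the surrounding 7-day window spanned 95-110 places the fill
// one third of the way up the range, so efficiency for a Buy is 1 - 0.333 ≈ 0.67.
const history = [
  { date: '2024-03-01', price: 95 },
  { date: '2024-03-04', price: 110 },
  { date: '2024-03-07', price: 102 }
];
console.log(ExecutionAnalytics.calculateEfficiency(100, history, '2024-03-04', 'Buy')); // ≈ 0.67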
+
+ class Psychometrics {
+   static computeDispositionSkew(historyTrades, currentPositions) {
+     const getMedian = (arr) => {
+       if (!arr.length) return 0;
+       const sorted = [...arr].sort((a, b) => a - b);
+       const mid = Math.floor(sorted.length / 2);
+       return sorted.length % 2 !== 0 ? sorted[mid] : (sorted[mid - 1] + sorted[mid]) / 2;
+     };
+     const realized = historyTrades.map(t => t.NetProfit);
+     const unrealized = currentPositions.map(p => p.NetProfit);
+     if (realized.length < 5 || unrealized.length < 5) return 0;
+     return getMedian(realized) - getMedian(unrealized);
+   }
+
+   static detectRevengeTrading(historyTrades) {
+     let riskSpikes = 0;
+     let losses = 0;
+     if (historyTrades.length === 0) return 0;
+
+     const avgLev = historyTrades.reduce((sum, t) => sum + (t.Leverage || 1), 0) / historyTrades.length;
+     for (let i = 1; i < historyTrades.length; i++) {
+       const prev = historyTrades[i - 1];
+       const curr = historyTrades[i];
+       if (prev.NetProfit < 0) {
+         losses++;
+         if ((curr.Leverage || 1) > (avgLev * 1.5)) {
+           riskSpikes++;
+         }
+       }
+     }
+     return losses > 0 ? (riskSpikes / losses) : 0;
+   }
+ }
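
An illustrative sketch of the revenge-trading detector on a mocked trade sequence (path and data assumed):

// Illustrative sketch; './profiling' is an assumed path.
const { Psychometrics } = require('./profiling');

// After each loss this trader jumps from 2x to 10x leverage, well above 1.5x the average.
const trades = [
  { NetProfit: -2, Leverage: 2 },
  { NetProfit: 1,  Leverage: 10 },
  { NetProfit: -3, Leverage: 2 },
  { NetProfit: -1, Leverage: 10 }
];
console.log(Psychometrics.detectRevengeTrading(trades)); // 1 (leverage spiked after every loss that had a follow-up trade)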
+
+ class AdaptiveAnalytics {
+   static analyzeDrawdownAdaptation(tradeHistory, drawdownThreshold = -15) {
+     let adaptationScore = 0;
+     let eventCount = 0;
+
+     for (let i = 0; i < tradeHistory.length - 3; i++) {
+       if (tradeHistory[i].NetProfit < drawdownThreshold) {
+         eventCount++;
+         const nextTrades = tradeHistory.slice(i + 1, i + 4);
+         const prevLev = tradeHistory[i].Leverage || 1;
+         const nextLevAvg = nextTrades.reduce((s, t) => s + (t.Leverage || 1), 0) / 3;
+
+         if (nextLevAvg < prevLev) adaptationScore += 1;
+         else if (nextLevAvg > prevLev * 1.5) adaptationScore -= 2;
+
+         const lostInstrument = tradeHistory[i].InstrumentID;
+         const stuckToSame = nextTrades.every(t => t.InstrumentID === lostInstrument);
+         if (!stuckToSame) adaptationScore += 0.5;
+       }
+     }
+     return eventCount > 0 ? (adaptationScore / eventCount) : 0;
+   }
+ }
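
An illustrative sketch of the drawdown-adaptation score on a mocked sequence (path and data assumed):

// Illustrative sketch; './profiling' is an assumed path.
const { AdaptiveAnalytics } = require('./profiling');

// A -20% blow-up followed by three smaller, de-levered trades on other instruments
// scores +1 for cutting leverage and +0.5 for rotating away from the losing instrument.
const trades = [
  { NetProfit: -20, Leverage: 10, InstrumentID: 1 },
  { NetProfit: 2,   Leverage: 2,  InstrumentID: 2 },
  { NetProfit: 1,   Leverage: 2,  InstrumentID: 3 },
  { NetProfit: 3,   Leverage: 1,  InstrumentID: 2 }
];
console.log(AdaptiveAnalytics.analyzeDrawdownAdaptation(trades)); // 1.5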
+
+ class UserClassifier {
+   static classify(context) {
+     const { user, math, prices } = context;
+     const history = user.history.today?.PublicHistoryPositions || [];
+     const validHistory = history.filter(t => t.OpenDateTime);
+     validHistory.sort((a, b) => new Date(a.OpenDateTime) - new Date(b.OpenDateTime));
+     const portfolio = math.extract.getPositions(user.portfolio.today, user.type);
+     const summary = math.history.getSummary(user.history.today);
+     if (!summary) return { intelligence: { label: SCHEMAS.LABELS.NEUTRAL, score: 0 }, style: { primary: SCHEMAS.STYLES.INVESTOR } };
+
+     let entryScores = [];
+     const recentTrades = validHistory.slice(-20);
+     for (const t of recentTrades) {
+       const ticker = context.mappings.instrumentToTicker[t.InstrumentID];
+       const priceData = math.priceExtractor.getHistory(prices, ticker);
+       if (priceData && priceData.length > 0) {
+         entryScores.push(ExecutionAnalytics.calculateEfficiency(t.OpenRate, priceData, t.OpenDateTime, 'Buy'));
+       }
+     }
+     const avgEntryEff = math.compute.average(entryScores) || 0.5;
+     const dispositionSkew = Psychometrics.computeDispositionSkew(validHistory, portfolio);
+     const revengeScore = Psychometrics.detectRevengeTrading(validHistory);
+     const adaptationScore = AdaptiveAnalytics.analyzeDrawdownAdaptation(validHistory);
+
+     // New Cognitive Bias Checks
+     const anchoring = CognitiveBiases.calculateAnchoringScore(portfolio);
+     const dispositionTime = CognitiveBiases.calculateDispositionEffect(validHistory);
+
+     const riskAdjustedReturn = summary.avgLossPct === 0 ? 10 : (summary.avgProfitPct / Math.abs(summary.avgLossPct));
+     let smartScore = 50;
+     if (riskAdjustedReturn > 1.5) smartScore += 10;
+     if (riskAdjustedReturn > 3.0) smartScore += 10;
+     if (summary.winRatio > 60) smartScore += 10;
+     if (avgEntryEff > 0.7) smartScore += 10;
+     if (avgEntryEff < 0.3) smartScore -= 5;
+     if (dispositionSkew > 15) smartScore -= 20;
+     else if (dispositionSkew < 5) smartScore += 10;
+     if (revengeScore > 0.3) smartScore -= 25;
+     if (adaptationScore > 0.5) smartScore += 5;
+     if (adaptationScore < -0.5) smartScore -= 10;
+
+     // Penalty for biases
+     if (anchoring > 0.3) smartScore -= 10;
+     if (dispositionTime > 1.5) smartScore -= 10;
+
+     let label = SCHEMAS.LABELS.NEUTRAL;
+     if (smartScore >= 80) label = SCHEMAS.LABELS.ELITE;
+     else if (smartScore >= 65) label = SCHEMAS.LABELS.SMART;
+     else if (smartScore <= 30) label = SCHEMAS.LABELS.GAMBLER;
+     else if (smartScore <= 45) label = SCHEMAS.LABELS.DUMB;
+
+     const styleProfile = this.classifyStyle(validHistory, portfolio);
+     return {
+       intelligence: { label: label, score: Math.max(0, Math.min(100, smartScore)), isSmart: smartScore >= 65 },
+       style: styleProfile,
+       metrics: {
+         entryEfficiency: avgEntryEff,
+         dispositionSkew: dispositionSkew,
+         revengeTendency: revengeScore,
+         riskRewardRatio: riskAdjustedReturn,
+         drawdownAdaptation: adaptationScore,
+         biasAnchoring: anchoring,
+         biasDispositionTime: dispositionTime
+       }
+     };
+   }
+
+   static classifyStyle(history, portfolio) {
+     let totalMinutes = 0;
+     let validTrades = 0;
+     history.forEach(t => {
+       if (t.OpenDateTime && t.CloseDateTime) {
+         const open = new Date(t.OpenDateTime);
+         const close = new Date(t.CloseDateTime);
+         totalMinutes += (close - open) / 60000;
+         validTrades++;
+       }
+     });
+     const avgHoldTime = validTrades > 0 ? totalMinutes / validTrades : 0;
+
+     let baseStyle = SCHEMAS.STYLES.INVESTOR;
+     if (validTrades > 0) {
+       if (avgHoldTime < 60) baseStyle = SCHEMAS.STYLES.SCALPER;
+       else if (avgHoldTime < 60 * 24) baseStyle = SCHEMAS.STYLES.DAY_TRADER;
+       else if (avgHoldTime < 60 * 24 * 7) baseStyle = SCHEMAS.STYLES.SWING_TRADER;
+     }
+
+     const subStyles = new Set();
+     const assets = [...history, ...portfolio];
+     let leverageCount = 0;
+     assets.forEach(p => { if ((p.Leverage || 1) > 1) leverageCount++; });
+     const tradeCount = assets.length || 1;
+     if ((leverageCount / tradeCount) > 0.3) subStyles.add("Speculative");
+     if ((leverageCount / tradeCount) > 0.8) subStyles.add("High-Leverage");
+
+     return { primary: baseStyle, tags: Array.from(subStyles), avgHoldTimeMinutes: avgHoldTime };
+   }
+ }
+
+ module.exports = {
+   SCHEMAS,
+   UserClassifier,
+   ExecutionAnalytics,
+   Psychometrics,
+   AdaptiveAnalytics,
+   CognitiveBiases, // New
+   SkillAttribution // New
+ };