aiden-shared-calculations-unified 1.0.93 → 1.0.95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/calculations/capitulation/asset-volatility-estimator.js +11 -8
- package/calculations/capitulation/retail-capitulation-risk-forecast.js +102 -54
- package/calculations/gauss/cohort-definer.js +24 -2
- package/calculations/ghost-book/cost-basis-density.js +30 -8
- package/calculations/predicative-alpha/mimetic-latency.js +15 -17
- package/package.json +1 -1
package/calculations/capitulation/asset-volatility-estimator.js

@@ -1,7 +1,7 @@
 /**
  * @fileoverview CORE Product Line (Pass 1 - Meta)
- * Calculates annualized volatility
- *
+ * Calculates annualized volatility.
+ * Safe for sharded execution (accumulates results).
  */
 class AssetVolatilityEstimator {
   constructor() {
@@ -39,7 +39,7 @@ class AssetVolatilityEstimator {
 
     if (!prices || !prices.history) return;
 
-    //
+    // Efficiently extract only histories present in this shard/context
     const allHistories = priceExtractor.getAllHistories(prices);
 
     const batchResult = {};
@@ -47,15 +47,16 @@ class AssetVolatilityEstimator {
     for (const [key, candles] of allHistories.entries()) {
       let ticker = key;
 
-      // Resolve ticker
-      if (prices.history
+      // Resolve ticker logic
+      if (prices.history[key] && prices.history[key].instrumentId) {
         const instId = prices.history[key].instrumentId;
         if (mappings && mappings.instrumentToTicker && mappings.instrumentToTicker[instId]) {
           ticker = mappings.instrumentToTicker[instId];
         }
       }
 
-
+      // Guard: Need enough data for a trend
+      if (!candles || candles.length < 10) continue;
 
       const logReturns = [];
       let lastPrice = 0;
@@ -64,6 +65,7 @@ class AssetVolatilityEstimator {
         const prev = candles[i-1].price;
         const curr = candles[i].price;
 
+        // Guard: Prevent log(0) or log(negative) errors
        if (prev > 0 && curr > 0) {
          logReturns.push(Math.log(curr / prev));
          lastPrice = curr;
@@ -73,19 +75,20 @@ class AssetVolatilityEstimator {
       const LOOKBACK = 30;
       const relevantReturns = logReturns.slice(-LOOKBACK);
 
+      // Guard: Need enough returns for Standard Deviation
       if (relevantReturns.length < 5) continue;
 
       const stdDev = compute.standardDeviation(relevantReturns);
       const annualizedVol = stdDev * Math.sqrt(365);
 
       batchResult[ticker] = {
-        volatility_30d: annualizedVol,
+        volatility_30d: Number(annualizedVol.toFixed(4)),
         last_price: lastPrice,
         data_points: relevantReturns.length
       };
     }
 
-    // Accumulate
+    // Accumulate results (handling batched execution)
     Object.assign(this.result, batchResult);
   }
 
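Reviewer note: the annualization above is daily sigma times sqrt(365) over at most 30 log returns. The standalone sketch below mirrors the new guards; standardDeviation is a local stand-in for the package's compute.standardDeviation, whose exact implementation is not shown in this diff.

// Sketch only, not package code.
function standardDeviation(xs) {
  const mean = xs.reduce((s, x) => s + x, 0) / xs.length;
  return Math.sqrt(xs.reduce((s, x) => s + (x - mean) ** 2, 0) / xs.length);
}

function annualizedVol30d(candles) {
  // Guard: need enough data for a trend (same threshold as the diff)
  if (!candles || candles.length < 10) return null;

  const logReturns = [];
  for (let i = 1; i < candles.length; i++) {
    const prev = candles[i - 1].price;
    const curr = candles[i].price;
    // Guard: prevent log(0) / log(negative)
    if (prev > 0 && curr > 0) logReturns.push(Math.log(curr / prev));
  }

  const relevant = logReturns.slice(-30);
  // Guard: need enough returns for a standard deviation
  if (relevant.length < 5) return null;

  // Daily sigma annualized by sqrt(365), rounded to 4 decimals as in 1.0.95
  return Number((standardDeviation(relevant) * Math.sqrt(365)).toFixed(4));
}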
package/calculations/capitulation/retail-capitulation-risk-forecast.js

@@ -1,16 +1,18 @@
 /**
  * @fileoverview CORE Product Line (Pass 2 - Standard)
  * Calculates capitulation risk using DYNAMIC volatility from asset-volatility-estimator.
- *
+ * MEMORY OPTIMIZED: Uses Cohort Aggregation instead of raw user arrays.
  */
 
 class RetailCapitulationRiskForecast {
   constructor() {
-
+    // Map<Ticker, Map<CohortKey, { entryPrice, thresholdPct, weight }>>
+    this.assetCohorts = new Map();
+
+    // Cache for Ticker Map
     this.tickerMap = null;
 
-    //
-    // This allows us to MUTATE 'deps.mathLib' instead of RE-ASSIGNING 'this.mathLib'
+    // Container for dependency injection (MathLib)
     this.deps = { mathLib: null };
   }
 
@@ -43,23 +45,11 @@ class RetailCapitulationRiskForecast {
     return { "type": "object", "patternProperties": { "^.*$": tickerSchema } };
   }
 
-  _initAsset(ticker, currentPrice, volatility) {
-    if (!this.assetRiskProfiles.has(ticker)) {
-      this.assetRiskProfiles.set(ticker, {
-        currentPrice: currentPrice,
-        volatility: volatility,
-        profiles: []
-      });
-    }
-  }
-
   process(context) {
     const { user, mappings, math, computed } = context;
     const { extract, history, compute, signals } = math;
 
-    // 1. Capture
-    // We are retrieving the 'deps' object (GET) and mutating its property.
-    // This bypasses the Proxy SET trap entirely.
+    // 1. Capture MathLib (Mutation Pattern)
     if (!this.deps.mathLib && compute) {
       this.deps.mathLib = compute;
     }
@@ -69,68 +59,123 @@ class RetailCapitulationRiskForecast {
     }
 
     // 2. Determine User's "Pain Threshold"
+    // We bucket this to the nearest 5% to reduce cardinality
     const historyDoc = history.getDailyHistory(user);
     const summary = history.getSummary(historyDoc);
-    let
-
-    : -25.0;
+    let rawThreshold = (summary && summary.avgLossPct < 0) ? summary.avgLossPct : -25.0;
+    const bucketedThreshold = Math.round(rawThreshold / 5) * 5;
 
     // 3. Analyze Positions
     const positions = extract.getPositions(user.portfolio.today, user.type);
 
     for (const pos of positions) {
       const instId = extract.getInstrumentId(pos);
-
       if (!this.tickerMap) continue;
-
       const ticker = this.tickerMap[instId];
-
       if (!ticker) continue;
 
-      // Fetch Dependency
+      // Fetch Volatility & Price from Dependency
+      // Note: We access this to normalize Entry Price, but we store Volatility
+      // at the aggregation stage to keep it consistent.
       const assetStats = signals.getPreviousState(computed, 'asset-volatility-estimator', ticker);
-
-      const dynamicVol = assetStats ? assetStats.volatility_30d : 0.60;
       const currentPrice = assetStats ? assetStats.last_price : 0;
+      const dynamicVol = assetStats ? assetStats.volatility_30d : 0.60;
 
       if (currentPrice <= 0) continue;
 
-      // Get P&L from Position Schema
       const netProfit = extract.getNetProfit(pos);
-
-      // Calculate Entry Price using the Dependency Price
       const entryPrice = extract.deriveEntryPrice(currentPrice, netProfit);
 
       if (entryPrice > 0) {
-        this.
-
-
-
-
+        this._aggregateCohort(ticker, entryPrice, bucketedThreshold, currentPrice, dynamicVol);
+      }
+    }
+  }
+
+  /**
+   * Aggregates a user into a memory-efficient cohort.
+   */
+  _aggregateCohort(ticker, entryPrice, thresholdPct, currentPrice, volatility) {
+    if (!this.assetCohorts.has(ticker)) {
+      this.assetCohorts.set(ticker, {
+        currentPrice,
+        volatility,
+        cohorts: new Map() // Key: "Entry_Threshold" -> Value: { count }
+      });
+    }
+
+    const assetData = this.assetCohorts.get(ticker);
+
+    // Update volatility/price (last write wins is fine for streaming updates of same asset)
+    assetData.currentPrice = currentPrice;
+    assetData.volatility = volatility;
+
+    // Create Cohort Key
+    // Round Entry Price to 2 significant digits to bucket users
+    const entryKey = entryPrice.toPrecision(3);
+    const cohortKey = `${entryKey}_${thresholdPct}`;
+
+    if (!assetData.cohorts.has(cohortKey)) {
+      assetData.cohorts.set(cohortKey, {
+        entryPrice: parseFloat(entryKey),
+        thresholdPct: thresholdPct,
+        count: 0
+      });
+    }
+
+    // Increment count (Weighting)
+    assetData.cohorts.get(cohortKey).count++;
+  }
+
+  /**
+   * Local implementation of Population Breakdown that handles WEIGHTED cohorts.
+   * Replaces math.simulatePopulationBreakdown for this specific optimized calculation.
+   */
+  _simulateWeightedBreakdown(pricePaths, cohorts) {
+    if (!pricePaths.length || cohorts.length === 0) return 0;
+
+    let totalBreakdownEvents = 0;
+    const totalSims = pricePaths.length;
+
+    // Calculate total users (sum of all cohort counts)
+    const totalUsers = cohorts.reduce((sum, c) => sum + c.count, 0);
+    if (totalUsers === 0) return 0;
+
+    for (let i = 0; i < totalSims; i++) {
+      const simPrice = pricePaths[i];
+      let capitulatedCount = 0;
+
+      for (const cohort of cohorts) {
+        // P&L% = (CurrentValue - EntryValue) / EntryValue
+        const hypotheticalPnL = ((simPrice - cohort.entryPrice) / cohort.entryPrice) * 100;
+
+        if (hypotheticalPnL < cohort.thresholdPct) {
+          capitulatedCount += cohort.count; // Add the WEIGHT of the cohort
+        }
       }
+
+      totalBreakdownEvents += (capitulatedCount / totalUsers);
     }
+
+    return totalBreakdownEvents / totalSims;
   }
 
   async getResult() {
     const result = {};
     const TIME_HORIZON_DAYS = 3;
     const SIMULATION_COUNT = 1000;
-
-    // Access the library from the container
     const mathLib = this.deps.mathLib;
 
-    if (!mathLib || !mathLib.simulateGBM) {
-      console.log('[DEBUG RCRF] MathLib missing in deps container!');
-      return {};
-    }
+    if (!mathLib || !mathLib.simulateGBM) return {};
 
-    for (const [ticker, data] of this.
-
-
-
+    for (const [ticker, data] of this.assetCohorts.entries()) {
+      // Flatten Map values to Array for iteration
+      const cohortArray = Array.from(data.cohorts.values());
+
+      if (cohortArray.length === 0) continue;
 
       try {
-        // 1. Generate Price Paths
+        // 1. Generate Price Paths (O(Sims))
         const pricePaths = mathLib.simulateGBM(
           data.currentPrice,
           data.volatility,
@@ -140,23 +185,26 @@ class RetailCapitulationRiskForecast {
 
         if (!pricePaths || pricePaths.length === 0) continue;
 
-        // 2. Run Population Breakdown
-
+        // 2. Run Weighted Population Breakdown (O(Sims * Cohorts))
+        // This is much faster than O(Sims * Users)
+        const capitulationProb = this._simulateWeightedBreakdown(
           pricePaths,
-
+          cohortArray
         );
 
-
-        const
+        // Calculate weighted average threshold for reporting
+        const totalUsers = cohortArray.reduce((sum, c) => sum + c.count, 0);
+        const weightedThresholdSum = cohortArray.reduce((sum, c) => sum + (c.thresholdPct * c.count), 0);
+        const avgThreshold = totalUsers > 0 ? weightedThresholdSum / totalUsers : -25;
 
         result[ticker] = {
           capitulation_probability: parseFloat(capitulationProb.toFixed(4)),
-          at_risk_user_count:
+          at_risk_user_count: totalUsers,
           average_pain_threshold_pct: parseFloat(avgThreshold.toFixed(2)),
           used_volatility: parseFloat(data.volatility.toFixed(4))
         };
       } catch (err) {
-
+        // Silent catch to prevent one bad ticker from stopping the batch
       }
     }
 
@@ -164,9 +212,9 @@ class RetailCapitulationRiskForecast {
   }
 
   reset() {
-    this.
+    this.assetCohorts.clear();
     this.tickerMap = null;
-     this.deps.mathLib = null;
+    this.deps.mathLib = null;
   }
 }
 
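Reviewer note: the heart of this change is simulating cohorts instead of individual users. Below is a minimal sketch of the weighted breakdown under two stated assumptions: simulateGBM is a toy stand-in for the injected mathLib.simulateGBM (not shown in the diff), and it returns one terminal price per simulation.

// Sketch only, not package code.
function simulateGBM(price, vol, days, sims) {
  const dt = days / 365;
  const paths = [];
  for (let i = 0; i < sims; i++) {
    // Box-Muller standard normal; (1 - random()) avoids log(0)
    const z = Math.sqrt(-2 * Math.log(1 - Math.random())) * Math.cos(2 * Math.PI * Math.random());
    // Driftless GBM terminal price
    paths.push(price * Math.exp(-0.5 * vol * vol * dt + vol * Math.sqrt(dt) * z));
  }
  return paths;
}

function weightedBreakdown(pricePaths, cohorts) {
  const totalUsers = cohorts.reduce((s, c) => s + c.count, 0);
  if (!pricePaths.length || totalUsers === 0) return 0;

  let events = 0;
  for (const simPrice of pricePaths) {
    let capitulated = 0;
    for (const c of cohorts) {
      // Hypothetical P&L% of the entire cohort at this simulated price
      const pnlPct = ((simPrice - c.entryPrice) / c.entryPrice) * 100;
      if (pnlPct < c.thresholdPct) capitulated += c.count;
    }
    events += capitulated / totalUsers;
  }
  return events / pricePaths.length;
}

// Two cohorts stand in for 700 users: O(sims * cohorts), not O(sims * users)
const paths = simulateGBM(100, 0.6, 3, 1000);
console.log(weightedBreakdown(paths, [
  { entryPrice: 120, thresholdPct: -25, count: 500 },
  { entryPrice: 95, thresholdPct: -10, count: 200 }
]));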
package/calculations/gauss/cohort-definer.js

@@ -24,10 +24,32 @@ class CohortDefiner {
   }
 
   static getSchema() {
-    const
+    const cohortList = {
+      "type": "array",
+      "items": { "type": "string" },
+      "description": "List of User IDs belonging to this behavioral cohort"
+    };
+
     return {
       "type": "object",
-      "
+      "required": [
+        "smart_investors",
+        "smart_scalpers",
+        "uncategorized_smart",
+        "fomo_chasers",
+        "patient_losers",
+        "fomo_bagholders",
+        "uncategorized_dumb"
+      ],
+      "properties": {
+        "smart_investors": cohortList,
+        "smart_scalpers": cohortList,
+        "uncategorized_smart": cohortList,
+        "fomo_chasers": cohortList,
+        "patient_losers": cohortList,
+        "fomo_bagholders": cohortList,
+        "uncategorized_dumb": cohortList
+      }
     };
   }
 
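Reviewer note: the schema now pins seven required cohort keys to one shared list definition. For reference, a document satisfying the new schema looks like this (user IDs invented for illustration):

// Sketch only: an example output shape, not package code.
const exampleCohortOutput = {
  smart_investors:     ["u_1001", "u_1042"],
  smart_scalpers:      ["u_2203"],
  uncategorized_smart: [],
  fomo_chasers:        ["u_3307", "u_3310"],
  patient_losers:      ["u_4100"],
  fomo_bagholders:     [],
  uncategorized_dumb:  ["u_9999"]
};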
package/calculations/ghost-book/cost-basis-density.js

@@ -7,7 +7,7 @@ class CostBasisDensity {
 
   static getMetadata() {
     return {
-      type: 'meta',
+      type: 'meta', // Runs ONCE per day
       dependencies: ['asset-cost-basis-profile'],
       category: 'ghost_book'
     };
@@ -35,11 +35,17 @@ class CostBasisDensity {
     const { computed, math } = context;
     const { signals: SignalPrimitives } = math;
 
+    // 1. Get Union of Tickers (Safe Iteration)
     const tickers = SignalPrimitives.getUnionKeys(computed, ['asset-cost-basis-profile']);
 
     for (const ticker of tickers) {
+      // 2. Safe Data Access
       const data = computed['asset-cost-basis-profile'][ticker];
-
+
+      // Check for 'profile' specifically as it contains the density curve
+      if (!data || !Array.isArray(data.profile) || data.profile.length < 3) {
+        continue;
+      }
 
       const profile = data.profile; // Array of {price, density}
       const currentPrice = data.current_price;
@@ -48,26 +54,42 @@ class CostBasisDensity {
       const support = [];
       let maxDensity = 0;
 
-      //
+      // 3. Peak Detection Algorithm
+      // Iterate through the KDE curve to find local maxima
       for (let i = 1; i < profile.length - 1; i++) {
         const prev = profile[i-1].density;
         const curr = profile[i].density;
         const next = profile[i+1].density;
 
+        // Simple Peak Check
         if (curr > prev && curr > next) {
-
+          const priceVal = Number(profile[i].price.toFixed(2));
+
+          // Classify as Resistance (Overhead Supply) or Support (Underlying Demand)
          if (profile[i].price > currentPrice) {
-            resistance.push(
+            resistance.push(priceVal);
          } else {
-            support.push(
+            support.push(priceVal);
          }
+
          if (curr > maxDensity) maxDensity = curr;
        }
      }
 
+      // 4. Sort Walls by proximity to current price?
+      // Currently slice(0,3) takes the first found, which are lower prices in a sorted KDE.
+      // Support: We want HIGHEST prices below current (closest to current).
+      // Resistance: We want LOWEST prices above current (closest to current).
+
+      // Sort Descending (Highest Price First)
+      support.sort((a, b) => b - a);
+
+      // Sort Ascending (Lowest Price First)
+      resistance.sort((a, b) => a - b);
+
      this.walls[ticker] = {
-        resistance_zones: resistance.slice(0, 3), //
-        support_zones: support.slice(0, 3),
+        resistance_zones: resistance.slice(0, 3), // Closest 3 resistance levels
+        support_zones: support.slice(0, 3), // Closest 3 support levels
        nearest_wall_strength: Number(maxDensity.toFixed(4))
      };
    }
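Reviewer note: the sorting added in step 4 is what makes slice(0, 3) mean "the three walls nearest the current price". A self-contained sketch of the peak detection plus proximity ordering, run on an invented density profile:

// Sketch only, not package code.
function findWalls(profile, currentPrice) {
  const resistance = [];
  const support = [];
  let maxDensity = 0;

  for (let i = 1; i < profile.length - 1; i++) {
    const { price, density } = profile[i];
    // Local maximum of the density curve
    if (density > profile[i - 1].density && density > profile[i + 1].density) {
      const priceVal = Number(price.toFixed(2));
      (price > currentPrice ? resistance : support).push(priceVal);
      if (density > maxDensity) maxDensity = density;
    }
  }

  support.sort((a, b) => b - a);    // highest support first (closest below)
  resistance.sort((a, b) => a - b); // lowest resistance first (closest above)

  return {
    resistance_zones: resistance.slice(0, 3),
    support_zones: support.slice(0, 3),
    nearest_wall_strength: Number(maxDensity.toFixed(4))
  };
}

// Invented KDE profile: peaks at 95 (below) and 110 (above)
const profile = [90, 95, 100, 105, 110, 115].map((p, i) => ({
  price: p,
  density: [0.1, 0.4, 0.2, 0.3, 0.5, 0.1][i]
}));
console.log(findWalls(profile, 102)); // support: [95], resistance: [110]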
package/calculations/predicative-alpha/mimetic-latency.js

@@ -1,6 +1,7 @@
 /**
  * @fileoverview Mimetic Latency Oscillator (MLO) v2.2
- *
+ * Measures the lag between "Smart Money" flow and "Herd" conviction.
+ * Self-healing state: Warms up over 15-30 days automatically.
  */
 class MimeticLatencyOscillator {
   constructor() {
@@ -44,12 +45,10 @@ class MimeticLatencyOscillator {
 
     for (const ticker of tickers) {
       // 1. Inputs
-      // 'skilled-cohort-flow' -> net_flow_pct
       const rawFlow = SignalPrimitives.getMetric(computed, 'skilled-cohort-flow', ticker, 'net_flow_pct', 0);
-      // 'herd-consensus-score' -> herd_conviction_score
       const rawHerd = SignalPrimitives.getMetric(computed, 'herd-consensus-score', ticker, 'herd_conviction_score', 0);
 
-      // 2. Restore State
+      // 2. Restore State (Handle Cold Start)
       const prevResult = SignalPrimitives.getPreviousState(previousComputed, 'mimetic-latency', ticker);
       const prevState = prevResult ? prevResult._state : { flow_buffer: [], herd_buffer: [], last_flow: 0, last_herd: 0 };
 
@@ -67,20 +66,20 @@ class MimeticLatencyOscillator {
       flowBuffer.push(flowDelta);
       herdBuffer.push(herdDelta);
 
+      // Cap memory usage
       if (flowBuffer.length > this.windowSize) flowBuffer.shift();
       if (herdBuffer.length > this.windowSize) herdBuffer.shift();
 
       // 5. Lagged Cross-Correlation
       let maxCorr = -1.0;
       let bestLag = 0;
+      let regime = "WARM_UP";
 
+      // Only compute if we have statistical significance (>= 15 samples)
       if (flowBuffer.length >= 15) {
         for (let k = 0; k <= this.maxLag; k++) {
-          // Check if Flow[t-k] predicts Herd[t]
-          // Slice Flow: 0 to End-k
-          // Slice Herd: k to End
           const len = flowBuffer.length;
-           if (len - k < 5) continue;
+          if (len - k < 5) continue;
 
           const slicedFlow = flowBuffer.slice(0, len - k);
           const slicedHerd = herdBuffer.slice(k, len);
@@ -92,16 +91,15 @@ class MimeticLatencyOscillator {
             bestLag = k;
           }
         }
-        }
 
-
-
-
-
-        else
-
-
+        // 6. Regime Classification
+        if (maxCorr > 0.3) {
+          if (bestLag >= 3) regime = "EARLY_ALPHA";
+          else if (bestLag >= 1) regime = "MARKUP";
+          else regime = "FOMO_TRAP";
+        } else {
+          regime = "DECOUPLING";
+        }
       }
 
       this.mloResults[ticker] = {
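Reviewer note: a minimal sketch of the lagged cross-correlation and the new regime thresholds. Assumptions are labeled: pearson is a local helper standing in for the package's correlation primitive (not shown in this diff), and maxLag = 5 is an illustrative default since this.maxLag is defined outside the hunks above.

// Sketch only, not package code.
function pearson(xs, ys) {
  const n = xs.length;
  const mx = xs.reduce((s, v) => s + v, 0) / n;
  const my = ys.reduce((s, v) => s + v, 0) / n;
  let num = 0, dx = 0, dy = 0;
  for (let i = 0; i < n; i++) {
    num += (xs[i] - mx) * (ys[i] - my);
    dx += (xs[i] - mx) ** 2;
    dy += (ys[i] - my) ** 2;
  }
  return dx && dy ? num / Math.sqrt(dx * dy) : 0;
}

function classifyRegime(flowBuffer, herdBuffer, maxLag = 5) {
  // Matches the WARM_UP default and the >= 15 sample gate in the diff
  if (flowBuffer.length < 15) return { regime: "WARM_UP", bestLag: 0 };

  let maxCorr = -1.0;
  let bestLag = 0;
  for (let k = 0; k <= maxLag; k++) {
    const len = flowBuffer.length;
    if (len - k < 5) continue;
    // Does Flow[t-k] line up with Herd[t]?
    const corr = pearson(flowBuffer.slice(0, len - k), herdBuffer.slice(k, len));
    if (corr > maxCorr) { maxCorr = corr; bestLag = k; }
  }

  // Same thresholds as 1.0.95: correlated with a long lag reads as early
  // smart-money alpha, zero lag as a FOMO trap, weak correlation as decoupling
  let regime;
  if (maxCorr > 0.3) {
    if (bestLag >= 3) regime = "EARLY_ALPHA";
    else if (bestLag >= 1) regime = "MARKUP";
    else regime = "FOMO_TRAP";
  } else {
    regime = "DECOUPLING";
  }
  return { regime, bestLag };
}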