aiden-shared-calculations-unified 1.0.70 → 1.0.72

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -61,7 +61,7 @@ class DumbCohortFlow {
    * Statically declare dependencies.
    */
   static getDependencies() {
-    return ['user_profitability_tracker'];
+    return ['user_profitability_tracker'];
   }
 
   _getPortfolioPositions(portfolio) {
@@ -79,6 +79,8 @@ class InLossAssetCrowdFlow {
 
   /**
    * Helper to get the cohort data from the dependency.
+   * --- MODIFIED ---
+   * Reads `data.users_in_loss` as a string array, not an object array.
    */
   _getInLossCohorts(fetchedDependencies) {
     if (this.inLossCohorts) {
@@ -94,7 +96,8 @@ class InLossAssetCrowdFlow {
     // Map<ticker, Set<userId>>
     this.inLossCohorts = new Map();
     for (const [ticker, data] of Object.entries(pnlStatusData)) {
-      const userSet = new Set(data.users_in_loss.map(u => u.userId));
+      // `data.users_in_loss` is now a string[], so no .map() is needed.
+      const userSet = new Set(data.users_in_loss || []); // <-- MODIFIED
       this.inLossCohorts.set(ticker, userSet);
     }
     return this.inLossCohorts;
@@ -79,6 +79,8 @@ class InProfitAssetCrowdFlow {
 
   /**
    * Helper to get the cohort data from the dependency.
+   * --- MODIFIED ---
+   * Reads `data.users_in_profit` as a string array, not an object array.
    */
   _getInProfitCohorts(fetchedDependencies) {
     if (this.inProfitCohorts) {
@@ -94,7 +96,8 @@ class InProfitAssetCrowdFlow {
     // Map<ticker, Set<userId>>
     this.inProfitCohorts = new Map();
     for (const [ticker, data] of Object.entries(pnlStatusData)) {
-      const userSet = new Set(data.users_in_profit.map(u => u.userId));
+      // `data.users_in_profit` is now a string[], so no .map() is needed.
+      const userSet = new Set(data.users_in_profit || []); // <-- MODIFIED
       this.inProfitCohorts.set(ticker, userSet);
     }
     return this.inProfitCohorts;
@@ -54,7 +54,7 @@ class DailyOwnershipPerSector {
     // 2. Get the insights document
     const insightsDoc = rootData.insights;
     if (!insightsDoc || !Array.isArray(insightsDoc.insights)) {
-      dependencies.logger.log('WARN', `[daily-ownership-per-sector] No 'insights' data found for ${dateStr}.`);
+      // dependencies.logger.log('WARN', `[daily-ownership-per-sector] No 'insights' data found for ${dateStr}.`); TODO: This is broken, returns log undefined
       return {};
     }
 
@@ -6,14 +6,19 @@
  *
  * It uses the distribution of P&L from 'pnl_distribution_per_stock'
  * to calculate variance (risk).
+ *
+ * --- FIX: 2025-11-12 ---
+ * Refactored this file to be a "meta" calculation.
+ * 1. Removed constructor, getResult, reset, and the no-op 'process'.
+ * 2. Added the required `async process(dStr, deps, config, fetchedDeps)` method.
+ * 3. Moved all logic into `process`.
+ * 4. Updated logic to read from `fetchedDeps['pnl_distribution_per_stock']`.
+ * 5. Updated data access to read from the new `data.stats` object
+ *    provided by the fixed dependency.
  */
 const { loadInstrumentMappings } = require('../../utils/sector_mapping_provider');
 
 class CrowdSharpeRatioProxy {
-  constructor() {
-    this.mappings = null;
-  }
-
   /**
    * Defines the output schema for this calculation.
    * @returns {object} JSON Schema object
@@ -54,25 +59,36 @@ class CrowdSharpeRatioProxy {
     ];
   }
 
-  process() {
-    // No-op
-  }
-
-  async getResult(fetchedDependencies) {
+  /**
+   * --- FIX: This is the new main execution method for meta-calcs ---
+   * It receives all dependencies from the orchestrator.
+   */
+  async process(dateStr, dependencies, config, fetchedDependencies) {
+    // --- FIX: Load dependency data from the argument ---
     const pnlDistData = fetchedDependencies['pnl_distribution_per_stock'];
 
     if (!pnlDistData) {
+      dependencies.logger.log('WARN', `[crowd_sharpe_ratio_proxy] Missing dependency 'pnl_distribution_per_stock' for ${dateStr}. Skipping.`);
       return {};
     }
 
-    if (!this.mappings) {
-      this.mappings = await loadInstrumentMappings();
+    // --- FIX: Load mappings inside the process method ---
+    const mappings = await loadInstrumentMappings();
+    if (!mappings || !mappings.instrumentToTicker) {
+      dependencies.logger.log('ERROR', `[crowd_sharpe_ratio_proxy] Failed to load instrument mappings.`);
+      return {};
     }
 
     const result = {};
 
-    for (const [instrumentId, data] of Object.entries(pnlDistData)) {
-      const { sum, sumSq, count } = data;
+    for (const [ticker, data] of Object.entries(pnlDistData)) {
+
+      // --- FIX: Read from the new 'stats' sub-object ---
+      if (!data.stats) {
+        continue; // Skip if data is malformed
+      }
+
+      const { sum, sumSq, count } = data.stats;
 
       if (count < 2) {
         continue; // Need at least 2 data points for variance
@@ -102,7 +118,8 @@ class CrowdSharpeRatioProxy {
       // Sharpe = Mean(Return) / StdDev(Return)
       const sharpeProxy = mean / stdDev;
 
-      const ticker = this.mappings.instrumentToTicker[instrumentId] || `id_${instrumentId}`;
+      // --- FIX: Data is already keyed by ticker, no mapping needed ---
+      // const ticker = this.mappings.instrumentToTicker[instrumentId] || `id_${instrumentId}`;
 
       result[ticker] = {
         sharpe_ratio_proxy: sharpeProxy,
@@ -115,10 +132,6 @@ class CrowdSharpeRatioProxy {
 
     return result;
   }
-
-  reset() {
-    this.mappings = null;
-  }
 }
 
 module.exports = CrowdSharpeRatioProxy;
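
The mean/variance computation that sits between the two hunks above (original lines 79-117) is unchanged and therefore not shown in this diff. For context, here is a minimal sketch of how a Sharpe-style proxy can be derived from the `sum`, `sumSq`, and `count` fields now read from `data.stats`; the function name and exact formula are illustrative and not taken from the package.

// Illustrative only: derive mean, variance, and a Sharpe-style ratio from
// running totals. Assumes count >= 2, matching the guard shown in the diff.
function sharpeProxyFromStats({ sum, sumSq, count }) {
  const mean = sum / count;
  // Sample variance via the computational formula: (sumSq - sum^2 / count) / (count - 1)
  const variance = (sumSq - (sum * sum) / count) / (count - 1);
  const stdDev = Math.sqrt(Math.max(variance, 0)); // clamp tiny negative values from float error
  return stdDev === 0 ? 0 : mean / stdDev;         // Sharpe = Mean / StdDev (no risk-free rate)
}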
@@ -21,8 +21,10 @@ class SocialTopicDriverIndex {
       "properties": {
         "topic": { "type": "string" },
         "driver_score": { "type": "number" },
-        "correlation_flow_30d": { "type": "number" },
-        "correlation_price_30d": { "type": "number" }
+        // These fields are from an older version but kept for schema
+        // compatibility. They will be null in the corrected logic.
+        "correlation_flow_30d": { "type": ["number", "null"] },
+        "correlation_price_30d": { "type": ["number", "null"] }
       },
       "required": ["topic", "driver_score"]
     };
@@ -48,6 +50,7 @@ class SocialTopicDriverIndex {
 
   /**
    * Statically declare dependencies.
+   * (This was already correct)
    */
   static getDependencies() {
     return [
@@ -59,6 +62,13 @@ class SocialTopicDriverIndex {
     // No-op
   }
 
+  /**
+   * --- LOGIC FIXED ---
+   * This function is rewritten to correctly consume the output of
+   * 'social-topic-predictive-potential'. It aggregates the
+   * 'predictivePotential' score for each topic across *all* tickers
+   * to create a global driver score.
+   */
   getResult(fetchedDependencies) {
     const potentialData = fetchedDependencies['social-topic-predictive-potential'];
 
@@ -67,23 +77,60 @@ class SocialTopicDriverIndex {
       all_topics: []
     };
 
-    if (!potentialData) {
+    // The dependency returns a nested object. We need 'daily_topic_signals'.
+    const dailyTopicSignals = potentialData?.daily_topic_signals;
+
+    if (!dailyTopicSignals || Object.keys(dailyTopicSignals).length === 0) {
       return defaults;
     }
 
+    // Use a Map to aggregate scores for each topic
+    const topicAggregator = new Map();
+
+    // Iterate over each TICKER (e.g., 'AAPL', 'TSLA') in the signals
+    for (const tickerData of Object.values(dailyTopicSignals)) {
+
+      // Combine bullish and bearish topics for that ticker
+      const allTickerTopics = [
+        ...(tickerData.topPredictiveBullishTopics || []),
+        ...(tickerData.topPredictiveBearishTopics || [])
+      ];
+
+      // Iterate over the topics *for that ticker*
+      for (const topicData of allTickerTopics) {
+        const topicName = topicData.topic;
+
+        // Use the 'predictivePotential' score calculated by the dependency
+        const score = topicData.predictivePotential || 0;
+
+        if (!topicAggregator.has(topicName)) {
+          topicAggregator.set(topicName, { totalScore: 0, count: 0 });
+        }
+
+        const agg = topicAggregator.get(topicName);
+        agg.totalScore += score;
+        agg.count += 1;
+      }
+    }
+
     const allTopics = [];
-    for (const [topic, data] of Object.entries(potentialData)) {
-      // Create a "Driver Score" - prioritize flow correlation
-      const score = (data.correlation_flow_30d * 0.7) + (data.correlation_price_30d * 0.3);
+    // Now, create the final ranked list
+    for (const [topic, data] of topicAggregator.entries()) {
+      if (data.count === 0) continue;
+
+      // Calculate the average driver score across all tickers
+      const avgScore = data.totalScore / data.count;
 
       allTopics.push({
         topic: topic,
-        driver_score: score,
-        correlation_flow_30d: data.correlation_flow_30d,
-        correlation_price_30d: data.correlation_price_30d
+        driver_score: avgScore,
+        // Set old/incompatible fields to null to match schema
+        correlation_flow_30d: null,
+        correlation_price_30d: null
       });
     }
 
+    // Sort by the new, correct driver_score
    allTopics.sort((a, b) => b.driver_score - a.driver_score);
 
     return {
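
To make the aggregation above concrete, here is a hedged example of the input shape the rewritten getResult expects and the averaged score it produces. The ticker names and numbers are invented; the field names come from the code in this hunk.

// Invented example input, shaped as the code above expects:
const potentialData = {
  daily_topic_signals: {
    AAPL: {
      topPredictiveBullishTopics: [{ topic: 'ai', predictivePotential: 0.8 }],
      topPredictiveBearishTopics: []
    },
    TSLA: {
      topPredictiveBullishTopics: [{ topic: 'ai', predictivePotential: 0.4 }],
      topPredictiveBearishTopics: [{ topic: 'recalls', predictivePotential: 0.6 }]
    }
  }
};
// 'ai' is seen for two tickers, so driver_score = (0.8 + 0.4) / 2 = 0.6;
// 'recalls' is seen once, so driver_score = 0.6 / 1 = 0.6.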
@@ -152,7 +152,8 @@ function _findPriceForward(instrumentId, dateStr, priceMap) {
 class SocialTopicPredictivePotentialIndex {
 
   static getDependencies() {
-    return ['social-topic-driver-index'];
+    // --- FIX 1: Changed from 'social-topic-driver-index' to break the cycle ---
+    return ['social-topic-sentiment-matrix'];
   }
 
   constructor() {
@@ -195,10 +196,13 @@ class SocialTopicPredictivePotentialIndex {
     // pLimit is not in calculationUtils by default, so we'll use our own
     // If it were, we'd use: this.pLimit = calculationUtils.pLimit(MAX_CONCURRENT_TRANSACTIONS);
     await this._loadDependencies(calculationUtils);
-    const todaySignals = fetchedDependencies['social-topic-driver-index'];
+
+    // --- FIX 2: Read from the correct dependency ---
+    const todaySignals = fetchedDependencies['social-topic-sentiment-matrix'];
 
     if (!todaySignals || Object.keys(todaySignals).length === 0) {
-      logger.log('WARN', `[SocialTopicPredictive] Missing or empty dependency 'social-topic-driver-index' for ${dateStr}. Skipping.`);
+      // --- FIX 2.1: Updated log message ---
+      logger.log('WARN', `[SocialTopicPredictive] Missing or empty dependency 'social-topic-sentiment-matrix' for ${dateStr}. Skipping.`);
       return null;
     }
 
@@ -264,6 +268,8 @@ class SocialTopicPredictivePotentialIndex {
       this._updateForwardReturns(state, instrumentId, dateStr, todayPrice, this.priceMap);
 
       // --- 4c. Add New Signals (Factored Helper) ---
+      // We assume 'todaySignal' (from social-topic-sentiment-matrix)
+      // has an 'allDrivers' property.
       this._addNewSignals(state, todaySignal.allDrivers || [], dateStr);
 
       // --- 4d. Recalculate Correlations (Factored Helper) ---
@@ -5,21 +5,32 @@
  * sample are in profit versus in loss?"
  *
  * This provides a crowd-wide P&L status for each instrument.
+ *
+ * --- FIX ---
+ * This version is modified to only store user *IDs* in the arrays,
+ * not the full user P&L objects, to prevent exceeding the
+ * 1 MiB Firestore document size limit.
  */
 const { loadInstrumentMappings } = require('../../utils/sector_mapping_provider');
 
 class AssetPnlStatus {
   constructor() {
-    // We will store { [instrumentId]: { in_profit: Set(), in_loss: Set() } }
+    // We will store { [instrumentId]: { in_profit: Map(), in_loss: Map() } }
+    // The maps will store <userId, pnl> but only the keys (userIds) will be saved.
    this.assets = new Map();
    this.mappings = null;
   }
 
   /**
    * Defines the output schema for this calculation.
+   * --- MODIFIED ---
+   * The `users_in_profit` and `users_in_loss` schemas are changed
+   * from `items: userSchema` to `items: { "type": "string" }`.
    * @returns {object} JSON Schema object
    */
   static getSchema() {
+    /*
+    // The userSchema is no longer needed in the output
     const userSchema = {
       "type": "object",
       "properties": {
@@ -28,6 +39,7 @@ class AssetPnlStatus {
       },
       "required": ["userId", "pnl"]
     };
+    */
 
     const tickerSchema = {
       "type": "object",
@@ -47,13 +59,13 @@ class AssetPnlStatus {
         },
         "users_in_profit": {
           "type": "array",
-          "description": "List of users in profit.",
-          "items": userSchema
+          "description": "List of user IDs in profit.",
+          "items": { "type": "string" } // <-- MODIFIED
         },
         "users_in_loss": {
           "type": "array",
-          "description": "List of users in loss.",
-          "items": userSchema
+          "description": "List of user IDs in loss.",
+          "items": { "type": "string" } // <-- MODIFIED
         }
       },
       "required": ["in_profit_count", "in_loss_count", "profit_ratio", "users_in_profit", "users_in_loss"]
@@ -100,6 +112,11 @@ class AssetPnlStatus {
     }
   }
 
+  /**
+   * --- MODIFIED ---
+   * This now saves an array of strings (user IDs) instead of
+   * an array of {userId, pnl} objects to save space.
+   */
   async getResult() {
     if (!this.mappings) {
       this.mappings = await loadInstrumentMappings();
@@ -118,9 +135,9 @@ class AssetPnlStatus {
         in_profit_count: profitCount,
         in_loss_count: lossCount,
         profit_ratio: (profitCount / total) * 100,
-        // Convert Maps to arrays of objects for the final result
-        users_in_profit: Array.from(data.in_profit, ([userId, pnl]) => ({ userId, pnl })),
-        users_in_loss: Array.from(data.in_loss, ([userId, pnl]) => ({ userId, pnl }))
+        // Convert Maps to arrays of *keys* (user IDs)
+        users_in_profit: Array.from(data.in_profit.keys()), // <-- MODIFIED
+        users_in_loss: Array.from(data.in_loss.keys()) // <-- MODIFIED
       };
     }
   }
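
Combining this change with the cohort-flow hunks earlier in the diff, a per-ticker entry written by this calculation now looks roughly like the following. The values are invented; the field names come from the schema above.

// Invented per-ticker output of AssetPnlStatus after this change.
// users_in_profit / users_in_loss are plain string arrays, which is why the
// cohort flows can build a Set from them directly, without .map().
const exampleTickerEntry = {
  in_profit_count: 2,
  in_loss_count: 1,
  profit_ratio: 66.67, // (2 / 3) * 100
  users_in_profit: ['user-a', 'user-b'],
  users_in_loss: ['user-c']
};
const userSet = new Set(exampleTickerEntry.users_in_loss || []); // as in InLossAssetCrowdFlow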
@@ -7,6 +7,13 @@
  * REFACTOR: This calculation now aggregates the distribution into
  * predefined buckets on the server-side, returning a chart-ready
  * histogram object instead of raw arrays.
+ *
+ * --- FIX: 2025-11-12 ---
+ * This calculation is a dependency for crowd_sharpe_ratio_proxy,
+ * which requires sum, sumSq, and count for variance calculations.
+ * This file has been updated to provide *both* the histogram
+ * and a 'stats' object containing these required values.
+ * ---------------------
  */
 const { loadInstrumentMappings } = require('../../utils/sector_mapping_provider');
 
@@ -31,28 +38,48 @@ class PnlDistributionPerStock {
   /**
    * Defines the output schema for this calculation.
    * REFACTOR: Schema now describes the server-calculated histogram.
-   * @returns {object} JSON Schema object
+   *
+   * --- FIX: 2025-11-12 ---
+   * Added 'stats' object to the schema to support downstream
+   * meta-calculations like crowd_sharpe_ratio_proxy.
    */
   static getSchema() {
     const bucketSchema = {
       "type": "object",
-      "description": "Histogram of P&L distribution for a single asset.",
+      "description": "Histogram and stats of P&L distribution for a single asset.",
       "properties": {
-        "loss_heavy": { "type": "number", "description": "Count of positions with > 50% loss" },
-        "loss_medium": { "type": "number", "description": "Count of positions with 25-50% loss" },
-        "loss_light": { "type": "number", "description": "Count of positions with 0-25% loss" },
-        "gain_light": { "type": "number", "description": "Count of positions with 0-25% gain" },
-        "gain_medium": { "type": "number", "description": "Count of positions with 25-50% gain" },
-        "gain_heavy": { "type": "number", "description": "Count of positions with 50-100% gain" },
-        "gain_extreme": { "type": "number", "description": "Count of positions with > 100% gain" },
-        "total_positions": { "type": "number", "description": "Total positions counted" }
+        "histogram": {
+          "type": "object",
+          "description": "Histogram of P&L distribution.",
+          "properties": {
+            "loss_heavy": { "type": "number", "description": "Count of positions with > 50% loss" },
+            "loss_medium": { "type": "number", "description": "Count of positions with 25-50% loss" },
+            "loss_light": { "type": "number", "description": "Count of positions with 0-25% loss" },
+            "gain_light": { "type": "number", "description": "Count of positions with 0-25% gain" },
+            "gain_medium": { "type": "number", "description": "Count of positions with 25-50% gain" },
+            "gain_heavy": { "type": "number", "description": "Count of positions with 50-100% gain" },
+            "gain_extreme": { "type": "number", "description": "Count of positions with > 100% gain" },
+            "total_positions": { "type": "number", "description": "Total positions counted" }
+          },
+          "required": ["total_positions"]
+        },
+        "stats": {
+          "type": "object",
+          "description": "Raw statistics needed for variance/Sharpe calculations.",
+          "properties": {
+            "sum": { "type": "number", "description": "Sum of all P&L percentages" },
+            "sumSq": { "type": "number", "description": "Sum of all squared P&L percentages" },
+            "count": { "type": "number", "description": "Total count of positions" }
+          },
+          "required": ["sum", "sumSq", "count"]
+        }
       },
-      "required": ["total_positions"]
+      "required": ["histogram", "stats"]
     };
 
     return {
       "type": "object",
-      "description": "Calculates a histogram of P&L percentage distribution for all open positions, per asset.",
+      "description": "Calculates a histogram and raw stats of P&L percentage distribution for all open positions, per asset.",
       "patternProperties": {
         "^.*$": bucketSchema // Ticker
       },
@@ -90,6 +117,9 @@ class PnlDistributionPerStock {
   /**
    * REFACTOR: This method now calculates the distribution on the server.
    * It transforms the raw P&L arrays into histogram bucket counts.
+   *
+   * --- FIX: 2025-11-12 ---
+   * Also calculates and returns sum, sumSq, and count.
    */
   async getResult() {
     if (!this.mappings) {
@@ -100,6 +130,11 @@ class PnlDistributionPerStock {
 
     for (const [instrumentId, pnlValues] of this.pnlMap.entries()) {
       const ticker = this.mappings.instrumentToTicker[instrumentId] || `id_${instrumentId}`;
+      const count = pnlValues.length;
+
+      if (count === 0) {
+        continue;
+      }
 
       // 1. Initialize the histogram object for this ticker
       const histogram = {
@@ -110,11 +145,20 @@ class PnlDistributionPerStock {
         gain_medium: 0,
         gain_heavy: 0,
         gain_extreme: 0,
-        total_positions: pnlValues.length
+        total_positions: count
       };
 
-      // 2. Process all P&L values into the buckets
+      // --- FIX: Initialize stats ---
+      let sum = 0;
+      let sumSq = 0;
+
+      // 2. Process all P&L values into buckets and calculate stats
       for (const pnl of pnlValues) {
+        // --- FIX: Add to stats ---
+        sum += pnl;
+        sumSq += (pnl * pnl);
+
+        // Add to histogram
         for (const bucket of BUCKETS) {
           if (pnl >= bucket.min && pnl < bucket.max) {
             histogram[bucket.label]++;
@@ -122,9 +166,19 @@ class PnlDistributionPerStock {
           }
         }
       }
+
+      // --- FIX: Create stats object ---
+      const stats = {
+        sum: sum,
+        sumSq: sumSq,
+        count: count
+      };
 
-      // 3. Add the aggregated histogram to the final result
-      result[ticker] = histogram;
+      // 3. Add the aggregated histogram and stats to the final result
+      result[ticker] = {
+        histogram: histogram,
+        stats: stats
+      };
     }
     return result;
   }
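
For reference, a per-ticker entry produced by this calculation after the change would look roughly like the following. The values are invented and the field names come from the schema hunk above; crowd_sharpe_ratio_proxy reads only the stats branch via `data.stats`.

// Invented example of one entry in the result object returned by getResult():
const exampleEntry = {
  histogram: {
    loss_heavy: 1, loss_medium: 2, loss_light: 5,
    gain_light: 8, gain_medium: 3, gain_heavy: 1,
    gain_extreme: 0, total_positions: 20
  },
  stats: { sum: 42.5, sumSq: 1210.3, count: 20 } // consumed downstream for the variance calculation
};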
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "aiden-shared-calculations-unified",
-  "version": "1.0.70",
+  "version": "1.0.72",
   "description": "Shared calculation modules for the BullTrackers Computation System.",
   "main": "index.js",
   "files": [
@@ -24,7 +24,8 @@
     "@google-cloud/firestore": "^7.11.3",
     "sharedsetup": "latest",
     "require-all": "^3.0.0",
-    "dotenv": "latest"
+    "dotenv": "latest",
+    "viz.js": "^2.1.2"
   },
   "devDependencies": {
     "bulltracker-deployer": "file:../bulltracker-deployer"