bulltrackers-module 1.0.129 → 1.0.131
@@ -12,10 +12,17 @@ function checkRootDependencies(calcManifest, rootDataStatus) { if (!calcManifest
 }
 
 /** Stage 3: Check root data availability for a date */
-
+// --- FIX: Passes the full 'dependencies' object down ---
+async function checkRootDataAvailability(dateStr, config, dependencies) {
+  const { logger } = dependencies; // Destructure logger for local use
   logger.log('INFO', `[PassRunner] Checking root data for ${dateStr}...`);
   try {
-    const [portfolioRefs, insightsData, socialData, historyRefs] = await Promise.all([
+    const [portfolioRefs, insightsData, socialData, historyRefs] = await Promise.all([
+      getPortfolioPartRefs(config, dependencies, dateStr), // Pass full 'dependencies'
+      loadDailyInsights(config, dependencies, dateStr), // Pass full 'dependencies'
+      loadDailySocialPostInsights(config, dependencies, dateStr), // Pass full 'dependencies'
+      getHistoryPartRefs(config, dependencies, dateStr) // Pass full 'dependencies'
+    ]);
     const hasPortfolio = !!(portfolioRefs?.length), hasInsights = !!insightsData, hasSocial = !!socialData, hasHistory = !!(historyRefs?.length);
     if (!(hasPortfolio||hasInsights||hasSocial||hasHistory)) { logger.log('WARN', `[PassRunner] No root data for ${dateStr}.`); return null; }
     return { portfolioRefs: portfolioRefs||[], insightsData: insightsData||null, socialData: socialData||null, historyRefs: historyRefs||[], status: { hasPortfolio, hasInsights, hasSocial, hasHistory } };
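The hunk above is a dependency-injection fix: checkRootDataAvailability now receives the full 'dependencies' container and forwards it to each data loader, instead of passing a bare logger, so each loader can destructure whichever services it needs. The loader implementations are not part of this diff; a minimal sketch of the shape they would take under that convention follows. The 'db' handle and the 'portfolio_parts' collection name here are assumptions for illustration, not taken from this package.

// Hypothetical loader consistent with the new call sites above.
async function getPortfolioPartRefs(config, dependencies, dateStr) {
  // Same container that checkRootDataAvailability receives and passes down.
  const { db, logger } = dependencies;
  logger.log('INFO', `[Loader] Listing portfolio parts for ${dateStr}...`);
  // Collection path is illustrative only.
  const snap = await db.collection('portfolio_parts').doc(dateStr).collection('parts').get();
  return snap.docs.map((d) => d.ref); // return refs only; contents are streamed later
}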
@@ -69,20 +76,159 @@ async function streamAndProcess(dateStr, todayRefs, state, passName, config, dep
 
 /** Stage 9: Run standard computations */
 async function runStandardComputationPass(date, calcs, passName, config, deps, rootData) {
-  const dStr=date.toISOString().slice(0,10), logger=deps.logger;
+  const dStr = date.toISOString().slice(0, 10), logger = deps.logger;
   logger.log('INFO', `[${passName}] Running ${dStr} with ${calcs.length} calcs.`);
-  const fullRoot=await loadHistoricalData(date, calcs, config, deps, rootData);
-  const state=initializeCalculators(calcs, logger);
+  const fullRoot = await loadHistoricalData(date, calcs, config, deps, rootData);
+  const state = initializeCalculators(calcs, logger);
   await streamAndProcess(dStr, fullRoot.portfolioRefs, state, passName, config, deps, fullRoot);
-
-
+
+  // --- START: FULL COMMIT LOGIC ---
+  let success = 0;
+  const standardWrites = [];
+  const shardedWrites = {}; // Format: { [collectionName]: { [docId]: data } }
+
+  for (const name in state) {
+    const calc = state[name];
+    if (!calc || typeof calc.getResult !== 'function') continue;
+
+    try {
+      const result = await Promise.resolve(calc.getResult());
+      if (result && Object.keys(result).length > 0) {
+
+        // Separate sharded data from standard data
+        const standardResult = {};
+        for (const key in result) {
+          if (key.startsWith('sharded_')) {
+            // This is sharded data, e.g., sharded_user_profitability
+            // The value is expected to be: { "collection_name": { "doc1": {...}, "doc2": {...} } }
+            const shardedData = result[key];
+            for (const collectionName in shardedData) {
+              if (!shardedWrites[collectionName]) shardedWrites[collectionName] = {};
+              // Merge doc data (e.g., combining data for "user_profitability_shard_1")
+              Object.assign(shardedWrites[collectionName], shardedData[collectionName]);
+            }
+          } else {
+            // This is a standard, single-doc result
+            standardResult[key] = result[key];
+          }
+        }
+
+        // Add standard result to the batch
+        if (Object.keys(standardResult).length > 0) {
+          const docRef = deps.db.collection(config.resultsCollection).doc(dStr)
+            .collection(config.resultsSubcollection).doc(calc.manifest.category)
+            .collection(config.computationsSubcollection).doc(name);
+
+          standardWrites.push({ ref: docRef, data: standardResult });
+        }
+        success++; // Mark as success even if only sharded data was produced
+      }
+    } catch (e) {
+      logger.log('ERROR', `getResult failed ${name} for ${dStr}`, { err: e.message, stack: e.stack });
+    }
+  }
+
+  // Commit standard (non-sharded) writes in chunks
+  if (standardWrites.length > 0) {
+    await commitBatchInChunks(config, deps, standardWrites, `${passName} Standard ${dStr}`);
+  }
+
+  // Commit all sharded writes
+  for (const collectionName in shardedWrites) {
+    const docs = shardedWrites[collectionName];
+    const shardedDocWrites = [];
+    for (const docId in docs) {
+      // This assumes docId is the full path for sharded docs, or just the doc ID
+      // Based on user_profitability_tracker, it's just the doc ID.
+      const docRef = deps.db.collection(collectionName).doc(docId);
+      shardedDocWrites.push({ ref: docRef, data: docs[docId] });
+    }
+    if (shardedDocWrites.length > 0) {
+      await commitBatchInChunks(config, deps, shardedDocWrites, `${passName} Sharded ${collectionName} ${dStr}`);
+    }
+  }
+  // --- END: FULL COMMIT LOGIC ---
+
+  logger.log(success === calcs.length ? 'SUCCESS' : 'WARN', `[${passName}] Completed ${dStr}. Success: ${success}/${calcs.length}`);
 }
 
 /** Stage 10: Run meta computations */
-async function runMetaComputationPass(date, calcs, passName, config, deps, fetchedDeps, rootData) {
+async function runMetaComputationPass(date, calcs, passName, config, deps, fetchedDeps, rootData) {
+  const dStr = date.toISOString().slice(0, 10), logger = deps.logger;
   logger.log('INFO', `[${passName}] Running ${dStr} with ${calcs.length} calcs.`);
-
-
+
+  // --- START: FULL COMMIT LOGIC ---
+  let success = 0;
+  const standardWrites = [];
+  const shardedWrites = {}; // Format: { [collectionName]: { [docId]: data } }
+
+  for (const mCalc of calcs) {
+    const name = normalizeName(mCalc.name), Cl = mCalc.class;
+    if (typeof Cl !== 'function') {
+      logger.log('ERROR', `Invalid class ${name}`);
+      continue;
+    }
+    const inst = new Cl();
+    try {
+      // Pass the full dependencies object to process()
+      const result = await Promise.resolve(inst.process(dStr, { ...deps, rootData }, config, fetchedDeps));
+
+      if (result && Object.keys(result).length > 0) {
+
+        // Separate sharded data from standard data
+        const standardResult = {};
+        for (const key in result) {
+          if (key.startsWith('sharded_')) {
+            const shardedData = result[key];
+            for (const collectionName in shardedData) {
+              if (!shardedWrites[collectionName]) shardedWrites[collectionName] = {};
+              Object.assign(shardedWrites[collectionName], shardedData[collectionName]);
+            }
+          } else {
+            standardResult[key] = result[key];
+          }
+        }
+
+        // Add standard result to the batch
+        if (Object.keys(standardResult).length > 0) {
+          const docRef = deps.db.collection(config.resultsCollection).doc(dStr)
+            .collection(config.resultsSubcollection).doc(mCalc.category)
+            .collection(config.computationsSubcollection).doc(name);
+
+          standardWrites.push({ ref: docRef, data: standardResult });
+        }
+        success++;
+      }
+    } catch (e) {
+      logger.log('ERROR', `Meta-calc failed ${name} for ${dStr}`, { err: e.message, stack: e.stack });
+    }
+  }
+
+  // Commit standard (non-sharded) writes in chunks
+  if (standardWrites.length > 0) {
+    await commitBatchInChunks(config, deps, standardWrites, `${passName} Meta ${dStr}`);
+  }
+
+  // Commit all sharded writes
+  for (const collectionName in shardedWrites) {
+    const docs = shardedWrites[collectionName];
+    const shardedDocWrites = [];
+    for (const docId in docs) {
+      // Special case for stateful meta-calcs that write to a specific path
+      const docRef = docId.includes('/')
+        ? deps.db.doc(docId) // docId is a full path
+        : deps.db.collection(collectionName).doc(docId); // docId is just an ID
+
+      shardedDocWrites.push({ ref: docRef, data: docs[docId] });
+    }
+    if (shardedDocWrites.length > 0) {
+      await commitBatchInChunks(config, deps, shardedDocWrites, `${passName} Sharded ${collectionName} ${dStr}`);
+    }
+  }
+  // --- END: FULL COMMIT LOGIC ---
+
+  logger.log(success === calcs.length ? 'SUCCESS' : 'WARN', `[${passName}] Completed ${dStr}. Success: ${success}/${calcs.length}`);
 }
 
-
+
+module.exports = { groupByPass, checkRootDataAvailability, fetchDependenciesForPass, filterCalculations, runStandardComputationPass, runMetaComputationPass };
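Both commit blocks in the hunk above rely on the same 'sharded_' key convention: a calculator's result object may mix ordinary fields, which are batched into one document under the dated results hierarchy, with keys prefixed 'sharded_' whose value is shaped { [collectionName]: { [docId]: data } }. A minimal sketch of a conforming getResult() follows; the class name, shard payloads, and field names are illustrative, not taken from this diff.

// Sketch of a calculator producing both standard and sharded output,
// following the shapes described in the commit-logic comments above.
class ExampleProfitabilityCalc {
  getResult() {
    return {
      // Standard field: written as one doc under results/<date>/.../<calcName>.
      summary: { processedUsers: 1234 },
      // Sharded field: routed to its own collection, one write per doc ID.
      // In runMetaComputationPass, a doc ID containing '/' is treated as a full path.
      sharded_user_profitability: {
        user_profitability: {
          user_profitability_shard_1: { users: { /* ... */ } },
          user_profitability_shard_2: { users: { /* ... */ } }
        }
      }
    };
  }
}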
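commitBatchInChunks is invoked for every write group but its implementation falls outside this hunk. Assuming it consumes the { ref, data } arrays built above and writes through the Firestore Admin SDK, a plausible sketch is the following; the chunk size and merge behavior are assumptions.

// Plausible shape of commitBatchInChunks; not shown in this diff.
async function commitBatchInChunks(config, deps, writes, label) {
  const CHUNK = 400; // stay safely under Firestore's 500-writes-per-batch limit
  for (let i = 0; i < writes.length; i += CHUNK) {
    const batch = deps.db.batch();
    for (const w of writes.slice(i, i + CHUNK)) {
      batch.set(w.ref, w.data, { merge: true }); // merge is an assumption
    }
    await batch.commit();
    deps.logger.log('INFO', `[${label}] Committed ${Math.min(i + CHUNK, writes.length)}/${writes.length} writes.`);
  }
}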