bulltrackers-module 1.0.219 → 1.0.220

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. package/functions/computation-system/WorkflowOrchestrator.js +153 -0
  2. package/functions/computation-system/context/ContextFactory.js +63 -0
  3. package/functions/computation-system/context/ManifestBuilder.js +240 -0
  4. package/functions/computation-system/controllers/computation_controller.js +12 -4
  5. package/functions/computation-system/data/AvailabilityChecker.js +75 -0
  6. package/functions/computation-system/data/CachedDataLoader.js +63 -0
  7. package/functions/computation-system/data/DependencyFetcher.js +70 -0
  8. package/functions/computation-system/executors/MetaExecutor.js +68 -0
  9. package/functions/computation-system/executors/PriceBatchExecutor.js +99 -0
  10. package/functions/computation-system/executors/StandardExecutor.js +115 -0
  11. package/functions/computation-system/helpers/computation_dispatcher.js +3 -3
  12. package/functions/computation-system/helpers/computation_worker.js +44 -18
  13. package/functions/computation-system/layers/mathematics.js +1 -1
  14. package/functions/computation-system/persistence/FirestoreUtils.js +64 -0
  15. package/functions/computation-system/persistence/ResultCommitter.js +118 -0
  16. package/functions/computation-system/persistence/StatusRepository.js +23 -0
  17. package/functions/computation-system/topology/HashManager.js +35 -0
  18. package/functions/computation-system/utils/utils.js +38 -10
  19. package/index.js +8 -3
  20. package/package.json +1 -1
  21. package/functions/computation-system/helpers/computation_manifest_builder.js +0 -320
  22. package/functions/computation-system/helpers/computation_pass_runner.js +0 -119
  23. package/functions/computation-system/helpers/orchestration_helpers.js +0 -352
@@ -1,352 +0,0 @@
- /**
-  * FILENAME: computation-system/helpers/orchestration_helpers.js
-  * FEATURE: Dynamic Auto-Sharding (Transparent 1MB Limit Handling)
-  *
-  * DESCRIPTION:
-  * This module orchestrates the execution of computations. It handles:
-  * 1. Data Availability Checks
-  * 2. Dependency Injection (fetching results from previous passes)
-  * 3. Transparent Auto-Sharding:
-  *    - Writes: Automatically detects when a result exceeds a safety threshold just under Firestore's 1MB document limit and splits it into a '_shards' subcollection.
-  *    - Reads: Automatically detects sharded pointers and re-assembles the data.
-  */
- 
- const { ComputationController } = require('../controllers/computation_controller');
- const { batchStoreSchemas } = require('../utils/schema_capture');
- const { normalizeName, commitBatchInChunks } = require('../utils/utils');
- const {
-   getPortfolioPartRefs, loadDailyInsights, loadDailySocialPostInsights,
-   getHistoryPartRefs, streamPortfolioData, streamHistoryData,
-   getRelevantShardRefs, loadDataByRefs
- } = require('../utils/data_loader');
- const mathLayer = require('../layers/index.js');
- const pLimit = require('p-limit');
- 
- // Mappings for backward compatibility
- const LEGACY_MAPPING = {
-   DataExtractor: 'extract', HistoryExtractor: 'history', MathPrimitives: 'compute',
-   Aggregators: 'aggregate', Validators: 'validate', SignalPrimitives: 'signals',
-   SCHEMAS: 'schemas', DistributionAnalytics: 'distribution', TimeSeries: 'TimeSeries',
-   priceExtractor: 'priceExtractor', InsightsExtractor: 'insights', UserClassifier: 'classifier',
-   CognitiveBiases: 'bias', SkillAttribution: 'skill', Psychometrics: 'psychometrics'
- };
- 
- function groupByPass(manifest) { return manifest.reduce((acc, calc) => { (acc[calc.pass] = acc[calc.pass] || []).push(calc); return acc; }, {}); }
- 
- function validateResultPatterns(logger, calcName, results, category) { // TODO Unused? Could use this for validation
-   if (category === 'speculator' || category === 'speculators') return;
-   const tickers = Object.keys(results); const totalItems = tickers.length; if (totalItems < 5) return;
-   const sampleTicker = tickers.find(t => results[t] && typeof results[t] === 'object'); if (!sampleTicker) return;
-   Object.keys(results[sampleTicker]).forEach(key => {
-     if (key.startsWith('_')) return;
-     let nullCount = 0, nanCount = 0, undefinedCount = 0;
-     for (const t of tickers) { const val = results[t][key]; if (val === null) nullCount++; if (val === undefined) undefinedCount++; if (typeof val === 'number' && isNaN(val)) nanCount++; }
-     if (nanCount === totalItems) logger.log('ERROR', `[DataQuality] Calc '${calcName}' field '${key}' is NaN for 100% of items.`);
-     else if (undefinedCount === totalItems) logger.log('ERROR', `[DataQuality] Calc '${calcName}' field '${key}' is UNDEFINED for 100% of items.`);
-     else if (nullCount > (totalItems * 0.9)) logger.log('WARN', `[DataQuality] Calc '${calcName}' field '${key}' is NULL for ${nullCount}/${totalItems} items.`);
-   });
- }
- 
- function checkRootDependencies(calcManifest, rootDataStatus) {
-   const missing = [];
-   if (!calcManifest.rootDataDependencies) return { canRun: true, missing };
-   for (const dep of calcManifest.rootDataDependencies) {
-     if (dep === 'portfolio' && !rootDataStatus.hasPortfolio) missing.push('portfolio');
-     else if (dep === 'insights' && !rootDataStatus.hasInsights) missing.push('insights');
-     else if (dep === 'social' && !rootDataStatus.hasSocial) missing.push('social');
-     else if (dep === 'history' && !rootDataStatus.hasHistory) missing.push('history');
-     else if (dep === 'price' && !rootDataStatus.hasPrices) missing.push('price');
-   }
-   return { canRun: missing.length === 0, missing };
- }
- 
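For reference, the contract this check implements, as a minimal sketch (the manifest and status values below are hypothetical):

    // Hypothetical inputs; shapes follow checkRootDependencies above.
    const manifest = { rootDataDependencies: ['portfolio', 'price'] };
    const status = { hasPortfolio: true, hasPrices: false };
    checkRootDependencies(manifest, status);
    // -> { canRun: false, missing: ['price'] }
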
- async function checkRootDataAvailability(dateStr, config, dependencies, earliestDates) {
-   const { logger } = dependencies;
-   const dateToProcess = new Date(dateStr + 'T00:00:00Z');
-   let portfolioRefs = [], historyRefs = [];
-   let hasPortfolio = false, hasInsights = false, hasSocial = false, hasHistory = false, hasPrices = false, insightsData = null, socialData = null;
-   try {
-     const tasks = [];
-     if (dateToProcess >= earliestDates.portfolio) tasks.push(getPortfolioPartRefs(config, dependencies, dateStr).then(r => { portfolioRefs = r; hasPortfolio = !!r.length; }));
-     if (dateToProcess >= earliestDates.insights) tasks.push(loadDailyInsights(config, dependencies, dateStr).then(r => { insightsData = r; hasInsights = !!r; }));
-     if (dateToProcess >= earliestDates.social) tasks.push(loadDailySocialPostInsights(config, dependencies, dateStr).then(r => { socialData = r; hasSocial = !!r; }));
-     if (dateToProcess >= earliestDates.history) tasks.push(getHistoryPartRefs(config, dependencies, dateStr).then(r => { historyRefs = r; hasHistory = !!r.length; }));
-     if (dateToProcess >= earliestDates.price) { tasks.push(checkPriceDataAvailability(config, dependencies).then(r => { hasPrices = r; })); }
-     await Promise.all(tasks);
-     if (!(hasPortfolio || hasInsights || hasSocial || hasHistory || hasPrices)) return null;
-     return { portfolioRefs, historyRefs, todayInsights: insightsData, todaySocialPostInsights: socialData, status: { hasPortfolio, hasInsights, hasSocial, hasHistory, hasPrices }, yesterdayPortfolioRefs: null };
-   } catch (err) { logger.log('ERROR', `Error checking data: ${err.message}`); return null; }
- }
- 
- async function firestoreHelper(action, { key, updates, config, db }) {
-   const collections = { price: config.priceCollection || 'asset_prices', status: config.computationStatusCollection || 'computation_status' };
-   switch (action) {
-     case 'checkAvailability': try { const snapshot = await db.collection(collections.price).limit(1).get(); return !snapshot.empty; } catch (e) { return false; }
-     case 'fetchStatus': { if (!key) throw new Error('fetchStatus requires a key'); const docRef = db.collection(collections.status).doc(key); const snap = await docRef.get(); return snap.exists ? snap.data() : {}; }
-     case 'updateStatus': { if (!key) throw new Error('updateStatus requires a key'); if (!updates || Object.keys(updates).length === 0) return; const docRef = db.collection(collections.status).doc(key); await docRef.set(updates, { merge: true }); return true; }
-     default: throw new Error(`Unknown action: ${action}`);
-   }
- }
- 
- async function checkPriceDataAvailability(config, dependencies) { return firestoreHelper('checkAvailability', { config, db: dependencies.db }); }
- async function fetchComputationStatus(dateStr, config, { db }) { return firestoreHelper('fetchStatus', { key: dateStr, config, db }); }
- async function fetchGlobalComputationStatus(config, { db }) { return firestoreHelper('fetchStatus', { key: 'global_status', config, db }); }
- async function updateComputationStatus(dateStr, updates, config, { db }) { return firestoreHelper('updateStatus', { key: dateStr, updates, config, db }); }
- 
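A minimal usage sketch of the status helpers above, assuming a Firestore db handle and a config object as used elsewhere in this module (the date and field name are hypothetical):

    const status = await fetchComputationStatus('2024-01-15', config, { db });
    if (!status.someCalcName) {
      // Merges into the per-date status document rather than overwriting it.
      await updateComputationStatus('2024-01-15', { someCalcName: 'manifestHash' }, config, { db });
    }
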
- /**
-  * --- REFACTORED: fetchExistingResults ---
-  * Transparently handles both standard documents and auto-sharded documents.
-  * 1. Fetches the doc.
-  * 2. Checks for the `_sharded: true` flag.
-  * 3. If sharded, fetches the '_shards' subcollection and merges the data back into a single object.
-  */
- async function fetchExistingResults(dateStr, calcsInPass, fullManifest, config, { db }, includeSelf = false) {
-   const manifestMap = new Map(fullManifest.map(c => [normalizeName(c.name), c]));
-   const calcsToFetch = new Set();
-   for (const calc of calcsInPass) { if (calc.dependencies) calc.dependencies.forEach(d => calcsToFetch.add(normalizeName(d))); if (includeSelf && calc.isHistorical) calcsToFetch.add(normalizeName(calc.name)); }
-   if (!calcsToFetch.size) return {};
-   const fetched = {};
-   const docRefs = [];
-   const names = [];
- 
-   // 1. Prepare Reads
-   for (const name of calcsToFetch) {
-     const m = manifestMap.get(name);
-     if (m) { docRefs.push(db.collection(config.resultsCollection).doc(dateStr).collection(config.resultsSubcollection).doc(m.category || 'unknown').collection(config.computationsSubcollection).doc(name)); names.push(name); }
-   }
- 
-   if (docRefs.length) {
-     const snaps = await db.getAll(...docRefs);
-     const hydrationPromises = [];
- 
-     // 2. Process Initial Snapshots
-     snaps.forEach((doc, i) => {
-       const name = names[i];
-       if (!doc.exists) return;
-       const data = doc.data();
-       if (data._sharded === true) { hydrationPromises.push(hydrateAutoShardedResult(doc.ref, name)); } // CHECK FOR AUTO-SHARDING FLAG
-       else if (data._completed) { fetched[name] = data; }
-     });
- 
-     // 3. Hydrate Sharded Data in Parallel
-     if (hydrationPromises.length > 0) { const hydratedResults = await Promise.all(hydrationPromises); hydratedResults.forEach(res => { fetched[res.name] = res.data; }); }
-   }
-   return fetched;
- }
- 
- /**
-  * Helper: Fetches all docs in the '_shards' subcollection and merges them.
-  */
- async function hydrateAutoShardedResult(docRef, resultName) {
-   // Determine subcollection name (defaulting to '_shards')
-   const shardsCol = docRef.collection('_shards');
-   const snapshot = await shardsCol.get();
-   const assembledData = { _completed: true }; // Rebuild the object
-   snapshot.forEach(doc => { const chunk = doc.data(); Object.assign(assembledData, chunk); });
-   // Remove internal flags if they leaked into the shards
-   delete assembledData._sharded;
-   delete assembledData._completed;
-   return { name: resultName, data: assembledData };
- }
- 
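To make the read path concrete, this is the document layout the sharded writer produces and hydrateAutoShardedResult consumes, reconstructed from the code in this file (the ticker keys are illustrative):

    // Pointer doc at .../{computation}:
    //   { _completed: true, _sharded: true, _shardCount: 2, _lastUpdated: '...' }
    // Shard docs at .../{computation}/_shards/shard_0 and shard_1:
    //   shard_0: { AAPL: { ... }, MSFT: { ... } }
    //   shard_1: { TSLA: { ... }, NVDA: { ... } }
    // Hydration merges the shard docs and strips the internal flags.
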
- async function streamAndProcess(dateStr, state, passName, config, deps, rootData, portfolioRefs, historyRefs, fetchedDeps, previousFetchedDeps) {
-   const { logger } = deps;
-   const controller = new ComputationController(config, deps);
-   const calcs = Object.values(state).filter(c => c && c.manifest);
-   const streamingCalcs = calcs.filter(c => c.manifest.rootDataDependencies.includes('portfolio') || c.manifest.rootDataDependencies.includes('history'));
-   if (streamingCalcs.length === 0) return;
- 
-   logger.log('INFO', `[${passName}] Streaming for ${streamingCalcs.length} computations...`);
-   await controller.loader.loadMappings();
-   const prevDate = new Date(dateStr + 'T00:00:00Z'); prevDate.setUTCDate(prevDate.getUTCDate() - 1);
-   const prevDateStr = prevDate.toISOString().slice(0, 10);
-   const tP_iter = streamPortfolioData(config, deps, dateStr, portfolioRefs);
-   const needsYesterdayPortfolio = streamingCalcs.some(c => c.manifest.isHistorical);
-   const yP_iter = (needsYesterdayPortfolio && rootData.yesterdayPortfolioRefs) ? streamPortfolioData(config, deps, prevDateStr, rootData.yesterdayPortfolioRefs) : null;
-   const needsTradingHistory = streamingCalcs.some(c => c.manifest.rootDataDependencies.includes('history'));
-   const tH_iter = (needsTradingHistory && historyRefs) ? streamHistoryData(config, deps, dateStr, historyRefs) : null;
- 
-   let yP_chunk = {}, tH_chunk = {};
-   for await (const tP_chunk of tP_iter) {
-     if (yP_iter) yP_chunk = (await yP_iter.next()).value || {};
-     if (tH_iter) tH_chunk = (await tH_iter.next()).value || {};
-     const promises = streamingCalcs.map(calc => controller.executor.executePerUser(calc, calc.manifest, dateStr, tP_chunk, yP_chunk, tH_chunk, fetchedDeps, previousFetchedDeps));
-     await Promise.all(promises);
-   }
-   logger.log('INFO', `[${passName}] Streaming complete.`);
- }
- 
- async function runStandardComputationPass(date, calcs, passName, config, deps, rootData, fetchedDeps, previousFetchedDeps, skipStatusWrite = false) {
-   const dStr = date.toISOString().slice(0, 10);
-   const logger = deps.logger;
-   const fullRoot = { ...rootData };
-   if (calcs.some(c => c.isHistorical)) {
-     const prev = new Date(date); prev.setUTCDate(prev.getUTCDate() - 1);
-     const prevStr = prev.toISOString().slice(0, 10);
-     fullRoot.yesterdayPortfolioRefs = await getPortfolioPartRefs(config, deps, prevStr);
-   }
-   const state = {};
-   for (const c of calcs) { try { const inst = new c.class(); inst.manifest = c; state[normalizeName(c.name)] = inst; logger.log('INFO', `${c.name} calculation running for ${dStr}`); } catch (e) { logger.log('WARN', `Failed to init ${c.name}`); } }
-   await streamAndProcess(dStr, state, passName, config, deps, fullRoot, rootData.portfolioRefs, rootData.historyRefs, fetchedDeps, previousFetchedDeps);
-   return await commitResults(state, dStr, passName, config, deps, skipStatusWrite);
- }
- 
- async function runMetaComputationPass(date, calcs, passName, config, deps, fetchedDeps, previousFetchedDeps, rootData, skipStatusWrite = false) {
-   const controller = new ComputationController(config, deps);
-   const dStr = date.toISOString().slice(0, 10);
-   const state = {};
-   for (const mCalc of calcs) {
-     try {
-       deps.logger.log('INFO', `${mCalc.name} calculation running for ${dStr}`);
-       const inst = new mCalc.class(); inst.manifest = mCalc;
-       await controller.executor.executeOncePerDay(inst, mCalc, dStr, fetchedDeps, previousFetchedDeps);
-       state[normalizeName(mCalc.name)] = inst;
-     } catch (e) { deps.logger.log('ERROR', `Meta calc failed ${mCalc.name}: ${e.message}`); }
-   }
-   return await commitResults(state, dStr, passName, config, deps, skipStatusWrite);
- }
- 
- /**
-  * --- REFACTORED: commitResults ---
-  * Automatically detects result size.
-  * If a result exceeds the safety threshold (just under Firestore's 1MB document limit), it is split into chunks and written to a subcollection.
-  * Otherwise, it is written normally.
-  */
- async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
-   const successUpdates = {};
-   const schemas = [];
-   for (const name in stateObj) {
-     const calc = stateObj[name];
-     let hasData = false;
-     try {
-       const result = await calc.getResult();
-       if (!result) { deps.logger.log('INFO', `${name} for ${dStr}: Skipped (Empty Result)`); continue; }
-       const mainDocRef = deps.db.collection(config.resultsCollection).doc(dStr).collection(config.resultsSubcollection).doc(calc.manifest.category).collection(config.computationsSubcollection).doc(name);
-       // AUTO-SHARDING LOGIC
-       const updates = await prepareAutoShardedWrites(result, mainDocRef, deps.logger);
-       // Collect Schemas if present
-       if (calc.manifest.class.getSchema) {
-         const { class: _cls, ...safeMetadata } = calc.manifest;
-         schemas.push({ name, category: calc.manifest.category, schema: calc.manifest.class.getSchema(), metadata: safeMetadata });
-       }
-       if (updates.length > 0) {
-         await commitBatchInChunks(config, deps, updates, `${name} Results`);
-         successUpdates[name] = calc.manifest.hash || true;
-         const isSharded = updates.some(u => u.data._sharded === true);
-         deps.logger.log('INFO', `${name} for ${dStr}: ✔ Success (Written ${isSharded ? 'Sharded' : 'Standard'})`);
-       } else {
-         deps.logger.log('INFO', `${name} for ${dStr}: - Empty Data`);
-       }
-     } catch (e) { deps.logger.log('ERROR', `${name} for ${dStr}: ✖ FAILED Commit: ${e.message}`); }
-   }
-   if (schemas.length) batchStoreSchemas(deps, config, schemas).catch(() => { });
-   if (!skipStatusWrite && Object.keys(successUpdates).length > 0) {
-     await updateComputationStatus(dStr, successUpdates, config, deps);
-     deps.logger.log('INFO', `[${passName}] Updated status document for ${Object.keys(successUpdates).length} successful computations.`);
-   }
-   return successUpdates;
- }
- 
- /**
-  * Accurately calculates the size of a value according to Firestore storage rules.
-  * Reference: https://firebase.google.com/docs/firestore/storage-size
-  */
- function calculateFirestoreBytes(value) {
-   if (value === null) return 1;
-   if (value === undefined) return 0; // Firestore drops undefined fields
-   if (typeof value === 'boolean') return 1;
-   if (typeof value === 'number') return 8; // All numbers are 64-bit doubles or integers
-   if (typeof value === 'string') return Buffer.byteLength(value, 'utf8') + 1;
-   if (value instanceof Date) return 8; // Timestamps are 8 bytes
-   if (value.constructor && value.constructor.name === 'DocumentReference') { return Buffer.byteLength(value.path, 'utf8') + 16; }
-   if (Array.isArray(value)) { let sum = 0; for (const item of value) sum += calculateFirestoreBytes(item); return sum; }
-   // Handle Objects (Maps): Sum of (Key + 1 + Value)
-   if (typeof value === 'object') { let sum = 0; for (const k in value) { if (Object.prototype.hasOwnProperty.call(value, k)) { sum += (Buffer.byteLength(k, 'utf8') + 1) + calculateFirestoreBytes(value[k]); } } return sum; }
-   return 0; // Fallback
- }
- 
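A worked example of these sizing rules (field name bytes + 1, strings length + 1, numbers 8, booleans 1); the sample object is arbitrary:

    const sample = { ticker: 'AAPL', price: 187.5, active: true };
    // 'ticker' (6 + 1) + 'AAPL' (4 + 1) = 12
    // 'price'  (5 + 1) + number (8)    = 14
    // 'active' (6 + 1) + boolean (1)   = 8
    calculateFirestoreBytes(sample); // 34 bytes of field data
    // The document name is counted separately (see docPathSize below).
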
- async function prepareAutoShardedWrites(result, docRef, logger) {
-   const SAFETY_THRESHOLD_BYTES = 1000 * 1024; // 1MB Limit (We target just under this)
-   const OVERHEAD_ALLOWANCE = 20 * 1024; // 20KB Safety margin for document path & metadata
-   const CHUNK_LIMIT = SAFETY_THRESHOLD_BYTES - OVERHEAD_ALLOWANCE;
-   const totalSize = calculateFirestoreBytes(result); // 1. Calculate Total Size Once (O(N))
-   const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16; // Add the size of the document path itself (Firestore counts this against the 1MB limit)
-   // CASE A: Fits in one document
-   if ((totalSize + docPathSize) < CHUNK_LIMIT) { const data = { ...result, _completed: true, _sharded: false }; return [{ ref: docRef, data, options: { merge: true } }]; }
-   logger.log('INFO', `[AutoShard] Result size ~${Math.round(totalSize / 1024)}KB exceeds limit. Sharding...`);
-   const writes = [];
-   const shardCollection = docRef.collection('_shards');
-   let currentChunk = {};
-   let currentChunkSize = 0;
-   let shardIndex = 0;
-   for (const [key, value] of Object.entries(result)) { // 2. Efficient O(N) Loop
-     if (key.startsWith('_')) continue;
-     const keySize = Buffer.byteLength(key, 'utf8') + 1; // Calculate size of just this item
-     const valueSize = calculateFirestoreBytes(value);
-     const itemSize = keySize + valueSize;
-     if (currentChunkSize + itemSize > CHUNK_LIMIT) { // Check if adding this item would overflow the current chunk
-       // Flush current chunk
-       writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } }); // Overwrite
-       shardIndex++;
-       currentChunk = {};
-       currentChunkSize = 0;
-     }
-     // Add to current chunk
-     currentChunk[key] = value;
-     currentChunkSize += itemSize;
-   }
-   // Flush final chunk
-   if (Object.keys(currentChunk).length > 0) { writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } }); }
-   // Pointer Document
-   const pointerData = { _completed: true, _sharded: true, _shardCount: shardIndex + 1, _lastUpdated: new Date().toISOString() };
-   writes.push({ ref: docRef, data: pointerData, options: { merge: false } }); // Use merge: false to ensure we overwrite any previous non-sharded blob
-   return writes;
- }
- 
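Putting the write path together, a minimal sketch of committing an auto-sharded result; in the real code the collection names come from config, so the ones below are placeholders:

    const docRef = db.collection('computation_results').doc('2024-01-15')
      .collection('categories').doc('ticker')
      .collection('computations').doc('my_calc');
    const writes = await prepareAutoShardedWrites(result, docRef, logger);
    // Each entry is { ref, data, options }; commitBatchInChunks batches the sets.
    await commitBatchInChunks(config, deps, writes, 'my_calc Results');
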
- async function runBatchPriceComputation(config, deps, dateStrings, calcs, targetTickers = []) {
-   const { logger, db, calculationUtils } = deps;
-   const controller = new ComputationController(config, deps);
-   const mappings = await controller.loader.loadMappings();
-   let targetInstrumentIds = [];
-   if (targetTickers && targetTickers.length > 0) {
-     const tickerToInst = mappings.tickerToInstrument || {};
-     targetInstrumentIds = targetTickers.map(t => tickerToInst[t]).filter(id => id);
-     if (targetInstrumentIds.length === 0) { logger.log('WARN', '[BatchPrice] Target tickers provided but no IDs found. Aborting.'); return; }
-   }
-   const allShardRefs = await getRelevantShardRefs(config, deps, targetInstrumentIds);
-   if (!allShardRefs.length) { logger.log('WARN', '[BatchPrice] No relevant price shards found. Exiting.'); return; }
-   const OUTER_CONCURRENCY_LIMIT = 2, SHARD_BATCH_SIZE = 20, WRITE_BATCH_LIMIT = 50;
-   logger.log('INFO', `[BatchPrice] Execution Plan: ${dateStrings.length} days, ${allShardRefs.length} shards. Concurrency: ${OUTER_CONCURRENCY_LIMIT}.`);
-   const shardChunks = []; for (let i = 0; i < allShardRefs.length; i += SHARD_BATCH_SIZE) { shardChunks.push(allShardRefs.slice(i, i + SHARD_BATCH_SIZE)); }
-   const outerLimit = pLimit(OUTER_CONCURRENCY_LIMIT);
-   const chunkPromises = [];
-   for (let index = 0; index < shardChunks.length; index++) {
-     const shardChunkRefs = shardChunks[index];
-     chunkPromises.push(outerLimit(async () => {
-       try {
-         logger.log('INFO', `[BatchPrice] Processing chunk ${index + 1}/${shardChunks.length} (${shardChunkRefs.length} shards)...`);
-         const pricesData = await loadDataByRefs(config, deps, shardChunkRefs);
-         if (targetInstrumentIds.length > 0) { const requestedSet = new Set(targetInstrumentIds); for (const loadedInstrumentId in pricesData) { if (!requestedSet.has(loadedInstrumentId)) { delete pricesData[loadedInstrumentId]; } } }
-         const writes = [];
-         for (const dateStr of dateStrings) {
-           const dynamicMathContext = {};
-           for (const [key, value] of Object.entries(mathLayer)) { dynamicMathContext[key] = value; if (LEGACY_MAPPING[key]) { dynamicMathContext[LEGACY_MAPPING[key]] = value; } }
-           const context = { mappings, prices: { history: pricesData }, date: { today: dateStr }, math: dynamicMathContext };
-           for (const calcManifest of calcs) {
-             try {
-               const instance = new calcManifest.class(); await instance.process(context); const result = await instance.getResult();
-               if (result && Object.keys(result).length > 0) {
-                 let dataToWrite = result; if (result.by_instrument) dataToWrite = result.by_instrument;
-                 if (Object.keys(dataToWrite).length > 0) {
-                   const docRef = db.collection(config.resultsCollection).doc(dateStr).collection(config.resultsSubcollection).doc(calcManifest.category).collection(config.computationsSubcollection).doc(normalizeName(calcManifest.name));
-                   writes.push({ ref: docRef, data: { ...dataToWrite, _completed: true }, options: { merge: true } });
-                 }
-               }
-             } catch (err) { logger.log('ERROR', `[BatchPrice] ✖ Failed ${calcManifest.name} for ${dateStr}: ${err.message}`); }
-           }
-         }
-         if (writes.length > 0) {
-           const commitBatches = []; for (let i = 0; i < writes.length; i += WRITE_BATCH_LIMIT) { commitBatches.push(writes.slice(i, i + WRITE_BATCH_LIMIT)); }
-           const commitLimit = pLimit(10);
-           await Promise.all(commitBatches.map((batchWrites, bIndex) => commitLimit(async () => {
-             const batch = db.batch(); batchWrites.forEach(w => batch.set(w.ref, w.data, w.options));
-             try { await calculationUtils.withRetry(() => batch.commit(), `BatchPrice-C${index}-B${bIndex}`); } catch (commitErr) { logger.log('ERROR', `[BatchPrice] Commit failed for Chunk ${index} Batch ${bIndex}.`, { error: commitErr.message }); }
-           })));
-         }
-       } catch (chunkErr) { logger.log('ERROR', `[BatchPrice] Fatal error processing Chunk ${index}.`, { error: chunkErr.message }); }
-     }));
-   }
-   await Promise.all(chunkPromises);
-   logger.log('INFO', '[BatchPrice] Optimization pass complete.');
- }
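The concurrency pattern used above, in isolation: an outer p-limit gate over shard chunks, with a second gate inside for batch commits. A stripped-down sketch, with processChunk standing in for the chunk body:

    const outerLimit = pLimit(2); // at most two chunks in flight
    const tasks = shardChunks.map((chunk, i) => outerLimit(() => processChunk(chunk, i)));
    await Promise.all(tasks); // chunk failures are logged inside, not rethrown
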
- 
- module.exports = { groupByPass, checkRootDependencies, checkRootDataAvailability, fetchExistingResults, fetchComputationStatus, fetchGlobalComputationStatus, updateComputationStatus, runStandardComputationPass, runMetaComputationPass, runBatchPriceComputation };