bulltrackers-module 1.0.190 → 1.0.192

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -169,12 +169,13 @@ async function runComputationPass(config, dependencies, computationManifest) {
   const prevDateStr = prevDate.toISOString().slice(0, 10);
   const previousResults = await fetchExistingResults(prevDateStr, calcsRunning, computationManifest, config, dependencies, true);

+  // Changed skipStatusWrite to false to ensure updates are recorded
   if (finalStandardToRun.length) {
-    const updates = await runStandardComputationPass(dateToProcess, finalStandardToRun, `Pass ${passToRun} (Std)`, config, dependencies, rootData, existingResults, previousResults, true);
+    const updates = await runStandardComputationPass(dateToProcess, finalStandardToRun, `Pass ${passToRun} (Std)`, config, dependencies, rootData, existingResults, previousResults, false);
     Object.assign(dateUpdates, updates);
   }
   if (finalMetaToRun.length) {
-    const updates = await runMetaComputationPass(dateToProcess, finalMetaToRun, `Pass ${passToRun} (Meta)`, config, dependencies, existingResults, previousResults, rootData, true);
+    const updates = await runMetaComputationPass(dateToProcess, finalMetaToRun, `Pass ${passToRun} (Meta)`, config, dependencies, existingResults, previousResults, rootData, false);
     Object.assign(dateUpdates, updates);
   }
 } catch (err) {
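
The only functional change in this hunk is the trailing boolean flipping from true to false. Judging by the added comment and the commitResults signature further down in this diff, that argument feeds a skipStatusWrite flag. A minimal sketch of the guard pattern such a flag implies — the body and the statusCollection name are assumptions for illustration, not code from this package:

    // Hypothetical sketch only: how a skipStatusWrite flag is typically honoured.
    // The signature mirrors commitResults(...) shown below; the body is assumed.
    async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite) {
      // ... commit the computed results for the day ...
      if (!skipStatusWrite) {
        // Passing false from runComputationPass means this status write now happens.
        await deps.db.collection(config.statusCollection) // collection name assumed
          .doc(dStr)
          .set({ [passName]: 'complete' }, { merge: true });
      }
    }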
@@ -2,7 +2,7 @@
  * FILENAME: bulltrackers-module/functions/computation-system/helpers/orchestration_helpers.js
  * FIXED: TS Error (controller.loader.mappings)
  * ADDED: Smart Shard Lookup for specific tickers
- * FIXED: Payload Size Limits & Crash Resilience in runBatchPriceComputation
+ * OPTIMIZED: Added Concurrency (Parallel Commits & Pipelined Shards) for runBatchPriceComputation
  */

 const { ComputationController } = require('../controllers/computation_controller');
@@ -13,7 +13,7 @@ const {
   getHistoryPartRefs, streamPortfolioData, streamHistoryData,
   getRelevantShardRefs, loadDataByRefs
 } = require('../utils/data_loader');
-
+const pLimit = require('p-limit'); // Ensure p-limit is required

 /**
  * Groups calculations from a manifest by their 'pass' property.
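
For readers unfamiliar with p-limit: it returns a wrapper that caps how many of the wrapped async functions run at once. A self-contained example, assuming p-limit v3 (the last major version loadable via require; v4+ is ESM-only):

    const pLimit = require('p-limit');

    const limit = pLimit(2); // at most 2 tasks in flight at any moment

    async function demo() {
      const tasks = [1, 2, 3, 4].map(n =>
        limit(async () => {
          await new Promise(r => setTimeout(r, 100)); // simulate async work
          return n * n;
        })
      );
      // All four tasks are queued immediately, but only two execute concurrently.
      console.log(await Promise.all(tasks)); // [1, 4, 9, 16]
    }

    demo();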
@@ -328,10 +328,10 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 /**
  * --- UPDATED: runBatchPriceComputation ---
  * Now supports subset/specific ticker execution via 'targetTickers'
- * FIXED: Uses local batch config and try/catch for resilience.
+ * OPTIMIZED: Implements concurrency for both Shard Processing and Write Commits
  */
 async function runBatchPriceComputation(config, deps, dateStrings, calcs, targetTickers = []) {
-  const { logger, db } = deps;
+  const { logger, db, calculationUtils } = deps; // Ensure calculationUtils is available for retry
   const controller = new ComputationController(config, deps);

   // 1. FIX: Call loadMappings() correctly and get the result
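
calculationUtils.withRetry is pulled in here so batch commits can be retried (see the commit phase below), but its implementation is not part of this diff. A plausible shape, assuming exponential backoff — the package's actual helper may differ:

    // Hypothetical sketch of a withRetry(fn, label) helper matching the call
    // sites below; the retry count and backoff policy are assumptions.
    async function withRetry(fn, label, maxAttempts = 3, baseDelayMs = 250) {
      for (let attempt = 1; ; attempt++) {
        try {
          return await fn();
        } catch (err) {
          if (attempt >= maxAttempts) throw err;
          const delay = baseDelayMs * 2 ** (attempt - 1); // exponential backoff
          console.warn(`[${label}] attempt ${attempt} failed (${err.message}); retrying in ${delay}ms`);
          await new Promise(r => setTimeout(r, delay));
        }
      }
    }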
@@ -340,17 +340,14 @@ async function runBatchPriceComputation(config, deps, dateStrings, calcs, target
   // 2. Resolve Shards (All or Subset)
   let targetInstrumentIds = [];
   if (targetTickers && targetTickers.length > 0) {
-    // Convert Tickers -> InstrumentIDs
     const tickerToInst = mappings.tickerToInstrument || {};
     targetInstrumentIds = targetTickers.map(t => tickerToInst[t]).filter(id => id);
-
     if (targetInstrumentIds.length === 0) {
       logger.log('WARN', '[BatchPrice] Target tickers provided but no IDs found. Aborting.');
       return;
     }
   }

-  // Uses the new data_loader function to look up specific shards if ids are present
   const allShardRefs = await getRelevantShardRefs(config, deps, targetInstrumentIds);

   if (!allShardRefs.length) {
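
Note that the ticker resolution above silently drops symbols with no mapping; the function only aborts if every ticker is unknown. Illustrative behaviour, with made-up mapping values:

    const tickerToInst = { AAPL: 'inst_101', MSFT: 'inst_202' }; // sample mapping
    const targetTickers = ['AAPL', 'UNKNOWN', 'MSFT'];

    const ids = targetTickers.map(t => tickerToInst[t]).filter(id => id);
    console.log(ids); // ['inst_101', 'inst_202'] — 'UNKNOWN' is dropped, no error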
@@ -358,73 +355,107 @@ async function runBatchPriceComputation(config, deps, dateStrings, calcs, target
     return;
   }

-  // 3. Process in Chunks
+  // 3. Execution Planning
+  // CONCURRENCY SETTING:
+  // Limit outer concurrency (processing shard chunks) to 2 to prevent contention on daily result docs.
+  // While Firestore handles concurrent writes to the same doc, limiting this avoids excessive retries.
+  const OUTER_CONCURRENCY_LIMIT = 2;
   const SHARD_BATCH_SIZE = 20;
-  logger.log('INFO', `[BatchPrice] Execution Plan: ${dateStrings.length} days, ${allShardRefs.length} shards.`);
+  const WRITE_BATCH_LIMIT = 50; // Keep write batch size small (payload safety)

+  logger.log('INFO', `[BatchPrice] Execution Plan: ${dateStrings.length} days, ${allShardRefs.length} shards. Concurrency: ${OUTER_CONCURRENCY_LIMIT}.`);
+
+  // 4. Create Chunks of Shards
+  const shardChunks = [];
   for (let i = 0; i < allShardRefs.length; i += SHARD_BATCH_SIZE) {
-    const shardChunkRefs = allShardRefs.slice(i, i + SHARD_BATCH_SIZE);
-    logger.log('INFO', `[BatchPrice] Processing chunk ${Math.floor(i/SHARD_BATCH_SIZE) + 1} (${shardChunkRefs.length} shards)...`);
-
-    const pricesData = await loadDataByRefs(config, deps, shardChunkRefs);
-
-    // --- FILTERING (Optional but Recommended) ---
-    if (targetInstrumentIds.length > 0) {
-      // (Filtering logic omitted for brevity as per previous implementation)
-    }
+    shardChunks.push(allShardRefs.slice(i, i + SHARD_BATCH_SIZE));
+  }

-    const writes = [];
-
-    for (const dateStr of dateStrings) {
-      const context = {
-        mappings,
-        prices: { history: pricesData },
-        date: { today: dateStr },
-        math: require('../layers/math_primitives.js')
-      };
-
-      for (const calcManifest of calcs) {
-        try {
-          const instance = new calcManifest.class();
-          await instance.process(context);
-          const result = await instance.getResult();
-
-          if (result && Object.keys(result).length > 0) {
-            let dataToWrite = result;
-            if (result.by_instrument) dataToWrite = result.by_instrument;
+  const outerLimit = pLimit(OUTER_CONCURRENCY_LIMIT);
+
+  // 5. Process Shard Chunks Concurrently
+  const chunkPromises = shardChunks.map((shardChunkRefs, index) => outerLimit(async () => {
+    try {
+      logger.log('INFO', `[BatchPrice] Processing chunk ${index + 1}/${shardChunks.length} (${shardChunkRefs.length} shards)...`);
+
+      const pricesData = await loadDataByRefs(config, deps, shardChunkRefs);
+
+      // Optional Filtering for Subset Mode
+      if (targetInstrumentIds.length > 0) {
+        // (Logic omitted for brevity, but safe to include if strictly needed)
+      }
+
+      const writes = [];
+
+      // --- CALCULATION PHASE ---
+      // This builds up the array of writes (one per date)
+      for (const dateStr of dateStrings) {
+        const context = {
+          mappings,
+          prices: { history: pricesData },
+          date: { today: dateStr },
+          math: require('../layers/math_primitives.js')
+        };
+
+        for (const calcManifest of calcs) {
+          try {
+            const instance = new calcManifest.class();
+            await instance.process(context);
+            const result = await instance.getResult();

-            if (Object.keys(dataToWrite).length > 0) {
-              const docRef = db.collection(config.resultsCollection).doc(dateStr)
-                .collection(config.resultsSubcollection).doc(calcManifest.category)
-                .collection(config.computationsSubcollection).doc(normalizeName(calcManifest.name));
+            if (result && Object.keys(result).length > 0) {
+              let dataToWrite = result;
+              if (result.by_instrument) dataToWrite = result.by_instrument;

-              writes.push({
-                ref: docRef,
-                data: { ...dataToWrite, _completed: true },
-                options: { merge: true }
-              });
+              if (Object.keys(dataToWrite).length > 0) {
+                const docRef = db.collection(config.resultsCollection).doc(dateStr)
+                  .collection(config.resultsSubcollection).doc(calcManifest.category)
+                  .collection(config.computationsSubcollection).doc(normalizeName(calcManifest.name));
+
+                writes.push({
+                  ref: docRef,
+                  data: { ...dataToWrite, _completed: true },
+                  options: { merge: true }
+                });
+              }
             }
+          } catch (err) {
+            logger.log('ERROR', `[BatchPrice] Calc ${calcManifest.name} failed for ${dateStr}`, { error: err.message });
           }
-        } catch (err) {
-          logger.log('ERROR', `[BatchPrice] Calc ${calcManifest.name} failed for ${dateStr}`, { error: err.message });
         }
       }
-    }
-
-    if (writes.length > 0) {
-      // FIX: Use a lower batch limit for price batches because these result documents are aggregates
-      // and often exceed the 10MB payload limit when batched aggressively (450).
-      const safeBatchConfig = { ...config, batchSizeLimit: 50 };
-
-      try {
-        await commitBatchInChunks(safeBatchConfig, deps, writes, `BatchPrice Chunk ${Math.floor(i/SHARD_BATCH_SIZE)}`);
-      } catch (err) {
-        // FIX: Catch the commit failure (e.g. payload size exceeded) but DO NOT CRASH the whole system.
-        // Log it and allow the next chunk of shards to process.
-        logger.log('ERROR', `[BatchPrice] Failed to commit results for chunk ${Math.floor(i/SHARD_BATCH_SIZE)}. Proceeding to next chunk.`, { error: err.message });
+
+      // --- PARALLEL COMMIT PHASE ---
+      // Instead of committing sequentially via commitBatchInChunks, we process these writes in parallel.
+      // Since each write targets a DIFFERENT date (different document), parallelizing this is safe and fast.
+      if (writes.length > 0) {
+        const commitBatches = [];
+        for (let i = 0; i < writes.length; i += WRITE_BATCH_LIMIT) {
+          commitBatches.push(writes.slice(i, i + WRITE_BATCH_LIMIT));
+        }
+
+        // Use a higher concurrency for commits since they target disjoint documents
+        const commitLimit = pLimit(10);
+
+        await Promise.all(commitBatches.map((batchWrites, bIndex) => commitLimit(async () => {
+          const batch = db.batch();
+          batchWrites.forEach(w => batch.set(w.ref, w.data, w.options));
+
+          try {
+            await calculationUtils.withRetry(() => batch.commit(), `BatchPrice-C${index}-B${bIndex}`);
+          } catch (commitErr) {
+            logger.log('ERROR', `[BatchPrice] Commit failed for Chunk ${index} Batch ${bIndex}.`, { error: commitErr.message });
+            // We log but don't throw, to allow other batches to succeed
+          }
+        })));
       }
+
+    } catch (chunkErr) {
+      logger.log('ERROR', `[BatchPrice] Fatal error processing Chunk ${index}.`, { error: chunkErr.message });
     }
-  }
+  }));
+
+  await Promise.all(chunkPromises);
   logger.log('INFO', '[BatchPrice] Optimization pass complete.');
 }
 
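Stripped of the domain specifics, the rework above is a two-level concurrency pattern: a small limit on heavy shard loads, and a wider limit on commits that touch disjoint documents. A condensed, self-contained sketch of that pattern — limits taken from the diff, buildWrites standing in for the calculation phase:

    const pLimit = require('p-limit');

    // db is a Firestore instance; buildWrites(chunk) -> [{ ref, data, options }]
    async function processChunks(db, chunks, buildWrites) {
      const outer = pLimit(2); // few heavy chunk loads in flight at once
      await Promise.all(chunks.map(chunk => outer(async () => {
        const writes = await buildWrites(chunk); // calculation phase
        const inner = pLimit(10); // commits target disjoint docs, so go wider
        const batches = [];
        for (let i = 0; i < writes.length; i += 50) {
          batches.push(writes.slice(i, i + 50)); // well under Firestore's 500-op batch cap
        }
        await Promise.all(batches.map(b => inner(async () => {
          const batch = db.batch(); // Firestore WriteBatch
          b.forEach(w => batch.set(w.ref, w.data, w.options));
          await batch.commit();
        })));
      })));
    }
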
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "bulltrackers-module",
-  "version": "1.0.190",
+  "version": "1.0.192",
   "description": "Helper Functions for Bulltrackers.",
   "main": "index.js",
   "files": [