bulltrackers-module 1.0.564 → 1.0.565

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,7 @@
 /**
  * @fileoverview Handles saving computation results with observability and Smart Cleanup.
  * UPDATED: Fixed bug where Alert Computations failed to trigger Pub/Sub on empty FINAL flush.
+ * UPDATED: Added support for 'isPage' mode to store per-user data in subcollections.
  */
 const { commitBatchInChunks, generateDataHash } = require('../utils/utils');
 const { updateComputationStatus } = require('./StatusRepository');
@@ -53,8 +54,10 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
       io: { writes: 0, deletes: 0 }
     };
 
-    // [NEW] Check metadata for alert flag (defaults to false)
+    // Check metadata for alert flag (defaults to false)
     const isAlertComputation = calc.manifest.isAlertComputation === true;
+    // [NEW] Check metadata for page flag (defaults to false)
+    const isPageComputation = calc.manifest.isPage === true;
 
     try {
      const result = await calc.getResult();
@@ -105,12 +108,9 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
        continue;
      }
 
-      // [FIX] Force alert trigger on FINAL flush even if result is empty
-      // This handles cases where all data was written in previous INTERMEDIATE flushes
+      // Force alert trigger on FINAL flush even if result is empty
      if (isAlertComputation && flushMode === 'FINAL') {
-        // Reconstruct path to ensure downstream system checks for data
        const docPath = `${config.resultsCollection}/${dStr}/${config.resultsSubcollection}/${calc.manifest.category}/${config.computationsSubcollection}/${name}`;
-
        alertTriggers.push({
          date: dStr,
          computationName: name,
@@ -128,7 +128,66 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
        }
        continue;
      }
+
+      // [NEW] Page Computation Logic (Fan-Out)
+      // Bypasses standard compression/sharding to write per-user documents
+      if (isPageComputation && !isEmpty) {
+        const mainDocRef = db.collection(config.resultsCollection).doc(dStr)
+          .collection(config.resultsSubcollection).doc(calc.manifest.category)
+          .collection(config.computationsSubcollection).doc(name);
+
+        // 1. Fan-out writes for each user
+        const pageWrites = [];
+        // We assume result is { [cid]: { ...data... }, [cid2]: { ... } }
+        for (const [cid, userData] of Object.entries(result)) {
+          // STRATEGY: Use a fixed collection 'pages' so we can clean it up later
+          // Path: .../{ComputationName}/pages/{cid}
+          const userDocRef = mainDocRef.collection('pages').doc(cid);
+
+          pageWrites.push({
+            ref: userDocRef,
+            data: userData, // Write the raw data directly
+            options: { merge: false } // Overwrite specifically for this run
+          });
+        }
+
+        // 2. Commit the fan-out writes
+        if (pageWrites.length > 0) {
+          await commitBatchInChunks(config, deps, pageWrites, `${name}::PageFanOut`);
+          runMetrics.io.writes += pageWrites.length;
+          runMetrics.storage.keys = pageWrites.length;
+          logger.log('INFO', `[PageMode] ${name}: Wrote ${pageWrites.length} user pages.`);
+        }
+
+        // 3. Write the "Header" document (Important for Status/Metrics)
+        // We store NO data here, just metadata saying "Go look in /pages"
+        if (flushMode !== 'INTERMEDIATE') {
+          const headerData = {
+            _completed: true,
+            _isPageMode: true, // Flag for readers to know where to look
+            _pageCount: pageWrites.length,
+            _lastUpdated: new Date().toISOString()
+          };
+
+          await mainDocRef.set(headerData, { merge: true });
+          runMetrics.io.writes += 1;
+
+          if (calc.manifest.hash) {
+            successUpdates[name] = {
+              hash: calc.manifest.hash,
+              simHash: simHash,
+              resultHash: resultHash,
+              category: calc.manifest.category,
+              composition: calc.manifest.composition,
+              metrics: runMetrics
+            };
+          }
+        }
+
+        continue; // Skip the standard writeSingleResult logic
+      }
 
+      // Standard Computation Logic (Compression or Sharding)
      if (typeof result === 'object') runMetrics.storage.keys = Object.keys(result).length;
      const resultKeys = Object.keys(result || {});
      const isMultiDate = resultKeys.length > 0 && resultKeys.every(k => /^\d{4}-\d{2}-\d{2}$/.test(k));
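
For consumers of the new layout, the header document's '_isPageMode' flag signals that the real payload lives in the 'pages' subcollection rather than in the document itself. A minimal read-side sketch, assuming the firebase-admin Firestore API and the same 'config' path fields used above ('readUserPage' is a hypothetical helper, not part of this package):

    // Hypothetical helper: fetch one user's page for a page-mode computation.
    // Assumes `db` is a firebase-admin Firestore instance and `config` holds
    // the same collection names used by commitResults above.
    async function readUserPage(db, config, dStr, category, name, cid) {
      const mainDocRef = db.collection(config.resultsCollection).doc(dStr)
        .collection(config.resultsSubcollection).doc(category)
        .collection(config.computationsSubcollection).doc(name);

      const header = await mainDocRef.get();
      if (!header.exists || header.data()._isPageMode !== true) {
        return null; // Not page mode: use the standard (compressed/sharded) reader instead.
      }

      const pageSnap = await mainDocRef.collection('pages').doc(cid).get();
      return pageSnap.exists ? pageSnap.data() : null;
    }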
@@ -203,7 +262,6 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
  }
 
  // Alert triggers are now handled via Firestore triggers
-  // No need to publish Pub/Sub messages - the Firestore write itself triggers the alert system
  if (alertTriggers.length > 0) {
    logger.log('INFO', `[Alert System] ${alertTriggers.length} alert computations written to Firestore - triggers will fire automatically`);
  }
@@ -236,7 +294,7 @@ async function fetchContracts(db, calcNames) {
 async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps, startShardIndex = 0, flushMode = 'STANDARD', isInitialWrite = false) {
  const opCounts = { writes: 0, deletes: 0 };
 
-  // [FIX 1] Always check for shards if we might compress, not just on 'isInitialWrite'.
+  // Always check for shards if we might compress
  let wasSharded = false;
  try {
    const currentSnap = await docRef.get();
@@ -256,7 +314,6 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
    if (compressedBuffer.length < 900 * 1024) {
      logger.log('INFO', `[Compression] ${name}: Compressed ${(rawBuffer.length/1024).toFixed(0)}KB -> ${(compressedBuffer.length/1024).toFixed(0)}KB.`);
 
-      // [FIX 2] Explicitly wrap buffer to ensure it saves as a Blob, NOT a map of index->byte
      const payloadBuffer = Buffer.from(compressedBuffer);
 
      const compressedPayload = {
@@ -266,7 +323,7 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
        payload: payloadBuffer
      };
 
-      // [FIX 3] Self-Healing: If we are writing compressed, we MUST ensure shards are gone.
+      // Self-Healing: If we are writing compressed, we MUST ensure shards are gone.
      if (wasSharded) {
        const updates = [];
        const shardCol = docRef.collection('_shards');
@@ -299,10 +356,6 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
  let finalStats = { totalSize: 0, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex };
  let rootMergeOption = !isInitialWrite;
 
-  // If we detected shards earlier but failed compression, we need to make sure we wipe them if we are about to re-shard
-  // (though prepareAutoShardedWrites handles the _shards logic, we might need to be careful about the root doc).
-  // In this specific flow, prepareAutoShardedWrites handles new shard creation. The existing logic below handles wiping shards if needed.
-  // However, since we might have skipped the compression block, let's ensure 'wasSharded' logic is respected here if we need to reset.
  let shouldWipeShards = wasSharded;
 
  for (let attempt = 0; attempt < strategies.length; attempt++) {
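
The 900 * 1024 threshold above keeps the compressed payload comfortably below Firestore's 1 MiB document limit, and wrapping the result in Buffer.from(...) ensures firebase-admin stores it as a native Bytes value rather than a map of index->byte entries. A minimal sketch of that compress-then-check step, assuming Node's built-in zlib (the codec the package actually uses is not shown in this diff, and the wrapper shape here is illustrative):

    const zlib = require('zlib');

    // Hypothetical sketch: compress a result and decide whether it fits in a
    // single Firestore document (hard limit ~1 MiB; we leave ~10% headroom).
    function tryCompress(result) {
      const rawBuffer = Buffer.from(JSON.stringify(result), 'utf8');
      const compressedBuffer = zlib.gzipSync(rawBuffer);

      if (compressedBuffer.length >= 900 * 1024) {
        return null; // Too big even compressed: caller falls back to sharding.
      }

      return {
        _compressed: true, // illustrative marker field
        payload: Buffer.from(compressedBuffer) // explicit Buffer => saved as Bytes, not a map
      };
    }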
@@ -433,12 +486,30 @@ async function deleteOldCalculationData(dateStr, oldCategory, calcName, config,
  const { withRetry } = calculationUtils || { withRetry: (fn) => fn() };
  try {
    const oldDocRef = db.collection(config.resultsCollection).doc(dateStr).collection(config.resultsSubcollection).doc(oldCategory).collection(config.computationsSubcollection).doc(calcName);
+
+    const batch = db.batch(); let ops = 0;
+
+    // [NEW] Clean up 'pages' subcollection if it exists (for Page Mode)
+    const pagesCol = oldDocRef.collection('pages');
+    // Note: listDocuments works nicely for small-ish collections.
+    // If 'pages' has 10k+ docs, we rely on the implementation of listDocuments
+    // or we might need to paginate this in a real high-scale scenario.
+    const pageDocs = await withRetry(() => pagesCol.listDocuments(), 'ListOldPages');
+    for (const pDoc of pageDocs) { batch.delete(pDoc); ops++; }
+
+    // Clean up '_shards' subcollection (for Standard Mode)
    const shardsCol = oldDocRef.collection('_shards');
    const shardsSnap = await withRetry(() => shardsCol.listDocuments(), 'ListOldShards');
-    const batch = db.batch(); let ops = 0;
+
    for (const shardDoc of shardsSnap) { batch.delete(shardDoc); ops++; }
+
    batch.delete(oldDocRef); ops++;
+
+    // If ops > 500, this simple batch will fail.
+    // Re-using commitBatchInChunks logic for cleanup is safer if available,
+    // but sticking to standard structure for now as requested.
    await withRetry(() => batch.commit(), 'CleanupOldCategory');
+
    logger.log('INFO', `[Migration] Cleaned up ${ops} docs for ${calcName} in '${oldCategory}'`);
  } catch (e) { logger.log('WARN', `[Migration] Failed to clean up ${calcName}: ${e.message}`); }
 }
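
As the comment above notes, a single batch fails once it holds more than 500 operations, which a large 'pages' subcollection can easily exceed. A minimal sketch of a chunked variant, assuming firebase-admin ('deleteRefsInChunks' is a hypothetical helper, separate from the package's own commitBatchInChunks):

    // Hypothetical helper: delete document refs in batches of <= 450 to stay
    // safely under Firestore's 500-operation-per-batch limit.
    async function deleteRefsInChunks(db, docRefs, chunkSize = 450) {
      for (let i = 0; i < docRefs.length; i += chunkSize) {
        const batch = db.batch();
        for (const ref of docRefs.slice(i, i + chunkSize)) batch.delete(ref);
        await batch.commit();
      }
    }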
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "bulltrackers-module",
-  "version": "1.0.564",
+  "version": "1.0.565",
   "description": "Helper Functions for Bulltrackers.",
   "main": "index.js",
   "files": [