bulltrackers-module 1.0.546 → 1.0.548

This diff shows the changes between publicly released versions of the package, exactly as they appear in their public registry; it is provided for informational purposes only.
```diff
@@ -235,20 +235,16 @@ async function fetchContracts(db, calcNames) {
 
 async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps, startShardIndex = 0, flushMode = 'STANDARD', isInitialWrite = false) {
   const opCounts = { writes: 0, deletes: 0 };
+
+  // [FIX 1] Always check for shards if we might compress, not just on 'isInitialWrite'.
   let wasSharded = false;
-  let shouldWipeShards = false;
-  let rootMergeOption = !isInitialWrite;
-
-  if (isInitialWrite) {
-    try {
-      const currentSnap = await docRef.get();
-      if (currentSnap.exists) {
-        const d = currentSnap.data();
-        wasSharded = (d._sharded === true);
-        if (wasSharded) shouldWipeShards = true;
-      }
-    } catch (e) {}
-  }
+  try {
+    const currentSnap = await docRef.get();
+    if (currentSnap.exists) {
+      const d = currentSnap.data();
+      wasSharded = (d._sharded === true);
+    }
+  } catch (e) {}
 
   // --- COMPRESSION STRATEGY ---
   try {
```
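In plain terms: the `_sharded` probe used to run only when `isInitialWrite` was true, so a compressed rewrite of a previously sharded document could leave stale shards behind; it now runs on every call. A minimal sketch of the probe in isolation, assuming a firebase-admin `DocumentReference` (the `detectSharded` helper name is ours, not the package's):

```js
// Hypothetical helper mirroring the new unconditional check above.
async function detectSharded(docRef) {
  try {
    const snap = await docRef.get();
    // '_sharded' is the marker flag the module sets on sharded root docs.
    return snap.exists && snap.data()._sharded === true;
  } catch (e) {
    // Mirrors the diff's empty catch: on a failed read, assume not sharded.
    return false;
  }
}
```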
```diff
@@ -259,13 +255,26 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
       const compressedBuffer = zlib.gzipSync(rawBuffer);
       if (compressedBuffer.length < 900 * 1024) {
         logger.log('INFO', `[Compression] ${name}: Compressed ${(rawBuffer.length/1024).toFixed(0)}KB -> ${(compressedBuffer.length/1024).toFixed(0)}KB.`);
-        const compressedPayload = { _compressed: true, _completed: true, _lastUpdated: new Date().toISOString(), payload: compressedBuffer };
 
-        if (shouldWipeShards) {
+        // [FIX 2] Explicitly wrap buffer to ensure it saves as a Blob, NOT a map of index->byte
+        const payloadBuffer = Buffer.from(compressedBuffer);
+
+        const compressedPayload = {
+          _compressed: true,
+          _completed: true,
+          _lastUpdated: new Date().toISOString(),
+          payload: payloadBuffer
+        };
+
+        // [FIX 3] Self-Healing: If we are writing compressed, we MUST ensure shards are gone.
+        if (wasSharded) {
           const updates = [];
           const shardCol = docRef.collection('_shards');
           const shardDocs = await shardCol.listDocuments();
+
           shardDocs.forEach(d => updates.push({ type: 'DELETE', ref: d }));
+
+          // Use merge: false (overwrite)
           updates.push({ ref: docRef, data: compressedPayload, options: { merge: false } });
 
           opCounts.deletes += shardDocs.length;
```
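[FIX 2] addresses how binary data is serialized: firebase-admin persists a Node `Buffer` as a single Firestore Bytes value, but if the buffer is ever cloned into a plain object along the way, it degrades into a map with one numeric field per byte. A sketch of both outcomes plus the read-side round trip; the spread-based failure shown here is our illustration of the symptom, not necessarily the exact path the package hit:

```js
const zlib = require('zlib');

const compressed = zlib.gzipSync(JSON.stringify({ hello: 'world' }));

// Safe: a real Buffer instance, stored by firebase-admin as one Bytes value.
const asBlob = { payload: Buffer.from(compressed) };

// Unsafe: object spread copies the indexed bytes, yielding
// { '0': 31, '1': 139, ... } -- a map of index -> byte in Firestore.
const asMap = { payload: { ...compressed } };

console.log(Buffer.isBuffer(asBlob.payload)); // true
console.log(Buffer.isBuffer(asMap.payload));  // false

// Read side: gunzip the stored bytes to recover the original document.
console.log(JSON.parse(zlib.gunzipSync(asBlob.payload).toString())); // { hello: 'world' }
```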
```diff
@@ -273,30 +282,44 @@
 
           await commitBatchInChunks(config, deps, updates, `${name}::Cleanup+Compress`);
         } else {
-          await docRef.set(compressedPayload, { merge: rootMergeOption });
+          await docRef.set(compressedPayload, { merge: false });
           opCounts.writes += 1;
         }
 
         return { totalSize: compressedBuffer.length, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex, opCounts };
       }
     }
-  } catch (compErr) {}
+  } catch (compErr) {
+    logger.log('WARN', `[SelfHealing] Compression failed for ${name}, reverting to sharding. Error: ${compErr.message}`);
+  }
 
+  // --- SHARDING STRATEGY (Fallback) ---
   const strategies = [ { bytes: 900 * 1024, keys: null }, { bytes: 450 * 1024, keys: 10000 }, { bytes: 200 * 1024, keys: 2000 }, { bytes: 100 * 1024, keys: 50 } ];
   let committed = false; let lastError = null;
   let finalStats = { totalSize: 0, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex };
+  let rootMergeOption = !isInitialWrite;
+
+  // If we detected shards earlier but failed compression, we need to make sure we wipe them if we are about to re-shard
+  // (though prepareAutoShardedWrites handles the _shards logic, we might need to be careful about the root doc).
+  // In this specific flow, prepareAutoShardedWrites handles new shard creation. The existing logic below handles wiping shards if needed.
+  // However, since we might have skipped the compression block, let's ensure 'wasSharded' logic is respected here if we need to reset.
+  let shouldWipeShards = wasSharded;
 
   for (let attempt = 0; attempt < strategies.length; attempt++) {
     if (committed) break;
     const constraints = strategies[attempt];
     try {
       const updates = await prepareAutoShardedWrites(result, docRef, logger, constraints.bytes, constraints.keys, startShardIndex, flushMode);
+
       if (shouldWipeShards) {
         const shardCol = docRef.collection('_shards');
         const shardDocs = await shardCol.listDocuments();
+
+        // Prepend delete operations for existing shards to ensure clean slate
         shardDocs.forEach(d => updates.unshift({ type: 'DELETE', ref: d }));
-        shouldWipeShards = false;
+        shouldWipeShards = false; // Only do this once
       }
+
       const rootUpdate = updates.find(u => u.ref.path === docRef.path && u.type !== 'DELETE');
       if (rootUpdate) { rootUpdate.options = { merge: rootMergeOption }; }
 
```
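The control flow in miniature: compression is attempted first, and a failure (now logged instead of silently swallowed) falls through to the strategy ladder, which retries with progressively smaller shard sizes and key counts until a commit succeeds. A sketch of that ladder, folding shard preparation and batch commit into one hypothetical `tryCommit` callback:

```js
// The same descending constraints the module uses.
const strategies = [
  { bytes: 900 * 1024, keys: null },   // size-limited only
  { bytes: 450 * 1024, keys: 10000 },  // tighter size plus a key cap
  { bytes: 200 * 1024, keys: 2000 },
  { bytes: 100 * 1024, keys: 50 },     // last resort: many tiny shards
];

async function writeWithFallback(tryCommit) {
  let lastError = null;
  for (const constraints of strategies) {
    try {
      return await tryCommit(constraints); // first success wins
    } catch (err) {
      lastError = err; // remember the failure, tighten constraints, retry
    }
  }
  throw lastError; // every strategy failed
}

// Usage: await writeWithFallback(({ bytes, keys }) => { /* shard + commit */ });
```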
package/package.json CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "bulltrackers-module",
-  "version": "1.0.546",
+  "version": "1.0.548",
   "description": "Helper Functions for Bulltrackers.",
   "main": "index.js",
   "files": [
```