bulltrackers-module 1.0.547 → 1.0.548
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -237,7 +237,6 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
   const opCounts = { writes: 0, deletes: 0 };
 
   // [FIX 1] Always check for shards if we might compress, not just on 'isInitialWrite'.
-  // This prevents zombie shards from persisting if a previous run failed.
   let wasSharded = false;
   try {
     const currentSnap = await docRef.get();
@@ -245,9 +244,7 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
       const d = currentSnap.data();
       wasSharded = (d._sharded === true);
     }
-  } catch (e) {
-    // Ignore read errors, proceed with write attempts
-  }
+  } catch (e) {}
 
   // --- COMPRESSION STRATEGY ---
   try {
@@ -259,24 +256,25 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
       if (compressedBuffer.length < 900 * 1024) {
         logger.log('INFO', `[Compression] ${name}: Compressed ${(rawBuffer.length/1024).toFixed(0)}KB -> ${(compressedBuffer.length/1024).toFixed(0)}KB.`);
 
+        // [FIX 2] Explicitly wrap buffer to ensure it saves as a Blob, NOT a map of index->byte
+        const payloadBuffer = Buffer.from(compressedBuffer);
+
         const compressedPayload = {
           _compressed: true,
           _completed: true,
           _lastUpdated: new Date().toISOString(),
-          payload:
+          payload: payloadBuffer
         };
 
-        // [FIX
-        // We use a batch to atomically write the blob and delete the shards.
+        // [FIX 3] Self-Healing: If we are writing compressed, we MUST ensure shards are gone.
         if (wasSharded) {
           const updates = [];
           const shardCol = docRef.collection('_shards');
           const shardDocs = await shardCol.listDocuments();
 
-          // Add delete ops for all shards
           shardDocs.forEach(d => updates.push({ type: 'DELETE', ref: d }));
 
-          //
+          // Use merge: false (overwrite)
           updates.push({ ref: docRef, data: compressedPayload, options: { merge: false } });
 
           opCounts.deletes += shardDocs.length;
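
The [FIX 2] lines above wrap the gzip output in Buffer.from(...) so Firestore stores the payload as a single Bytes value rather than a large map keyed by byte index, and [FIX 3] makes the compressed write responsible for clearing any leftover '_shards' documents. A minimal sketch of the compressed-write half, assuming the firebase-admin Firestore SDK and Node's built-in zlib (the helper name writeCompressedDoc is illustrative; the 900 KB guard mirrors the check in the diff):

const zlib = require('zlib');

// Gzip a result object and store it as one document with a Bytes payload.
async function writeCompressedDoc(docRef, result) {
  const rawBuffer = Buffer.from(JSON.stringify(result), 'utf8');
  const compressedBuffer = zlib.gzipSync(rawBuffer);

  // A Node Buffer is saved as a Firestore Bytes value; the explicit wrap
  // guards against the payload being serialized as a map of index -> byte.
  const payloadBuffer = Buffer.from(compressedBuffer);

  if (payloadBuffer.length >= 900 * 1024) {
    return false; // too close to the 1 MiB document limit; caller falls back to sharding
  }

  await docRef.set({
    _compressed: true,
    _completed: true,
    _lastUpdated: new Date().toISOString(),
    payload: payloadBuffer,
  }, { merge: false });
  return true;
}
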
@@ -284,7 +282,6 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
 
           await commitBatchInChunks(config, deps, updates, `${name}::Cleanup+Compress`);
         } else {
-          // [FIX 3] Use merge: false here as well for consistency and safety.
           await docRef.set(compressedPayload, { merge: false });
           opCounts.writes += 1;
         }
@@ -293,8 +290,6 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
       }
     }
   } catch (compErr) {
-    // [FIX 4] Log the error but allow fallthrough to sharding (Self-Healing).
-    // This ensures that if compression fails (e.g., unexpected encoding issue), we still try to save the data via sharding.
     logger.log('WARN', `[SelfHealing] Compression failed for ${name}, reverting to sharding. Error: ${compErr.message}`);
   }
 
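
The comments removed in the last hunk described behaviour that is still present: if compression throws, the catch block logs a warning and execution falls through to the sharded write path. For the shard cleanup that [FIX 3] introduces on the success path, a minimal sketch under the same assumptions (firebase-admin; compressAndHealShards is an illustrative name, and the real module routes its operations through commitBatchInChunks rather than a single WriteBatch):

// Delete any leftover '_shards' documents in the same atomic batch that
// overwrites the parent document with the compressed payload.
async function compressAndHealShards(db, docRef, compressedPayload) {
  const shardDocs = await docRef.collection('_shards').listDocuments();

  const batch = db.batch();
  shardDocs.forEach((shardRef) => batch.delete(shardRef));
  batch.set(docRef, compressedPayload); // set without merge is a full overwrite
  await batch.commit();

  return { writes: 1, deletes: shardDocs.length };
}

Doing the deletes and the overwrite in one batch matches the intent of the removed comment about atomicity: a failure between the two steps cannot leave both the compressed blob and stale shards behind (a plain WriteBatch is limited to 500 operations, which is presumably why the module chunks its commits instead).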