bulltrackers-module 1.0.545 → 1.0.547

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -235,19 +235,18 @@ async function fetchContracts(db, calcNames) {
 
 async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps, startShardIndex = 0, flushMode = 'STANDARD', isInitialWrite = false) {
   const opCounts = { writes: 0, deletes: 0 };
+
+  // [FIX 1] Always check for shards if we might compress, not just on 'isInitialWrite'.
+  // This prevents zombie shards from persisting if a previous run failed.
   let wasSharded = false;
-  let shouldWipeShards = false;
-  let rootMergeOption = !isInitialWrite;
-
-  if (isInitialWrite) {
-    try {
-      const currentSnap = await docRef.get();
-      if (currentSnap.exists) {
-        const d = currentSnap.data();
-        wasSharded = (d._sharded === true);
-        if (wasSharded) shouldWipeShards = true;
-      }
-    } catch (e) {}
+  try {
+    const currentSnap = await docRef.get();
+    if (currentSnap.exists) {
+      const d = currentSnap.data();
+      wasSharded = (d._sharded === true);
+    }
+  } catch (e) {
+    // Ignore read errors, proceed with write attempts
   }
 
   // --- COMPRESSION STRATEGY ---
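Note on [FIX 1]: the "zombie shard" case arises when an earlier run wrote this result as a root document flagged _sharded: true plus documents in a _shards subcollection, and a crash or partial cleanup left those shard documents behind. The unconditional read above detects that state on every write, not only on the initial one. A minimal standalone sketch of the same detection, assuming the Firebase Admin SDK and the _sharded / _shards conventions visible in this diff (the helper name is illustrative, not part of the package):

const { getFirestore } = require('firebase-admin/firestore');

// Illustrative helper: report leftover shard docs under a result document
// so a later write can clean them up.
async function findZombieShards(docPath) {
  const docRef = getFirestore().doc(docPath);

  const snap = await docRef.get();
  const rootClaimsSharded = snap.exists && snap.data()._sharded === true;

  // listDocuments() enumerates the subcollection without reading document
  // contents, so it surfaces leftovers from a failed cleanup cheaply.
  const shardRefs = await docRef.collection('_shards').listDocuments();

  return {
    rootClaimsSharded,
    shardCount: shardRefs.length,
    // Shards that still exist while the root no longer claims to be sharded
    // are exactly the state [FIX 1] and [FIX 2] guard against.
    isZombie: !rootClaimsSharded && shardRefs.length > 0,
  };
}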
@@ -259,13 +258,25 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
       const compressedBuffer = zlib.gzipSync(rawBuffer);
       if (compressedBuffer.length < 900 * 1024) {
         logger.log('INFO', `[Compression] ${name}: Compressed ${(rawBuffer.length/1024).toFixed(0)}KB -> ${(compressedBuffer.length/1024).toFixed(0)}KB.`);
-        const compressedPayload = { _compressed: true, _completed: true, _lastUpdated: new Date().toISOString(), payload: compressedBuffer };
 
-        if (shouldWipeShards) {
+        const compressedPayload = {
+          _compressed: true,
+          _completed: true,
+          _lastUpdated: new Date().toISOString(),
+          payload: compressedBuffer
+        };
+
+        // [FIX 2] Self-Healing: If we are writing compressed, we MUST ensure shards are gone.
+        // We use a batch to atomically write the blob and delete the shards.
+        if (wasSharded) {
           const updates = [];
           const shardCol = docRef.collection('_shards');
           const shardDocs = await shardCol.listDocuments();
+
+          // Add delete ops for all shards
           shardDocs.forEach(d => updates.push({ type: 'DELETE', ref: d }));
+
+          // [FIX 3] Use merge: false (overwrite) to clear old indexes and ensure we don't inherit "too many indexes" error.
           updates.push({ ref: docRef, data: compressedPayload, options: { merge: false } });
 
           opCounts.deletes += shardDocs.length;
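For context on the compressed layout written above: a reader has to check the _compressed flag and gunzip the payload field before use. A minimal sketch of that read path, assuming the payload holds gzipped JSON (how rawBuffer is built is outside this hunk, so the JSON assumption is illustrative):

const zlib = require('zlib');

// Illustrative read path for documents written by the compression branch above.
async function readCompressedResult(docRef) {
  const snap = await docRef.get();
  if (!snap.exists) return null;

  const d = snap.data();
  if (d._compressed === true) {
    // The Admin SDK returns Firestore Bytes fields as Node Buffers,
    // so the gzipped payload can be unzipped directly.
    const raw = zlib.gunzipSync(d.payload);
    return JSON.parse(raw.toString('utf8'));
  }
  return d; // uncompressed (or sharded) root document
}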
@@ -273,30 +284,47 @@ async function writeSingleResult(result, docRef, name, dateContext, logger, conf
 
           await commitBatchInChunks(config, deps, updates, `${name}::Cleanup+Compress`);
         } else {
-          await docRef.set(compressedPayload, { merge: rootMergeOption });
+          // [FIX 3] Use merge: false here as well for consistency and safety.
+          await docRef.set(compressedPayload, { merge: false });
           opCounts.writes += 1;
         }
 
         return { totalSize: compressedBuffer.length, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex, opCounts };
       }
     }
-  } catch (compErr) {}
+  } catch (compErr) {
+    // [FIX 4] Log the error but allow fallthrough to sharding (Self-Healing).
+    // This ensures that if compression fails (e.g., unexpected encoding issue), we still try to save the data via sharding.
+    logger.log('WARN', `[SelfHealing] Compression failed for ${name}, reverting to sharding. Error: ${compErr.message}`);
+  }
 
+  // --- SHARDING STRATEGY (Fallback) ---
   const strategies = [ { bytes: 900 * 1024, keys: null }, { bytes: 450 * 1024, keys: 10000 }, { bytes: 200 * 1024, keys: 2000 }, { bytes: 100 * 1024, keys: 50 } ];
   let committed = false; let lastError = null;
   let finalStats = { totalSize: 0, isSharded: false, shardCount: 1, nextShardIndex: startShardIndex };
+  let rootMergeOption = !isInitialWrite;
+
+  // If we detected shards earlier but failed compression, we need to make sure we wipe them if we are about to re-shard
+  // (though prepareAutoShardedWrites handles the _shards logic, we might need to be careful about the root doc).
+  // In this specific flow, prepareAutoShardedWrites handles new shard creation. The existing logic below handles wiping shards if needed.
+  // However, since we might have skipped the compression block, let's ensure 'wasSharded' logic is respected here if we need to reset.
+  let shouldWipeShards = wasSharded;
 
   for (let attempt = 0; attempt < strategies.length; attempt++) {
     if (committed) break;
     const constraints = strategies[attempt];
     try {
       const updates = await prepareAutoShardedWrites(result, docRef, logger, constraints.bytes, constraints.keys, startShardIndex, flushMode);
+
       if (shouldWipeShards) {
         const shardCol = docRef.collection('_shards');
         const shardDocs = await shardCol.listDocuments();
+
+        // Prepend delete operations for existing shards to ensure clean slate
         shardDocs.forEach(d => updates.unshift({ type: 'DELETE', ref: d }));
-        shouldWipeShards = false;
+        shouldWipeShards = false; // Only do this once
       }
+
       const rootUpdate = updates.find(u => u.ref.path === docRef.path && u.type !== 'DELETE');
       if (rootUpdate) { rootUpdate.options = { merge: rootMergeOption }; }
 
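Both the cleanup-plus-compress path and the sharded fallback funnel their { type, ref, data, options } updates through commitBatchInChunks, whose implementation is not part of this diff. The sketch below shows the usual shape of such a helper, chunking at Firestore's 500-operation batch limit; only the call signature is taken from the code above, and the contents of deps (db, logger) are assumptions:

const BATCH_LIMIT = 500; // A Firestore WriteBatch accepts at most 500 operations.

// Illustrative only: the package's real commitBatchInChunks may differ.
async function commitBatchInChunks(config, deps, updates, label) {
  const { db, logger } = deps; // assumed contents of deps
  for (let i = 0; i < updates.length; i += BATCH_LIMIT) {
    const chunk = updates.slice(i, i + BATCH_LIMIT);
    const batch = db.batch();
    for (const u of chunk) {
      if (u.type === 'DELETE') batch.delete(u.ref);
      else if (u.options) batch.set(u.ref, u.data, u.options);
      else batch.set(u.ref, u.data);
    }
    await batch.commit();
    logger.log('INFO', `[${label}] Committed ${i + chunk.length}/${updates.length} ops.`);
  }
}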
@@ -108,22 +108,22 @@ async function processPortfolio(context, config, taskData, isPI) {
   data.username = username;
 
   if (isPI) {
-    // Deep Dive Logic for PIs
-    if (config.enablePiDeepPortfolio !== false) {
-      const topPositions = (data.AggregatedPositions || []).sort((a, b) => b.Invested - a.Invested).slice(0, 10);
-      const deepPositions = [];
-      for (const pos of topPositions) {
-        try {
-          const posUrl = `${config.ETORO_API_POSITIONS_URL}?cid=${cid}&InstrumentID=${pos.InstrumentID}&client_request_id=${uuid}`;
-          const deepRes = await fetchWithRetry(posUrl, requestOptions, proxyManager, logger, 'DeepPos', headerManager);
-          deepPositions.push({ instrumentId: pos.InstrumentID, ...(await deepRes.json()) });
-        } catch (e) {} // Skip failed deep positions
-      }
-      if (deepPositions.length) await storePopularInvestorPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data, deepPortfolioData: { positions: deepPositions } });
-      else await storePopularInvestorPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data });
-    } else {
-      await storePopularInvestorPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data });
-    }
+    // Deep Dive Logic for PIs - DISABLED
+    // if (config.enablePiDeepPortfolio === true) {
+    //   const topPositions = (data.AggregatedPositions || []).sort((a, b) => b.Invested - a.Invested).slice(0, 10);
+    //   const deepPositions = [];
+    //   for (const pos of topPositions) {
+    //     try {
+    //       const posUrl = `${config.ETORO_API_POSITIONS_URL}?cid=${cid}&InstrumentID=${pos.InstrumentID}&client_request_id=${uuid}`;
+    //       const deepRes = await fetchWithRetry(posUrl, requestOptions, proxyManager, logger, 'DeepPos', headerManager);
+    //       deepPositions.push({ instrumentId: pos.InstrumentID, ...(await deepRes.json()) });
+    //     } catch (e) {} // Skip failed deep positions
+    //   }
+    //   if (deepPositions.length) await storePopularInvestorPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data, deepPortfolioData: { positions: deepPositions } });
+    //   else await storePopularInvestorPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data });
+    // } else {
+    await storePopularInvestorPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data });
+    // }
   } else {
     await storeSignedInUserPortfolio({ db, logger, collectionRegistry, cid, date: today, portfolioData: data });
   }
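One detail worth noting in the disabled block: the original guard treated enablePiDeepPortfolio as opt-out (!== false), while the commented-out replacement reads it as opt-in (=== true). If the deep dive is ever re-enabled, that difference matters for configs that never set the flag at all, as this hypothetical snippet illustrates:

// Hypothetical config that never mentions the flag.
const config = {};

const optOutGuard = config.enablePiDeepPortfolio !== false; // true  -> deep dive would run
const optInGuard  = config.enablePiDeepPortfolio === true;  // false -> deep dive stays off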
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "bulltrackers-module",
-  "version": "1.0.545",
+  "version": "1.0.547",
   "description": "Helper Functions for Bulltrackers.",
   "main": "index.js",
   "files": [