bulltrackers-module 1.0.260 → 1.0.262

@@ -10,7 +10,8 @@ const { StandardExecutor } = require('./executor
  const { MetaExecutor } = require('./executors/MetaExecutor');
  const { generateProcessId, PROCESS_TYPES } = require('./logger/logger');
 
- const STATUS_IMPOSSIBLE = 'IMPOSSIBLE';
+ // [FIX] Split IMPOSSIBLE into semantic categories
+ const STATUS_IMPOSSIBLE_PREFIX = 'IMPOSSIBLE';
 
  function groupByPass(manifest) { return manifest.reduce((acc, calc) => { (acc[calc.pass] = acc[calc.pass] || []).push(calc); return acc; }, {}); }
 
@@ -27,7 +28,8 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
  const stored = currentStatusMap[norm];
  const depManifest = manifestMap.get(norm);
  if (!stored) return false;
- if (stored.hash === STATUS_IMPOSSIBLE) return false;
+ // [FIX] Check for any IMPOSSIBLE variant
+ if (typeof stored.hash === 'string' && stored.hash.startsWith(STATUS_IMPOSSIBLE_PREFIX)) return false;
  if (!depManifest) return false;
  if (stored.hash !== depManifest.hash) return false;
  return true;
@@ -40,7 +42,12 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
  const storedCategory = stored ? stored.category : null;
  const currentHash = calc.hash;
 
- const markImpossible = (reason) => { report.impossible.push({ name: cName, reason }); simulationStatus[cName] = { hash: STATUS_IMPOSSIBLE, category: calc.category }; };
+ // [FIX] Granular impossible marking
+ const markImpossible = (reason, type = 'GENERIC') => {
+ report.impossible.push({ name: cName, reason });
+ const statusHash = `${STATUS_IMPOSSIBLE_PREFIX}:${type}`;
+ simulationStatus[cName] = { hash: statusHash, category: calc.category };
+ };
 
  const markRunnable = (isReRun = false, reRunDetails = null) => {
  if (isReRun) report.reRuns.push(reRunDetails);
@@ -50,49 +57,48 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 
  let migrationOldCategory = null;
  if (storedCategory && storedCategory !== calc.category) { migrationOldCategory = storedCategory; }
- if (storedHash === STATUS_IMPOSSIBLE) { report.skipped.push({ name: cName, reason: 'Permanently Impossible' }); continue; }
- const rootCheck = checkRootDependencies(calc, rootDataStatus);
 
- // Check Root Data Availability
- // LOGIC : Root data is essential for any calculation
- // Therefore if a computation has a dependency on rootdata that does not exist for the dates the computation requires, then the computation is impossible to run.
- // However, to handle edge cases where we might test trigger the computation system early, we do not mark impossible if the computation requires data for today, it might arrive later, we just block and skip.
+ // [FIX] Check for any IMPOSSIBLE variant in storage
+ if (typeof storedHash === 'string' && storedHash.startsWith(STATUS_IMPOSSIBLE_PREFIX)) {
+ report.skipped.push({ name: cName, reason: `Permanently Impossible (${storedHash})` });
+ continue;
+ }
 
+ const rootCheck = checkRootDependencies(calc, rootDataStatus);
+
  if (!rootCheck.canRun) {
  const missingStr = rootCheck.missing.join(', ');
  if (!isTargetToday) {
- markImpossible(`Missing Root Data: ${missingStr} (Historical)`);
+ // [FIX] Mark specifically as NO_DATA
+ markImpossible(`Missing Root Data: ${missingStr} (Historical)`, 'NO_DATA');
  } else {
  report.blocked.push({ name: cName, reason: `Missing Root Data: ${missingStr} (Waiting)` });
  }
  continue;
  }
 
- // Check Calculation Dependencies
- // LOGIC : If a calc B depends on calc A, and calc A is impossible, then calc B is always impossible
- // This has a cascading effect, if calc C depends on calc B and calc B depends on calc A and calc A is impossible, then calc B and calc C are also impossible.
-
  let dependencyIsImpossible = false;
  const missingDeps = [];
  if (calc.dependencies) {
  for (const dep of calc.dependencies) {
  const normDep = normalizeName(dep);
  const depStored = simulationStatus[normDep];
- if (depStored && depStored.hash === STATUS_IMPOSSIBLE) { dependencyIsImpossible = true; break; }
+ // [FIX] Check for any IMPOSSIBLE variant in dependencies
+ if (depStored && typeof depStored.hash === 'string' && depStored.hash.startsWith(STATUS_IMPOSSIBLE_PREFIX)) {
+ dependencyIsImpossible = true;
+ break;
+ }
  if (!isDepSatisfied(dep, simulationStatus, manifestMap)) { missingDeps.push(dep); }
  }
  }
 
- if (dependencyIsImpossible) { markImpossible('Dependency is Impossible'); continue; }
+ if (dependencyIsImpossible) {
+ // [FIX] Mark specifically as UPSTREAM failure
+ markImpossible('Dependency is Impossible', 'UPSTREAM');
+ continue;
+ }
  if (missingDeps.length > 0) { report.failedDependency.push({ name: cName, missing: missingDeps }); continue; }
 
- // Historical Continuity Check
- // LOGIC : For computations that require historical data, we process them chronologically
- // This is to handle the edge case where calc B runs for Tuesday data, but requires Mondays results from calc B.
- // If we triggered a hash mismatch through updating the code of calc B, it would overwrite the results for Tuesday and Monday but without this,
- // it would never be guaranteed that Monday runs before Tuesday, and so Tuesday would run with the old Monday hash data, or no data.
- // This fixes this edge case by ensuring that historical computations only run if the previous day's computation has run with the latest hash, if not, it blocks and waits.
-
  if (calc.isHistorical && prevDailyStatus) {
  const yesterday = new Date(dateStr + 'T00:00:00Z');
  yesterday.setUTCDate(yesterday.getUTCDate() - 1);
@@ -104,9 +110,6 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
  }
  }
  }
- // Final Hash Comparison
- // LOGIC : If the stored hash matches the current hash, we don't need to run the computation again, unless the category stored does not match the current computation category
- // This is to handle the edge case where a developer changes the category of a computation, the stored results need to be moved into the new location so we trigger a re-run to move the data and also delete the old category stored data.
 
  if (!storedHash) { markRunnable(); }
  else if (storedHash !== currentHash) { markRunnable(true, { name: cName, oldHash: storedHash, newHash: currentHash, previousCategory: migrationOldCategory }); }
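Note: the net effect of these hunks is that a status hash now encodes *why* a date became impossible. A minimal sketch of the new convention (names follow the diff; the surrounding planner state is assumed):

```js
// Sketch of the prefixed-status convention introduced above.
const STATUS_IMPOSSIBLE_PREFIX = 'IMPOSSIBLE';

// Writers tag the failure category onto the prefix...
const upstream = `${STATUS_IMPOSSIBLE_PREFIX}:UPSTREAM`; // 'IMPOSSIBLE:UPSTREAM'
const noData   = `${STATUS_IMPOSSIBLE_PREFIX}:NO_DATA`;  // 'IMPOSSIBLE:NO_DATA'

// ...and readers match any variant with a single prefix check,
// which is why every `=== STATUS_IMPOSSIBLE` comparison became startsWith().
function isImpossible(hash) {
  return typeof hash === 'string' && hash.startsWith(STATUS_IMPOSSIBLE_PREFIX);
}

console.log(isImpossible(upstream)); // true
console.log(isImpossible('abc123')); // false (a normal content hash)
```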
@@ -71,14 +71,22 @@ class StandardExecutor {
 
  let yP_chunk = {}, tH_chunk = {};
 
- for await (const tP_chunk of tP_iter) {
- if (yP_iter) yP_chunk = (await yP_iter.next()).value || {};
- if (tH_iter) tH_chunk = (await tH_iter.next()).value || {};
-
- // Execute chunk for all calcs
- const promises = streamingCalcs.map(calc => StandardExecutor.executePerUser(calc, calc.manifest, dateStr, tP_chunk, yP_chunk, tH_chunk, fetchedDeps, previousFetchedDeps, config, deps, cachedLoader) );
- await Promise.all(promises);
+ // [FIX] Ensure manual iterators are closed if loop fails
+ try {
+ for await (const tP_chunk of tP_iter) {
+ if (yP_iter) yP_chunk = (await yP_iter.next()).value || {};
+ if (tH_iter) tH_chunk = (await tH_iter.next()).value || {};
+
+ // Execute chunk for all calcs
+ const promises = streamingCalcs.map(calc => StandardExecutor.executePerUser(calc, calc.manifest, dateStr, tP_chunk, yP_chunk, tH_chunk, fetchedDeps, previousFetchedDeps, config, deps, cachedLoader) );
+ await Promise.all(promises);
+ }
+ } finally {
+ // Close manual iterators to release resources
+ if (yP_iter && yP_iter.return) await yP_iter.return();
+ if (tH_iter && tH_iter.return) await tH_iter.return();
  }
+
  logger.log('INFO', `[${passName}] Streaming complete.`);
  }
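Note on why the `finally` block matters: a `for await...of` loop only auto-closes the iterator it drives (`tP_iter` here); iterators advanced manually via `.next()` never receive a close signal if the loop throws. A self-contained illustration (the generators are hypothetical stand-ins for the real chunk streams):

```js
async function* makeStream(name) {
  try {
    for (let i = 0; ; i++) yield { name, i };
  } finally {
    // Runs when .return() is called, releasing e.g. file/DB handles.
    console.log(`${name} closed`);
  }
}

async function demo() {
  const primary = makeStream('primary'); // driven by for-await (auto-closed)
  const side = makeStream('side');       // advanced manually via .next()
  try {
    for await (const chunk of primary) {
      const sideChunk = (await side.next()).value || {};
      if (chunk.i === 2) throw new Error('boom'); // simulated mid-stream failure
    }
  } finally {
    // Without this, `side` would stay suspended and never run its cleanup.
    if (side.return) await side.return();
  }
}

demo().catch(() => {}); // logs: primary closed, then side closed
```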
 
@@ -10,31 +10,22 @@ const { getManifest } = require('../topology/ManifestLoader');
  const { StructuredLogger } = require('../logger/logger');
  const { recordRunAttempt } = require('../persistence/RunRecorder');
 
- // 1. IMPORT CALCULATIONS
  let calculationPackage;
- try {
- calculationPackage = require('aiden-shared-calculations-unified');
- } catch (e) {
- console.error("FATAL: Could not load 'aiden-shared-calculations-unified'.");
- throw e;
- }
-
+ try { calculationPackage = require('aiden-shared-calculations-unified');
+ } catch (e) {console.error("FATAL: Could not load 'aiden-shared-calculations-unified'."); throw e; }
  const calculations = calculationPackage.calculations;
- const MAX_RETRIES = 3; // [NEW] Poison Pill Threshold
+ const MAX_RETRIES = 3;
 
  /**
  * Handles a single Pub/Sub message.
  */
  async function handleComputationTask(message, config, dependencies) {
-
- // 2. INITIALIZE SYSTEM LOGGER
  const systemLogger = new StructuredLogger({ minLevel: config.minLevel || 'INFO', enableStructured: true, ...config });
-
  const runDependencies = { ...dependencies, logger: systemLogger };
  const { logger, db } = runDependencies;
-
- // 3. PARSE PAYLOAD
  let data;
+
+ // ----------------------------------- Parse message -----------------------------------
  try {
  if (message.data && message.data.message && message.data.message.data) { data = JSON.parse(Buffer.from(message.data.message.data, 'base64').toString());
  } else if (message.data && typeof message.data === 'string') { data = JSON.parse(Buffer.from(message.data, 'base64').toString());
@@ -42,30 +33,22 @@ async function handleComputationTask(message, config, dependencies) {
  } else { data = message; }
  } catch (parseError) { logger.log('ERROR', `[Worker] Failed to parse Pub/Sub payload.`, { error: parseError.message }); return; }
 
+ // ----------------------------------- Validate & Execute -----------------------------------
  if (!data || data.action !== 'RUN_COMPUTATION_DATE') { return; }
-
- // [UPDATED] Destructure previousCategory from payload
  const { date, pass, computation, previousCategory } = data;
-
  if (!date || !pass || !computation) { logger.log('ERROR', `[Worker] Invalid payload: Missing date, pass, or computation.`, data); return; }
-
- // 4. LOAD MANIFEST
  let computationManifest;
- try {
- computationManifest = getManifest(config.activeProductLines || [], calculations, runDependencies);
- } catch (manifestError) {
- logger.log('FATAL', `[Worker] Failed to load Manifest: ${manifestError.message}`);
+ try { computationManifest = getManifest(config.activeProductLines || [], calculations, runDependencies);
+ } catch (manifestError) { logger.log('FATAL', `[Worker] Failed to load Manifest: ${manifestError.message}`);
  await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: manifestError.message, stage: 'MANIFEST_LOAD' });
  return;
  }
 
- // 5. EXECUTE (With Run Ledger)
  try {
  logger.log('INFO', `[Worker] 📥 Received: ${computation} for ${date}`);
 
  const startTime = Date.now();
- // [UPDATED] Pass previousCategory to executor
- const result = await executeDispatchTask(
+ const result = await executeDispatchTask(
  date,
  pass,
  computation,
@@ -76,36 +59,32 @@ async function handleComputationTask(message, config, dependencies) {
  );
  const duration = Date.now() - startTime;
 
- // CHECK FOR INTERNAL FAILURES (Trapped by ResultCommitter)
  const failureReport = result?.updates?.failureReport || [];
  const successUpdates = result?.updates?.successUpdates || {};
 
  if (failureReport.length > 0) {
- // Task ran, but logic or storage failed (e.g., Sharding Limit)
- const failReason = failureReport[0]; // Assuming 1 calc per task
+ const failReason = failureReport[0];
  logger.log('ERROR', `[Worker] ❌ Failed logic/storage for ${computation}`, failReason.error);
- await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', failReason.error, { durationMs: duration });
+ const metrics = failReason.metrics || {};
+ metrics.durationMs = duration;
+ await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', failReason.error, metrics);
  throw new Error(failReason.error.message || 'Computation Logic Failed');
  }
  else if (Object.keys(successUpdates).length > 0) {
- // Success
- logger.log('INFO', `[Worker] Stored: ${computation} for ${date}`);
- await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', null, { durationMs: duration });
+ const successData = successUpdates[computation];
+ const metrics = successData.metrics || {};
+ metrics.durationMs = duration;
+ logger.log('INFO', `[Worker] ✅ Stored: ${computation} for ${date} (${metrics.storage?.sizeBytes} bytes)`);
+ await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', null, metrics);
  }
  else {
- // No updates, but no error (e.g. Empty Result) - Log as Success/Skipped
  logger.log('WARN', `[Worker] ⚠️ No results produced for ${computation} (Empty?)`);
  await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', { message: 'Empty Result' }, { durationMs: duration });
  }
-
  } catch (err) {
- // [NEW] POISON PILL LOGIC
- // Check retry count from Pub/Sub message if available
  const retryCount = message.deliveryAttempt || 0;
-
  if (retryCount >= MAX_RETRIES) {
  logger.log('ERROR', `[Worker] ☠️ Task POISONED. Moved to DLQ: ${computation} ${date} (Attempt ${retryCount})`);
-
  try {
  await db.collection('computation_dead_letter_queue').add({
  originalData: data,
@@ -113,19 +92,12 @@ async function handleComputationTask(message, config, dependencies) {
  finalAttemptAt: new Date(),
  failureReason: 'MAX_RETRIES_EXCEEDED'
  });
- // Return normally to ACK the message and remove from subscription
  return;
- } catch (dlqErr) {
- logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr);
- }
+ } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
  }
-
- // Catch System Crashes (OOM, Timeout, Unhandled Exception)
  logger.log('ERROR', `[Worker] ❌ Crash: ${computation} for ${date}: ${err.message}`);
-
  await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' });
-
- throw err; // Trigger Pub/Sub retry
+ throw err;
  }
  }
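Note: the poison-pill guard relies on Pub/Sub's `deliveryAttempt` counter. Returning normally acks the message (after parking the payload in a Firestore DLQ collection), while re-throwing nacks it and triggers redelivery. A condensed sketch of that decision flow, under the same assumptions as the diff:

```js
// Condensed poison-pill flow, mirroring the handler above.
const MAX_RETRIES = 3;

async function handleWithPoisonPill(message, data, db, logger, execute) {
  try {
    await execute(data);
  } catch (err) {
    const retryCount = message.deliveryAttempt || 0;
    if (retryCount >= MAX_RETRIES) {
      // Park the payload and ACK (return) so Pub/Sub stops redelivering.
      await db.collection('computation_dead_letter_queue').add({
        originalData: data,
        finalAttemptAt: new Date(),
        failureReason: 'MAX_RETRIES_EXCEEDED',
      });
      return;
    }
    throw err; // NACK: Pub/Sub redelivers with an incremented deliveryAttempt.
  }
}
```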
 
@@ -1,7 +1,7 @@
  /**
  * @fileoverview Handles saving computation results with observability and Smart Cleanup.
- * UPDATED: Returns detailed failure reports for the Run Ledger.
- * UPDATED: Stops retrying on non-transient errors (Permissions, Invalid Args).
+ * UPDATED: Returns detailed failure reports AND metrics for the Audit Logger.
+ * UPDATED: Stops retrying on non-transient errors.
  */
  const { commitBatchInChunks } = require('./FirestoreUtils');
  const { updateComputationStatus } = require('./StatusRepository');
@@ -30,6 +30,13 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 
  for (const name in stateObj) {
  const calc = stateObj[name];
+
+ // Prep metrics container
+ const runMetrics = {
+ storage: { sizeBytes: 0, isSharded: false, shardCount: 1, keys: 0 },
+ validation: { isValid: true, anomalies: [] }
+ };
+
  try {
  const result = await calc.getResult();
 
@@ -37,14 +44,30 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
  const healthCheck = HeuristicValidator.analyze(calc.manifest.name, result, overrides);
 
  if (!healthCheck.valid) {
+ // If validation failed, we consider it an anomaly but we BLOCK the write (throw error)
+ runMetrics.validation.isValid = false;
+ runMetrics.validation.anomalies.push(healthCheck.reason);
  throw { message: healthCheck.reason, stage: 'QUALITY_CIRCUIT_BREAKER' };
  }
 
+ // Check for minor anomalies (validation warnings that didn't fail) - optional implementation
+ // For now, we assume if valid=true, anomalies are empty unless we add warning logic later.
+
  const isEmpty = !result || (typeof result === 'object' && Object.keys(result).length === 0) || (typeof result === 'number' && result === 0);
  if (isEmpty) {
- if (calc.manifest.hash) { successUpdates[name] = { hash: false, category: calc.manifest.category }; }
+ // Log empty success
+ if (calc.manifest.hash) {
+ successUpdates[name] = {
+ hash: false,
+ category: calc.manifest.category,
+ metrics: runMetrics // Return empty metrics
+ };
+ }
  continue;
  }
+
+ // Calculate Key Count rough estimate
+ if (typeof result === 'object') runMetrics.storage.keys = Object.keys(result).length;
 
  const mainDocRef = db.collection(config.resultsCollection)
  .doc(dStr)
@@ -71,7 +94,18 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 
  try {
  // 1. Prepare Shards with current constraints
+ // This function now needs to help us determine sharding info
  const updates = await prepareAutoShardedWrites(result, mainDocRef, logger, constraints.bytes, constraints.keys);
+
+ // METRICS CALCULATION
+ const pointer = updates.find(u => u.data._completed === true);
+ const isSharded = pointer && pointer.data._sharded === true;
+ const shardCount = isSharded ? (pointer.data._shardCount || 1) : 1;
+ const totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
+
+ runMetrics.storage.sizeBytes = totalSize;
+ runMetrics.storage.isSharded = isSharded;
+ runMetrics.storage.shardCount = shardCount;
 
  // 2. Audit Ledger (Only add to the first update batch)
  if (passNum && calc.manifest) {
@@ -83,16 +117,13 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
  completedAt: new Date(),
  actualHash: calc.manifest.hash,
  _verified: true,
- _shardingStrategy: attempt + 1 // Track which strategy worked
+ _shardingStrategy: attempt + 1
  },
  options: { merge: true }
  });
  }
 
  // 3. Attempt Commit
- const totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
- const isSharded = updates.some(u => u.data._sharded === true);
-
  await commitBatchInChunks(config, deps, updates, `${name} Results (Att ${attempt+1})`);
 
  // Log Success
@@ -106,23 +137,19 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
  lastError = commitErr;
  const msg = commitErr.message || '';
 
- // [IMPROVED] Check for non-retryable errors
- const isNonRetryable = NON_RETRYABLE_ERRORS.some(code => msg.includes(code));
+ const isNonRetryable = NON_RETRYABLE_ERRORS.includes(commitErr.code);
  if (isNonRetryable) {
  logger.log('ERROR', `[SelfHealing] ${name} encountered FATAL error (Attempt ${attempt + 1}): ${msg}. Aborting.`);
- throw commitErr; // Stop immediately
+ throw commitErr;
  }
 
- // Check if error is related to size/indexes
  const isSizeError = msg.includes('Transaction too big') || msg.includes('payload is too large');
- const isIndexError = msg.includes('too many index entries') || msg.includes('INVALID_ARGUMENT'); // Note: InvalidArg can be ambiguous, but usually index related in FS
+ const isIndexError = msg.includes('too many index entries') || msg.includes('INVALID_ARGUMENT');
 
  if (isSizeError || isIndexError) {
  logger.log('WARN', `[SelfHealing] ${name} failed write attempt ${attempt + 1}. Retrying with tighter constraints...`, { error: msg });
  continue; // Try next strategy
  } else {
- // If it's a network error or unknown, re-throw or handle based on policy.
- // For now, we allow retrying loop if it wasn't explicitly fatal.
  logger.log('WARN', `[SelfHealing] ${name} unknown error (Attempt ${attempt + 1}). Retrying...`, { error: msg });
  }
  }
@@ -137,8 +164,14 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
  }
  // ----------------------------------
 
- // Mark Success
- if (calc.manifest.hash) { successUpdates[name] = { hash: calc.manifest.hash, category: calc.manifest.category }; }
+ // Mark Success & Pass Metrics
+ if (calc.manifest.hash) {
+ successUpdates[name] = {
+ hash: calc.manifest.hash,
+ category: calc.manifest.category,
+ metrics: runMetrics // Pass metrics up
+ };
+ }
 
  // Capture Schema
  if (calc.manifest.class.getSchema) {
@@ -164,7 +197,8 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 
  failureReport.push({
  name,
- error: { message: msg, stack: e.stack, stage }
+ error: { message: msg, stack: e.stack, stage },
+ metrics: runMetrics // Pass incomplete metrics for debugging
  });
  }
  }
@@ -180,7 +214,6 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
  * Deletes result documents from a previous category location.
  */
  async function deleteOldCalculationData(dateStr, oldCategory, calcName, config, deps) {
-
  const { db, logger, calculationUtils } = deps;
  const { withRetry } = calculationUtils || { withRetry: (fn) => fn() };
 
@@ -227,8 +260,6 @@ async function prepareAutoShardedWrites(result, docRef, logger, maxBytes = 900 *
  const OVERHEAD_ALLOWANCE = 20 * 1024;
  const CHUNK_LIMIT = maxBytes - OVERHEAD_ALLOWANCE;
 
- // We only calculate totalSize loosely here for the "skip sharding" check.
- // The loop below enforces the real limits.
  const totalSize = calculateFirestoreBytes(result);
  const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16;
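Note: the new metrics derivation reads the sharding flags off the prepared write array rather than recomputing them after the commit. A standalone sketch of that derivation, with the `{ data }` write shape assumed from the diff:

```js
// Sketch of the metrics derivation above, given a prepared `updates` array
// of write descriptors shaped like { ref, data, options }.
function deriveStorageMetrics(updates) {
  // The pointer doc carries the _completed/_sharded/_shardCount flags.
  const pointer = updates.find(u => u.data._completed === true);
  const isSharded = !!(pointer && pointer.data._sharded === true);
  return {
    // Rough size estimate: JSON string length, as in the diff (chars, not bytes).
    sizeBytes: updates.reduce(
      (acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0),
    isSharded,
    shardCount: isSharded ? (pointer.data._shardCount || 1) : 1,
  };
}

// e.g. a single unsharded write:
console.log(deriveStorageMetrics([{ data: { _completed: true, v: 1 } }]));
// -> { sizeBytes: 25, isSharded: false, shardCount: 1 }
```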
 
@@ -70,10 +70,13 @@ class HeuristicValidator {
  if (numericProp !== undefined) numericValues.push(numericProp);
  }
  // --- TYPE B: Scalar / Primitive Result ---
- else if (typeof val === 'number') {
- if (val === 0) zeroCount++;
- if (isNaN(val) || !isFinite(val)) nanCount++;
- else numericValues.push(val);
+ if (typeof val === 'number') {
+ if (isNaN(val) || !isFinite(val)) {
+ nanCount++;
+ } else {
+ numericValues.push(val); // Include zeros
+ if (val === 0) zeroCount++;
+ }
  }
  }
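Note: in the restructured scalar branch, NaN and Infinity are screened first so they are never pushed, while zeros are both counted and kept in `numericValues`. A quick behavior check of the new branch in isolation:

```js
// Behavior check for the restructured scalar branch above.
function tally(values) {
  let zeroCount = 0, nanCount = 0;
  const numericValues = [];
  for (const val of values) {
    if (typeof val === 'number') {
      if (isNaN(val) || !isFinite(val)) {
        nanCount++;
      } else {
        numericValues.push(val); // zeros included in the stats sample
        if (val === 0) zeroCount++;
      }
    }
  }
  return { zeroCount, nanCount, numericValues };
}

console.log(tally([0, 1.5, NaN, Infinity, 0]));
// -> { zeroCount: 2, nanCount: 2, numericValues: [0, 1.5, 0] }
```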
 
@@ -1,53 +1,148 @@
  /**
- * @fileoverview Utility for recording computation run attempts (The Run Ledger).
- * Tracks success, failure, and error contexts for every execution pass.
+ * @fileoverview Utility for recording computation run attempts (The Audit Logger).
+ * REFACTORED: Organizes logs by Computation Name -> History.
+ * Implements aggregated error stats and advanced performance metrics.
  */
- const { generateProcessId } = require('../logger/logger');
+
+ const { FieldValue } = require('../utils/utils');
+ const os = require('os');
+
+ // Root collection for the new audit system
+ const AUDIT_COLLECTION = 'computation_audit_logs';
+
+ /**
+ * Sanitizes error messages to be used as Firestore Map keys.
+ * Replaces invalid characters (. / [ ] *) with underscores.
+ */
+ function sanitizeErrorKey(message) {
+ if (!message) return 'Unknown_Error';
+ // Take first 100 chars to avoid key limit issues
+ const shortMsg = message.toString().substring(0, 100);
+ return shortMsg.replace(/[./\[\]*`]/g, '_').trim();
+ }
 
  /**
- * Records a run attempt to the computation_run_history collection.
- * * @param {Firestore} db - Firestore instance
- * @param {Object} context - { date, computation, pass }
- * @param {string} status - 'SUCCESS', 'FAILURE', or 'CRASH'
- * @param {Object|null} error - Error object or null
- * @param {Object} metrics - { durationMs, ... }
+ * Records a run attempt with detailed metrics and aggregated stats.
+ * @param {Firestore} db - Firestore instance
+ * @param {Object} context - Context object
+ * @param {string} context.date - The "Target Date" of the computation
+ * @param {string} context.computation - The name of the calculation
+ * @param {string} context.pass - The topology pass number
+ * @param {string} status - 'SUCCESS', 'FAILURE', 'CRASH', or 'SKIPPED'
+ * @param {Object|null} error - Error object if failed
+ * @param {Object} detailedMetrics - Expanded metrics object (Optional, defaults provided)
+ * @param {number} [detailedMetrics.durationMs] - Execution time
+ * @param {Object} [detailedMetrics.storage] - { sizeBytes, isSharded, shardCount }
+ * @param {Object} [detailedMetrics.validation] - { isValid, anomalies: [] }
  */
- async function recordRunAttempt(db, context, status, error = null, metrics = {}) {
+ async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }) {
  if (!db || !context) return;
 
- const { date, computation, pass } = context;
- // Generate a unique ID for this specific run attempt
- const runId = `${Date.now()}_${generateProcessId('run', computation, date)}`;
+ const { date: targetDate, computation, pass } = context;
+ const now = new Date();
+ const triggerTimestamp = now.getTime();
+
+ // 1. Construct Paths
+ // Parent Doc: Stores global aggregates for this computation
+ const computationDocRef = db.collection(AUDIT_COLLECTION).doc(computation);
 
- const docRef = db.collection('computation_run_history')
- .doc(date)
- .collection('runs')
- .doc(runId);
+ // History Doc: Stores this specific run
+ // ID Format: targetDate_triggerTimestamp (Sortable by data date, then execution time)
+ const runId = `${targetDate}_${triggerTimestamp}`;
+ const runDocRef = computationDocRef.collection('history').doc(runId);
+
+ // 2. Prepare Metrics & Environment Info
+ const workerId = process.env.FUNCTION_TARGET || process.env.K_REVISION || os.hostname();
+
+ // Calculate size in MB
+ let sizeMB = 0;
+ if (detailedMetrics.storage && detailedMetrics.storage.sizeBytes) {
+ sizeMB = Number((detailedMetrics.storage.sizeBytes / (1024 * 1024)).toFixed(4));
+ }
 
- const entry = {
+ // Extract Validation Anomalies (Unusual Keys/Values)
+ const anomalies = detailedMetrics.validation?.anomalies || [];
+ if (error && error.message && error.message.includes('Data Integrity')) {
+ // If the error itself was a validation failure, add it to anomalies
+ anomalies.push(error.message);
+ }
+
+ // 3. Construct the Run Log Entry
+ const runEntry = {
+ // Identity
+ runId: runId,
  computationName: computation,
- date: date,
  pass: String(pass),
- timestamp: new Date().toISOString(),
+ workerId: workerId,
+
+ // Timing
+ targetDate: targetDate, // The date the data belongs to
+ triggerTime: now.toISOString(), // The date the code ran
+ durationMs: detailedMetrics.durationMs || 0,
+
+ // Status
  status: status,
- metrics: metrics
+
+ // Data Metrics
+ outputStats: {
+ sizeMB: sizeMB,
+ isSharded: !!detailedMetrics.storage?.isSharded,
+ shardCount: detailedMetrics.storage?.shardCount || 1,
+ keysWritten: detailedMetrics.storage?.keys || 0 // If available
+ },
+
+ // Health & Diagnostics
+ anomalies: anomalies, // Logs "Consistent 0s", "N/As" etc.
+
+ // Metadata
+ _schemaVersion: '2.0'
  };
 
+ // Attach Error Details if present
  if (error) {
- entry.error = {
+ runEntry.error = {
  message: error.message || 'Unknown Error',
- // Capture specific sharding/firestore stages if available
  stage: error.stage || 'UNKNOWN',
- code: error.code || null,
- stack: error.stack || null
+ stack: error.stack ? error.stack.substring(0, 1000) : null, // Truncate stack
+ code: error.code || null
  };
  }
 
- // Fire and forget (await but catch to ensure logging doesn't crash the worker)
+ // 4. Prepare Aggregation Update (Atomic Increments)
+ const statsUpdate = {
+ lastRunAt: now,
+ lastRunStatus: status,
+ totalRuns: FieldValue.increment(1)
+ };
+
+ if (status === 'SUCCESS') {
+ statsUpdate.successCount = FieldValue.increment(1);
+ } else {
+ statsUpdate.failureCount = FieldValue.increment(1);
+ // Increment specific error type counter
+ if (error) {
+ const safeKey = sanitizeErrorKey(error.message);
+ statsUpdate[`errorCounts.${safeKey}`] = FieldValue.increment(1);
+ }
+ }
+
+ // 5. Execute as Batch
  try {
- await docRef.set(entry);
+ const batch = db.batch();
+
+ // Set the specific run log
+ batch.set(runDocRef, runEntry);
+
+ // Merge updates into the parent computation document
+ // We use { merge: true } implicitly with set or explicit update.
+ // Using set({ merge: true }) ensures doc creation if it doesn't exist.
+ batch.set(computationDocRef, statsUpdate, { merge: true });
+
+ await batch.commit();
+
  } catch (e) {
- console.error(`[RunRecorder] Failed to save history for ${computation}:`, e.message);
+ // Fallback logging if Firestore fails (prevents infinite loop crashing)
+ console.error(`[RunRecorder] ❌ CRITICAL: Failed to write audit log for ${computation}`, e);
  }
  }
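Note: Firestore map keys may not contain characters like `. / [ ] *`, so raw error messages must be sanitized before being used as `errorCounts` keys. A standalone sanity check of the sanitizer exactly as defined in the diff:

```js
// Standalone check of sanitizeErrorKey as defined above.
function sanitizeErrorKey(message) {
  if (!message) return 'Unknown_Error';
  const shortMsg = message.toString().substring(0, 100);
  return shortMsg.replace(/[./\[\]*`]/g, '_').trim();
}

console.log(sanitizeErrorKey('Missing Root Data: prices.daily [2024-01-02]'));
// -> 'Missing Root Data: prices_daily _2024-01-02_'
console.log(sanitizeErrorKey(null)); // -> 'Unknown_Error'
```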
 
@@ -5,35 +5,39 @@
  const { build } = require('../context/ManifestBuilder');
  const { StructuredLogger, PROCESS_TYPES, generateProcessId } = require('../logger/logger');
 
- // Cache the manifest in global scope (warm start optimization)
- let cachedManifest = null;
+ // [FIX] Cache using a Map to handle different productLine combinations
+ const manifestCache = new Map();
 
  function getManifest(productLines = [], calculationsDir, dependencies = {}) {
- if (cachedManifest) {
- return cachedManifest;
+ // Generate a unique key for this specific request configuration
+ const cacheKey = JSON.stringify(productLines ? productLines.slice().sort() : ['ALL']);
+
+ if (manifestCache.has(cacheKey)) {
+ return manifestCache.get(cacheKey);
  }
 
  const logger = dependencies.logger || new StructuredLogger();
  const pid = generateProcessId(PROCESS_TYPES.MANIFEST, 'build', new Date().toISOString().slice(0,10));
 
- logger.log('INFO', 'Starting Manifest Build...', { processId: pid });
+ logger.log('INFO', 'Starting Manifest Build...', { processId: pid, scope: cacheKey });
 
  const startTime = Date.now();
  try {
- cachedManifest = build(productLines, calculationsDir);
+ const manifest = build(productLines, calculationsDir);
 
  // Log Topology Stats
  const passCounts = {};
- cachedManifest.forEach(c => { passCounts[c.pass] = (passCounts[c.pass] || 0) + 1; });
+ manifest.forEach(c => { passCounts[c.pass] = (passCounts[c.pass] || 0) + 1; });
 
  logger.log('INFO', 'Manifest Build Success', {
  processId: pid,
  durationMs: Date.now() - startTime,
- totalCalculations: cachedManifest.length,
+ totalCalculations: manifest.length,
  topology: passCounts
  });
 
- return cachedManifest;
+ manifestCache.set(cacheKey, manifest);
+ return manifest;
  } catch (e) {
  logger.log('FATAL', 'Manifest Build Failed', { processId: pid, error: e.message });
  throw e;
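Note: the cache key is order-insensitive because the product lines are sorted before serialization, so `['A','B']` and `['B','A']` share one cached manifest while a different scope builds its own. A quick illustration of the keying scheme used above:

```js
// Illustration of the cache-key scheme used by getManifest above.
function cacheKeyFor(productLines) {
  // slice() avoids mutating the caller's array; sort() makes order irrelevant.
  return JSON.stringify(productLines ? productLines.slice().sort() : ['ALL']);
}

console.log(cacheKeyFor(['stocks', 'crypto'])); // '["crypto","stocks"]'
console.log(cacheKeyFor(['crypto', 'stocks'])); // same key -> cache hit
console.log(cacheKeyFor(null));                 // '["ALL"]' -> distinct entry
```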
@@ -174,21 +174,32 @@ async function getPriceShardRefs(config, deps) {
  * @param {object} deps
  * @returns {Promise<Object>} The lookup map { "instrumentId": "shardDocId" }
  */
+ /**
+ * Ensures the Price Shard Index exists. If not, builds it by scanning all shards.
+ * [FIX] Added TTL check to ensure new instruments are discovered.
+ */
  async function ensurePriceShardIndex(config, deps) {
  const { db, logger } = deps;
  const metadataCol = config.metadataCollection || 'system_metadata';
- const indexDocRef = db.collection(metadataCol).doc('price_shard_index'); // TODO. TEST THIS SHARD INDEX SYSTEM, CURRENTLY UNUSED IN COMPUTATIONS BUT IS EXTREMELY EFFICIENT AND GREAT FOR COST REDUCTION
+ const indexDocRef = db.collection(metadataCol).doc('price_shard_index');
 
  // 1. Try to fetch existing index
  const snap = await indexDocRef.get();
  if (snap.exists) {
  const data = snap.data();
- // Simple expiry check (optional): Rebuild if older than 24h
- // For now, we trust it exists.
- return data.index || {};
- }
+
+ // [FIX] Check TTL (24 hours)
+ const lastUpdated = data.lastUpdated ? new Date(data.lastUpdated).getTime() : 0;
+ const now = Date.now();
+ const oneDayMs = 24 * 60 * 60 * 1000;
 
- logger.log('INFO', '[ShardIndex] Index not found. Building new Price Shard Index (Scanning all shards)...');
+ if ((now - lastUpdated) < oneDayMs) {
+ return data.index || {};
+ }
+ logger.log('INFO', '[ShardIndex] Index is stale (>24h). Rebuilding...');
+ } else {
+ logger.log('INFO', '[ShardIndex] Index not found. Building new Price Shard Index...');
+ }
 
  // 2. Build Index
  const collection = config.priceCollection || 'asset_prices';
@@ -199,9 +210,8 @@ async function ensurePriceShardIndex(config, deps) {
 
  snapshot.forEach(doc => {
  shardCount++;
- const data = doc.data(); // This loads the shard into memory, intensive but necessary once
+ const data = doc.data();
  if (data.history) {
- // Keys of history are Instrument IDs
  Object.keys(data.history).forEach(instId => {
  index[instId] = doc.id;
  });
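Note: the TTL logic assumes the index document stores a `lastUpdated` field that `new Date(...)` can parse; a missing field yields a timestamp of `0`, which always reads as stale and forces a rebuild. A compact sketch of the staleness decision:

```js
// Staleness decision used by ensurePriceShardIndex above.
// `data.lastUpdated` is assumed to be a date string or timestamp.
function isIndexFresh(data, oneDayMs = 24 * 60 * 60 * 1000) {
  const lastUpdated = data.lastUpdated ? new Date(data.lastUpdated).getTime() : 0;
  return (Date.now() - lastUpdated) < oneDayMs;
}

console.log(isIndexFresh({ lastUpdated: new Date().toISOString() })); // true
console.log(isIndexFresh({ lastUpdated: '2020-01-01' }));             // false -> rebuild
console.log(isIndexFresh({}));                                        // false -> rebuild
```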
@@ -3,7 +3,7 @@
  * It selects an available (unlocked) proxy for each request and locks it upon failure.
  * * This module is designed to be reusable and receives all dependencies
  * (firestore, logger) and configuration via its constructor.
- * --- MODIFIED: Now includes exponential backoff and retries specifically for rate-limit errors. ---
+ * --- MODIFIED: Fixed Hostname Collision Bug in _loadConfig ---
  */
  const { FieldValue } = require('@google-cloud/firestore');
  const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));
@@ -44,17 +44,44 @@ class IntelligentProxyManager {
  async _loadConfig() {
  if (Date.now() - this.configLastLoaded < this.CONFIG_CACHE_DURATION_MS) { return; }
  if (this.proxyUrls.length === 0) { return; }
+
  this.logger.log('INFO', "[ProxyManager] Refreshing proxy configuration and lock status...");
- try { const tempProxyStatus = {};
- for (const url of this.proxyUrls) { const owner = new URL(url).hostname; tempProxyStatus[owner] = { owner, url, status: 'unlocked' }; }
- if (this.proxyLockingEnabled) { const doc = await this.firestore.doc(this.PERFORMANCE_DOC_PATH).get();
- if (doc.exists) { const data = doc.data(); if (data.locks) { for (const owner in data.locks) { if (tempProxyStatus[owner] && data.locks[owner].locked === true) { tempProxyStatus[owner].status = 'locked'; } } } }
- } else { this.logger.log('TRACE', '[ProxyManager] Proxy locking is disabled, skipping lock status check.'); }
+
+ try {
+ const tempProxyStatus = {};
+ for (const url of this.proxyUrls) {
+ // [FIX] Use the full URL as the unique ID, sanitized for Firestore usage.
+ // Replaces all non-alphanumeric characters with underscores.
+ // Old logic: new URL(url).hostname -> caused collision because all are script.google.com
+ const owner = url.replace(/[^a-zA-Z0-9]/g, '_');
+
+ tempProxyStatus[owner] = { owner, url, status: 'unlocked' };
+ }
+
+ if (this.proxyLockingEnabled) {
+ const doc = await this.firestore.doc(this.PERFORMANCE_DOC_PATH).get();
+ if (doc.exists) {
+ const data = doc.data();
+ if (data.locks) {
+ for (const owner in data.locks) {
+ // If the sanitized URL key exists in locks and is locked, update status
+ if (tempProxyStatus[owner] && data.locks[owner].locked === true) {
+ tempProxyStatus[owner].status = 'locked';
+ }
+ }
+ }
+ }
+ } else {
+ this.logger.log('TRACE', '[ProxyManager] Proxy locking is disabled, skipping lock status check.');
+ }
+
  this.proxies = tempProxyStatus;
  this.configLastLoaded = Date.now();
  this.logger.log('SUCCESS', `[ProxyManager] Refreshed ${Object.keys(this.proxies).length} proxy statuses.`);
+
  } catch (error) {
- this.logger.log('ERROR', '[ProxyManager] Failed to load proxy config from Firestore.', { errorMessage: error.message, path: this.PERFORMANCE_DOC_PATH }); }
+ this.logger.log('ERROR', '[ProxyManager] Failed to load proxy config from Firestore.', { errorMessage: error.message, path: this.PERFORMANCE_DOC_PATH });
+ }
  }
 
  /**
@@ -65,6 +92,7 @@ class IntelligentProxyManager {
  await this._loadConfig();
  const availableProxies = this.proxyLockingEnabled ? Object.values(this.proxies).filter(p => p.status === 'unlocked') : Object.values(this.proxies);
  if (availableProxies.length === 0) { const errorMsg = this.proxyLockingEnabled ? "All proxies are locked. No proxy available." : "No proxies are loaded. Cannot make request."; this.logger.log('ERROR', `[ProxyManager] ${errorMsg}`); throw new Error(errorMsg); }
+ // Random selection to distribute load (consider Round Robin in future for 20k scale)
  const selected = availableProxies[Math.floor(Math.random() * availableProxies.length)];
  return { owner: selected.owner, url: selected.url };
  }
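Note on the collision being fixed: every Apps Script web app is served from `script.google.com`, so keying proxies by hostname collapsed them all into a single entry (and a single shared lock). Keying by the sanitized full URL keeps each deployment distinct:

```js
// Hostname vs. sanitized-URL keying, as described in the [FIX] comment above.
// The /macros/s/... URLs are illustrative placeholders.
const urls = [
  'https://script.google.com/macros/s/AAA/exec',
  'https://script.google.com/macros/s/BBB/exec',
];

const byHostname = new Set(urls.map(u => new URL(u).hostname));
console.log(byHostname.size); // 1 -- both proxies collide on 'script.google.com'

const bySanitizedUrl = new Set(urls.map(u => u.replace(/[^a-zA-Z0-9]/g, '_')));
console.log(bySanitizedUrl.size); // 2 -- each deployment gets its own key/lock
```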
@@ -77,12 +105,17 @@ class IntelligentProxyManager {
  if (!this.proxyLockingEnabled) { this.logger.log('TRACE', `[ProxyManager] Locking skipped for ${owner} (locking is disabled).`); return; }
  if (this.proxies[owner]) { this.proxies[owner].status = 'locked'; }
  this.logger.log('WARN', `[ProxyManager] Locking proxy: ${owner}`);
- try { const docRef = this.firestore.doc(this.PERFORMANCE_DOC_PATH); await docRef.set({ locks: { [owner]: { locked: true, lastLocked: FieldValue.serverTimestamp() } } }, { merge: true });
- } catch (error) { this.logger.log('ERROR', `[ProxyManager] Failed to write lock for ${owner} to Firestore.`, { errorMessage: error.message }); }
+ try {
+ const docRef = this.firestore.doc(this.PERFORMANCE_DOC_PATH);
+ // Use the sanitized owner key
+ await docRef.set({ locks: { [owner]: { locked: true, lastLocked: FieldValue.serverTimestamp() } } }, { merge: true });
+ } catch (error) {
+ this.logger.log('ERROR', `[ProxyManager] Failed to write lock for ${owner} to Firestore.`, { errorMessage: error.message });
+ }
  }
 
  /**
- * --- CORRECTED LOGIC: Makes a fetch request by trying different proxies ---
+ * Makes a fetch request by trying different proxies.
  * @param {string} targetUrl - The URL to fetch.
  * @param {object} options - Fetch options (e.g., headers).
  * @returns {Promise<object>} A mock Response object.
@@ -105,7 +138,7 @@ class IntelligentProxyManager {
 
  // 2. Make a SINGLE attempt with this selected proxy.
  const response = await this._fetchViaAppsScript(proxy.url, targetUrl, options);
- lastResponse = response; // Save this response in case it's the last one
+ lastResponse = response;
 
  // 3. Case 1: Success! Return immediately.
  if (response.ok) {
@@ -124,7 +157,7 @@ class IntelligentProxyManager {
  // LOCK THE FAILED PROXY so _selectProxy() won't pick it again.
  await this.lockProxy(proxy.owner);
 
- // Back off slightly before trying the *next* proxy to avoid a thundering herd.
+ // Back off slightly before trying the *next* proxy.
  await sleep(this.INITIAL_BACKOFF_MS * attempt);
 
  continue; // Go to the next loop iteration to select a *new* proxy.
@@ -136,22 +169,18 @@ class IntelligentProxyManager {
  return response;
  }
 
- // 6. If loop finishes, all (this.MAX_RETRIES) proxy attempts failed.
+ // 6. If loop finishes, all proxy attempts failed.
  this.logger.log('ERROR', `[ProxyManager] Request failed after ${this.MAX_RETRIES} proxy attempts.`, { url: targetUrl, lastStatus: lastResponse?.status });
- return lastResponse; // Return the last failed response
+ return lastResponse;
  }
 
-
- // Inside backend_npm_pkgs/bulltrackers-module/functions/core/utils/intelligent_proxy_manager.js
-
  /**
  * Internal function to call the Google AppScript proxy.
- * --- MODIFIED: Now checks Content-Type for HTML to robustly detect rate limits ---
  * @private
  */
  async _fetchViaAppsScript(proxyUrl, targetUrl, options) {
  const payload = { url: targetUrl, ...options };
- let response; // Declare response here to access in catch block
+ let response;
 
  try {
  response = await fetch(proxyUrl, {
@@ -160,7 +189,6 @@ class IntelligentProxyManager {
  body: JSON.stringify(payload)
  });
 
- // --- THIS IS THE DOCTYPE CHECK ---
  // Check the response headers from the proxy itself.
  const contentType = response.headers.get('content-type') || '';
  if (contentType.includes('text/html')) {
@@ -169,20 +197,19 @@ class IntelligentProxyManager {
  this.logger.log('WARN', `[ProxyManager] Proxy returned HTML error page (rate limit).`, {
  status: response.status,
  proxy: proxyUrl,
- errorSnippet: errorText.substring(0, 150) // Log a snippet
+ errorSnippet: errorText.substring(0, 150)
  });
 
  return {
  ok: false,
- status: response.status, // Will be 500, 503, etc.
+ status: response.status,
  isUrlFetchError: true,
- isRateLimitError: true, // <--- This is the key change
+ isRateLimitError: true,
  error: { message: `Proxy returned HTML error page (likely rate limit).` },
  headers: response.headers,
  text: () => Promise.resolve(errorText)
  };
  }
- // --- END DOCTYPE CHECK ---
 
  // If it's not HTML, but still not OK (e.g., 400 Bad Request),
  // it's a non-rate-limit proxy error.
@@ -194,7 +221,6 @@ class IntelligentProxyManager {
  error: errorText
  });
 
- // We can still check 429 here, just in case Google sends one.
  const isRateLimit = response.status === 429;
 
  return {
@@ -211,13 +237,9 @@ class IntelligentProxyManager {
  // If we are here, Content-Type was application/json and status was OK.
  const proxyResponse = await response.json();
 
- // Now we check for errors *inside* the JSON
- // (e.g., the Apps Script caught an error and reported it).
+ // Check for errors *inside* the JSON (caught by Apps Script)
  if (proxyResponse.error) {
  const errorMsg = proxyResponse.error.message || '';
-
- // Fallback check for "invoked too many times" *inside* the JSON error,
- // just in case. The HTML check is now our primary defense.
  const isRateLimit = errorMsg.toLowerCase().includes('service invoked too many times');
 
  if (isRateLimit) {
@@ -225,7 +247,6 @@ class IntelligentProxyManager {
  return { ok: false, status: 500, error: proxyResponse.error, isUrlFetchError: true, isRateLimitError: true, headers: new Headers() };
  }
 
- // Other non-rate-limit error caught by the script
  return { ok: false, status: 500, error: proxyResponse.error, isUrlFetchError: true, isRateLimitError: false, headers: new Headers(), text: () => Promise.resolve(errorMsg) };
  }
 
@@ -246,7 +267,7 @@ class IntelligentProxyManager {
  ok: false,
  status: 0,
  isUrlFetchError: true,
- isRateLimitError: false, // Not a rate limit, a network failure
+ isRateLimitError: false,
  error: { message: `Network error: ${networkError.message}` },
  headers: new Headers()
  };
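Note: across these `_fetchViaAppsScript` hunks the detection strategy is layered: an HTML Content-Type from the proxy means Apps Script served its quota error page (treated as a rate limit), an HTTP 429 or a "service invoked too many times" message inside the JSON body are fallbacks, and network failures are explicitly not rate limits. A distilled, assumption-labeled version of that classifier:

```js
// Distilled rate-limit classification mirroring _fetchViaAppsScript above.
// The input shape here is a simplification, not the module's real API.
function classifyProxyResponse({ contentType = '', status = 200, jsonError = null }) {
  if (contentType.includes('text/html')) return 'RATE_LIMIT'; // quota error page
  if (status === 429) return 'RATE_LIMIT';                    // explicit throttle
  if (jsonError &&
      jsonError.toLowerCase().includes('service invoked too many times')) {
    return 'RATE_LIMIT';                                      // error inside JSON body
  }
  return status >= 400 ? 'PROXY_ERROR' : 'OK';
}

console.log(classifyProxyResponse({ contentType: 'text/html', status: 503 })); // RATE_LIMIT
console.log(classifyProxyResponse({ status: 400 }));                           // PROXY_ERROR
console.log(classifyProxyResponse({}));                                        // OK
```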
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "bulltrackers-module",
- "version": "1.0.260",
+ "version": "1.0.262",
  "description": "Helper Functions for Bulltrackers.",
  "main": "index.js",
  "files": [