bulltrackers-module 1.0.259 → 1.0.260

@@ -1,18 +1,26 @@
  /**
   * @fileoverview Handles saving computation results with observability and Smart Cleanup.
   * UPDATED: Returns detailed failure reports for the Run Ledger.
+  * UPDATED: Stops retrying on non-transient errors (Permissions, Invalid Args).
   */
- const { commitBatchInChunks } = require('./FirestoreUtils');
- const { updateComputationStatus } = require('./StatusRepository');
- const { batchStoreSchemas } = require('../utils/schema_capture');
+ const { commitBatchInChunks } = require('./FirestoreUtils');
+ const { updateComputationStatus } = require('./StatusRepository');
+ const { batchStoreSchemas } = require('../utils/schema_capture');
  const { generateProcessId, PROCESS_TYPES } = require('../logger/logger');

- const { HeuristicValidator } = require('./ResultsValidator'); // Validator
- const validationOverrides = require('../config/validation_overrides'); // Override file
+ const { HeuristicValidator } = require('./ResultsValidator');
+ const validationOverrides = require('../config/validation_overrides');
+
+ const NON_RETRYABLE_ERRORS = [
+   'INVALID_ARGUMENT',   // Schema/Type mismatch
+   'PERMISSION_DENIED',  // Auth issue
+   'DATA_LOSS',          // Firestore corruption
+   'FAILED_PRECONDITION' // Transaction requirements not met
+ ];

  async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
    const successUpdates = {};
-   const failureReport = []; // [NEW] Track failures per calculation
+   const failureReport = [];
    const schemas = [];
    const cleanupTasks = [];
    const { logger, db } = deps;
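Note: the new `NON_RETRYABLE_ERRORS` list names Firestore status codes that will never succeed on retry. A minimal standalone sketch of how such a list short-circuits a retry loop — the `commitFn` callback and attempt count are hypothetical illustrations, not part of the package:

```js
// Sketch: abort immediately on permanent Firestore errors instead of
// burning retry attempts; mirrors the classification added above.
const NON_RETRYABLE_ERRORS = ['INVALID_ARGUMENT', 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION'];

async function writeWithRetry(commitFn, maxAttempts = 3) {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await commitFn();
    } catch (err) {
      const msg = err.message || '';
      // Permanent failures (bad args, auth, corruption) rethrow at once.
      if (NON_RETRYABLE_ERRORS.some(code => msg.includes(code))) throw err;
      if (attempt === maxAttempts) throw err; // transient, but out of attempts
    }
  }
}
```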
@@ -25,21 +33,18 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
    try {
      const result = await calc.getResult();

-     const overrides = validationOverrides[calc.manifest.name] || {};
+     const overrides = validationOverrides[calc.manifest.name] || {};
      const healthCheck = HeuristicValidator.analyze(calc.manifest.name, result, overrides);

      if (!healthCheck.valid) {
-       // We throw a specific error stage so we know to BLOCK it, not retry it.
-       throw {
-         message: healthCheck.reason,
-         stage: 'QUALITY_CIRCUIT_BREAKER'
-       };
+       throw { message: healthCheck.reason, stage: 'QUALITY_CIRCUIT_BREAKER' };
      }

-     // Validate Result
-     const isEmpty = !result || (typeof result === 'object' && Object.keys(result).length === 0) || (typeof result === 'number' && result === 0);
-
-     if (isEmpty) { if (calc.manifest.hash) { successUpdates[name] = { hash: false, category: calc.manifest.category }; } continue; }
+     const isEmpty = !result || (typeof result === 'object' && Object.keys(result).length === 0) || (typeof result === 'number' && result === 0);
+     if (isEmpty) {
+       if (calc.manifest.hash) { successUpdates[name] = { hash: false, category: calc.manifest.category }; }
+       continue;
+     }

      const mainDocRef = db.collection(config.resultsCollection)
        .doc(dStr)
@@ -48,29 +53,92 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
        .collection(config.computationsSubcollection)
        .doc(name);

-     // [CRITICAL UPDATE] Catch errors specifically during Sharding/Prep
-     let updates;
-     try {
-       updates = await prepareAutoShardedWrites(result, mainDocRef, logger);
-     } catch (prepError) {
-       // If this fails, it's likely a memory or logic issue before DB commit
-       throw { message: prepError.message, stack: prepError.stack, stage: 'PREPARE_SHARDS' };
+     // --- REACTIVE SELF-HEALING LOOP ---
+     let committed = false;
+     // Strategy: 1=Normal, 2=Safe (Halved), 3=Aggressive (Quartered + Key Limit)
+     const strategies = [
+       { bytes: 900 * 1024, keys: null },  // Attempt 1: Standard
+       { bytes: 450 * 1024, keys: 10000 }, // Attempt 2: High Index usage
+       { bytes: 200 * 1024, keys: 2000 }   // Attempt 3: Extreme fragmentation
+     ];
+
+     let lastError = null;
+
+     for (let attempt = 0; attempt < strategies.length; attempt++) {
+       if (committed) break;
+
+       const constraints = strategies[attempt];
+
+       try {
+         // 1. Prepare Shards with current constraints
+         const updates = await prepareAutoShardedWrites(result, mainDocRef, logger, constraints.bytes, constraints.keys);
+
+         // 2. Audit Ledger (Only add to the first update batch)
+         if (passNum && calc.manifest) {
+           const ledgerRef = db.collection(`computation_audit_ledger/${dStr}/passes/${passNum}/tasks`).doc(name);
+           updates.push({
+             ref: ledgerRef,
+             data: {
+               status: 'COMPLETED',
+               completedAt: new Date(),
+               actualHash: calc.manifest.hash,
+               _verified: true,
+               _shardingStrategy: attempt + 1 // Track which strategy worked
+             },
+             options: { merge: true }
+           });
+         }
+
+         // 3. Attempt Commit
+         const totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
+         const isSharded = updates.some(u => u.data._sharded === true);
+
+         await commitBatchInChunks(config, deps, updates, `${name} Results (Att ${attempt+1})`);
+
+         // Log Success
+         if (logger && logger.logStorage) {
+           logger.logStorage(pid, name, dStr, mainDocRef.path, totalSize, isSharded);
+         }
+
+         committed = true; // Exit loop
+
+       } catch (commitErr) {
+         lastError = commitErr;
+         const msg = commitErr.message || '';
+
+         // [IMPROVED] Check for non-retryable errors
+         const isNonRetryable = NON_RETRYABLE_ERRORS.some(code => msg.includes(code));
+         if (isNonRetryable) {
+           logger.log('ERROR', `[SelfHealing] ${name} encountered FATAL error (Attempt ${attempt + 1}): ${msg}. Aborting.`);
+           throw commitErr; // Stop immediately
+         }
+
+         // Check if error is related to size/indexes
+         const isSizeError = msg.includes('Transaction too big') || msg.includes('payload is too large');
+         const isIndexError = msg.includes('too many index entries') || msg.includes('INVALID_ARGUMENT'); // Note: InvalidArg can be ambiguous, but usually index related in FS
+
+         if (isSizeError || isIndexError) {
+           logger.log('WARN', `[SelfHealing] ${name} failed write attempt ${attempt + 1}. Retrying with tighter constraints...`, { error: msg });
+           continue; // Try next strategy
+         } else {
+           // If it's a network error or unknown, re-throw or handle based on policy.
+           // For now, we allow retrying loop if it wasn't explicitly fatal.
+           logger.log('WARN', `[SelfHealing] ${name} unknown error (Attempt ${attempt + 1}). Retrying...`, { error: msg });
+         }
+       }
      }

-     // Audit Ledger
-     if (passNum && calc.manifest) {
-       const ledgerRef = db.collection(`computation_audit_ledger/${dStr}/passes/${passNum}/tasks`).doc(name);
-       updates.push({
-         ref: ledgerRef,
-         data: {
-           status: 'COMPLETED',
-           completedAt: new Date(),
-           actualHash: calc.manifest.hash,
-           _verified: true
-         },
-         options: { merge: true }
-       });
+     if (!committed) {
+       throw {
+         message: `Exhausted sharding strategies. Last error: ${lastError?.message}`,
+         stack: lastError?.stack,
+         stage: 'SHARDING_LIMIT_EXCEEDED'
+       };
      }
+     // ----------------------------------
+
+     // Mark Success
+     if (calc.manifest.hash) { successUpdates[name] = { hash: calc.manifest.hash, category: calc.manifest.category }; }

      // Capture Schema
      if (calc.manifest.class.getSchema) {
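Note: the hunk above replaces the single-shot `prepareAutoShardedWrites` call with an escalating-constraint loop. A condensed sketch of the pattern, with hypothetical `prepareWrites`/`commit` callbacks standing in for `prepareAutoShardedWrites`/`commitBatchInChunks`:

```js
// Sketch: retry the write with progressively smaller shards until it fits.
const STRATEGIES = [
  { bytes: 900 * 1024, keys: null },  // standard
  { bytes: 450 * 1024, keys: 10000 }, // halved, key-capped
  { bytes: 200 * 1024, keys: 2000 }   // extreme fragmentation
];

async function commitWithSelfHealing(result, prepareWrites, commit) {
  let lastError = null;
  for (let attempt = 0; attempt < STRATEGIES.length; attempt++) {
    const { bytes, keys } = STRATEGIES[attempt];
    try {
      const updates = await prepareWrites(result, bytes, keys); // re-shard under tighter limits
      await commit(updates);
      return attempt + 1; // report which strategy succeeded
    } catch (err) {
      lastError = err; // fall through to the next, stricter strategy
    }
  }
  throw new Error(`Exhausted sharding strategies. Last error: ${lastError?.message}`);
}
```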
@@ -83,33 +151,12 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
        });
      }

-     if (updates.length > 0) {
-       const totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
-       const isSharded = updates.some(u => u.data._sharded === true);
-
-       try {
-         await commitBatchInChunks(config, deps, updates, `${name} Results`);
-       } catch (commitErr) {
-         // Check for Firestore specific limits
-         let stage = 'COMMIT_BATCH';
-         let msg = commitErr.message;
-         if (msg.includes('Transaction too big') || msg.includes('payload is too large')) { stage = 'SHARDING_LIMIT_EXCEEDED'; msg = `Firestore Limit Exceeded: ${msg}`; }
-         throw { message: msg, stack: commitErr.stack, stage };
-       }
-
-       // Log Storage
-       if (logger && logger.logStorage) { logger.logStorage(pid, name, dStr, mainDocRef.path, totalSize, isSharded); }
-
-       // Mark Success
-       if (calc.manifest.hash) { successUpdates[name] = { hash: calc.manifest.hash, category: calc.manifest.category }; }
-
-       // Cleanup Migration
-       if (calc.manifest.previousCategory && calc.manifest.previousCategory !== calc.manifest.category) {
-         cleanupTasks.push(deleteOldCalculationData(dStr, calc.manifest.previousCategory, name, config, deps));
-       }
+     // Cleanup Migration
+     if (calc.manifest.previousCategory && calc.manifest.previousCategory !== calc.manifest.category) {
+       cleanupTasks.push(deleteOldCalculationData(dStr, calc.manifest.previousCategory, name, config, deps));
      }
+
    } catch (e) {
-     // [NEW] Intelligent Failure Reporting
      const stage = e.stage || 'EXECUTION';
      const msg = e.message || 'Unknown error';

@@ -123,20 +170,14 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
    }

    if (schemas.length) batchStoreSchemas(deps, config, schemas).catch(() => {});
-
    if (cleanupTasks.length > 0) { await Promise.allSettled(cleanupTasks); }
-
    if (!skipStatusWrite && Object.keys(successUpdates).length > 0) { await updateComputationStatus(dStr, successUpdates, config, deps); }

-   // [UPDATE] Return both success and failures so the Worker can log them
    return { successUpdates, failureReport };
  }

  /**
   * Deletes result documents from a previous category location.
-  * This function exists to handle the deleteion of old data,
-  * The general use case is that if a developer changes a calculations' category,
-  * We want to clean up and delete the old path, the change of a category would trigger a re-run of the calculation to naturally move the data to the new location
   */
  async function deleteOldCalculationData(dateStr, oldCategory, calcName, config, deps) {

@@ -151,7 +192,7 @@ async function deleteOldCalculationData(dateStr, oldCategory, calcName, config,
      .collection(config.computationsSubcollection)
      .doc(calcName);

-   const shardsCol = oldDocRef.collection('_shards');
+   const shardsCol = oldDocRef.collection('_shards');
    const shardsSnap = await withRetry(() => shardsCol.listDocuments(), 'ListOldShards');

    const batch = db.batch();
@@ -182,19 +223,24 @@ function calculateFirestoreBytes(value) {
    return 0;
  }

- async function prepareAutoShardedWrites(result, docRef, logger) {
-   const SAFETY_THRESHOLD_BYTES = 1000 * 1024;
+ async function prepareAutoShardedWrites(result, docRef, logger, maxBytes = 900 * 1024, maxKeys = null) {
    const OVERHEAD_ALLOWANCE = 20 * 1024;
-   const CHUNK_LIMIT = SAFETY_THRESHOLD_BYTES - OVERHEAD_ALLOWANCE;
+   const CHUNK_LIMIT = maxBytes - OVERHEAD_ALLOWANCE;
+
+   // We only calculate totalSize loosely here for the "skip sharding" check.
+   // The loop below enforces the real limits.
    const totalSize = calculateFirestoreBytes(result);
    const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16;
+
    const writes = [];
    const shardCollection = docRef.collection('_shards');
    let currentChunk = {};
    let currentChunkSize = 0;
+   let currentKeyCount = 0;
    let shardIndex = 0;

-   if ((totalSize + docPathSize) < CHUNK_LIMIT) {
+   // Fast path: If small enough AND keys are safe
+   if (!maxKeys && (totalSize + docPathSize) < CHUNK_LIMIT) {
      const data = {
        ...result,
        _completed: true,
@@ -210,17 +256,24 @@ async function prepareAutoShardedWrites(result, docRef, logger) {
      const valueSize = calculateFirestoreBytes(value);
      const itemSize = keySize + valueSize;

-     if (currentChunkSize + itemSize > CHUNK_LIMIT) {
+     const byteLimitReached = (currentChunkSize + itemSize > CHUNK_LIMIT);
+     const keyLimitReached = (maxKeys && currentKeyCount + 1 >= maxKeys);
+
+     if (byteLimitReached || keyLimitReached) {
        writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } });
        shardIndex++;
        currentChunk = {};
        currentChunkSize = 0;
+       currentKeyCount = 0;
      }
      currentChunk[key] = value;
      currentChunkSize += itemSize;
+     currentKeyCount++;
    }

-   if (Object.keys(currentChunk).length > 0) { writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } }); }
+   if (Object.keys(currentChunk).length > 0) {
+     writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } });
+   }

    const pointerData = {
      _completed: true,
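Note: `prepareAutoShardedWrites` now enforces two independent limits per shard — a byte budget and an optional key cap. A self-contained sketch of that dual-limit chunking, where `sizeOf` is a hypothetical stand-in for `calculateFirestoreBytes`:

```js
// Sketch: flush the current chunk when EITHER the byte budget or the
// optional key cap would be exceeded by the next entry.
function chunkEntries(obj, maxBytes, maxKeys = null, sizeOf = v => JSON.stringify(v).length) {
  const chunks = [];
  let current = {};
  let bytes = 0;
  let keys = 0;
  for (const [key, value] of Object.entries(obj)) {
    const itemSize = Buffer.byteLength(key, 'utf8') + sizeOf(value);
    if (bytes + itemSize > maxBytes || (maxKeys && keys + 1 >= maxKeys)) {
      chunks.push(current); // seal this shard, start a fresh one
      current = {}; bytes = 0; keys = 0;
    }
    current[key] = value;
    bytes += itemSize;
    keys++;
  }
  if (Object.keys(current).length > 0) chunks.push(current); // trailing partial shard
  return chunks;
}
```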
@@ -1,14 +1,13 @@
  /**
   * @fileoverview Build Reporter & Auto-Runner.
   * Generates a "Pre-Flight" report of what the computation system WILL do.
-  * UPDATED: Now fetches 'prevDailyStatus' to enforce strict historical continuity checks.
+  * UPDATED: Optimized with Parallel Status Fetches inside the date loop.
   */

  const { analyzeDateExecution } = require('../WorkflowOrchestrator');
  const { fetchComputationStatus } = require('../persistence/StatusRepository');
  const { normalizeName, getExpectedDateStrings, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils');
  const { checkRootDataAvailability } = require('../data/AvailabilityChecker');
- const { FieldValue } = require('@google-cloud/firestore');
  const pLimit = require('p-limit');
  const path = require('path');
  const packageJson = require(path.join(__dirname, '..', '..', '..', 'package.json'));
@@ -21,11 +20,7 @@ const packageVersion = packageJson.version;
  async function ensureBuildReport(config, dependencies, manifest) {
    const { db, logger } = dependencies;
    const now = new Date();
-
-   // BuildId still includes timestamp for uniqueness
    const buildId = `v${packageVersion}_${now.getFullYear()}-${String(now.getMonth()+1).padStart(2,'0')}-${String(now.getDate()).padStart(2,'0')}_${String(now.getHours()).padStart(2,'0')}-${String(now.getMinutes()).padStart(2,'0')}-${String(now.getSeconds()).padStart(2,'0')}`;
-
-   // Reference to "latest" doc
    const latestRef = db.collection('computation_build_records').doc('latest');

    try {
@@ -38,10 +33,8 @@ async function ensureBuildReport(config, dependencies, manifest) {
      }

      logger.log('INFO', `[BuildReporter] 🚀 New Version Detected (${packageVersion}). Auto-running Pre-flight Report...`);
-
      await generateBuildReport(config, dependencies, manifest, 90, buildId);
-
-     // Update "latest" pointer
+
      await latestRef.set({
        packageVersion,
        buildId,
@@ -55,11 +48,6 @@ async function ensureBuildReport(config, dependencies, manifest) {

  /**
   * Generates the report and saves to Firestore.
-  * @param {object} config
-  * @param {object} dependencies
-  * @param {Array} manifest
-  * @param {number} daysBack - Days to simulate (Default 90)
-  * @param {string} customBuildId - Optional ID override
   */
  async function generateBuildReport(config, dependencies, manifest, daysBack = 90, customBuildId = null) {
    const { db, logger } = dependencies;
@@ -86,99 +74,61 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
    let totalReRuns = 0;
    let totalNew = 0;

-   // 2. PARALLEL PROCESSING (Fix for DEADLINE_EXCEEDED)
-   // Run 20 reads in parallel.
+   // 2. PARALLEL PROCESSING
    const limit = pLimit(20);

    const processingPromises = datesToCheck.map(dateStr => limit(async () => {
      try {
-       // A. Fetch REAL status from DB (What ran previously?)
-       const dailyStatus = await fetchComputationStatus(dateStr, config, dependencies);
-
-       // [NEW] B. Fetch YESTERDAY'S Status (For strict continuity checks)
-       let prevDailyStatus = null;
-
-       // Only fetch if ANY calculation in the manifest is historical
-       // (Optimization: In BuildReporter we pass the full manifest, so we check if any exist generally)
+       // [IMPROVED] Fetch all statuses in parallel
+       const fetchPromises = [
+         // A. Real status
+         fetchComputationStatus(dateStr, config, dependencies),
+         // C. Real Root Data
+         checkRootDataAvailability(dateStr, config, dependencies, DEFINITIVE_EARLIEST_DATES)
+       ];
+
+       // B. Yesterday's Status (only if needed)
+       let prevDateStr = null;
        if (manifest.some(c => c.isHistorical)) {
          const prevDate = new Date(dateStr + 'T00:00:00Z');
          prevDate.setUTCDate(prevDate.getUTCDate() - 1);
-         const prevDateStr = prevDate.toISOString().slice(0, 10);
+         prevDateStr = prevDate.toISOString().slice(0, 10);

-         // Ensure we don't look before the dawn of time
          if (prevDate >= DEFINITIVE_EARLIEST_DATES.absoluteEarliest) {
-           prevDailyStatus = await fetchComputationStatus(prevDateStr, config, dependencies);
-         } else {
-           prevDailyStatus = {}; // Pre-epoch is valid empty context
+           fetchPromises.push(fetchComputationStatus(prevDateStr, config, dependencies));
          }
        }

-       // C. REAL Root Data Check
-       // Uses the new Indexer logic (returns null refs, but valid status flags)
-       const availability = await checkRootDataAvailability(dateStr, config, dependencies, DEFINITIVE_EARLIEST_DATES);
-
+       const results = await Promise.all(fetchPromises);
+       const dailyStatus = results[0];
+       const availability = results[1];
+       // If we fetched prevStatus, it's at index 2
+       const prevDailyStatus = (prevDateStr && results[2]) ? results[2] : (prevDateStr ? {} : null);
+
        const rootDataStatus = availability ? availability.status : {
-         hasPortfolio: false,
-         hasHistory: false,
-         hasSocial: false,
-         hasInsights: false,
-         hasPrices: false
+         hasPortfolio: false, hasHistory: false, hasSocial: false, hasInsights: false, hasPrices: false
        };

        // D. Run Logic Analysis
-       // Now passes prevDailyStatus to enable the "Blocked if yesterday missing" logic
        const analysis = analyzeDateExecution(dateStr, manifest, rootDataStatus, dailyStatus, manifestMap, prevDailyStatus);

        // E. Format Findings
        const dateSummary = {
-         willRun: [],
-         willReRun: [],
-         blocked: [],
-         impossible: []
+         willRun: [], willReRun: [], blocked: [], impossible: []
        };

-       // -- Runnable (New) --
-       analysis.runnable.forEach(item => {
-         dateSummary.willRun.push({ name: item.name, reason: "New / No Previous Record" });
-       });
+       analysis.runnable.forEach(item => dateSummary.willRun.push({ name: item.name, reason: "New / No Previous Record" }));
+       analysis.reRuns.forEach(item => dateSummary.willReRun.push({ name: item.name, reason: item.previousCategory ? "Migration" : "Hash Mismatch" }));
+       analysis.impossible.forEach(item => dateSummary.impossible.push({ name: item.name, reason: item.reason }));
+       [...analysis.blocked, ...analysis.failedDependency].forEach(item => dateSummary.blocked.push({ name: item.name, reason: item.reason || 'Dependency' }));

-       // -- Re-Runs (Hash Mismatch / Migration) --
-       analysis.reRuns.forEach(item => {
-         let reason = "Hash Mismatch";
-         let details = `Old: ${item.oldHash?.substring(0,6)}... New: ${item.newHash?.substring(0,6)}...`;
-
-         if (item.previousCategory) {
-           reason = "Migration";
-           details = `Moving ${item.previousCategory} -> ${item.newCategory}`;
-         }
-
-         dateSummary.willReRun.push({ name: item.name, reason, details });
-       });
-
-       // -- Impossible (Permanent) --
-       analysis.impossible.forEach(item => {
-         dateSummary.impossible.push({ name: item.name, reason: item.reason });
-       });
-
-       // -- Blocked (Retriable) --
-       analysis.blocked.forEach(item => {
-         dateSummary.blocked.push({ name: item.name, reason: item.reason });
-       });
-       analysis.failedDependency.forEach(item => {
-         dateSummary.blocked.push({ name: item.name, reason: `Dependency Missing: ${item.missing.join(', ')}` });
-       });
-
-       // Return result for aggregation
        const hasUpdates = dateSummary.willRun.length || dateSummary.willReRun.length || dateSummary.blocked.length || dateSummary.impossible.length;

        return {
          dateStr,
          dateSummary,
          hasUpdates,
-         stats: {
-           new: dateSummary.willRun.length,
-           rerun: dateSummary.willReRun.length
-         }
+         stats: { new: dateSummary.willRun.length, rerun: dateSummary.willReRun.length }
        };

      } catch (err) {
@@ -187,7 +137,6 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
      }
    }));

-   // Wait for all dates to process
    const results = await Promise.all(processingPromises);

    // 3. Aggregate Results
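Note: the BuildReporter change swaps three sequential awaits for a single `Promise.all`, appending the third fetch (yesterday's status) only when a historical calculation exists and reading it back by position, all under the same `p-limit` concurrency cap the date loop uses. A minimal sketch of that conditional fan-out — `fetchStatus`, `fetchAvailability`, and `dates` are hypothetical stand-ins, not the package's exports:

```js
const pLimit = require('p-limit');

// Sketch: at most 20 dates in flight; per date, 2-3 reads run in parallel.
async function loadAll(dates, needsPrev, fetchStatus, fetchAvailability) {
  const limit = pLimit(20);
  return Promise.all(dates.map(dateStr => limit(async () => {
    const promises = [fetchStatus(dateStr), fetchAvailability(dateStr)];
    if (needsPrev) promises.push(fetchStatus(prevDay(dateStr))); // optional third slot
    const results = await Promise.all(promises);
    return {
      dailyStatus: results[0],
      availability: results[1],
      // index 2 only exists when the prev fetch was pushed
      prevDailyStatus: needsPrev ? (results[2] ?? {}) : null
    };
  })));
}

function prevDay(dateStr) {
  const d = new Date(dateStr + 'T00:00:00Z');
  d.setUTCDate(d.getUTCDate() - 1);
  return d.toISOString().slice(0, 10);
}
```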
@@ -1,12 +1,34 @@
  /**
   * @fileoverview Schema capture utility for computation outputs
   * This module batches and stores pre-defined static schemas in Firestore.
+  * UPDATED: Added schema validation to prevent silent batch failures.
   */

+ /**
+  * Validates a schema object before storage.
+  * Checks for circular references and size limits.
+  * @param {object} schema
+  * @returns {object} { valid: boolean, reason: string }
+  */
+ function validateSchema(schema) {
+   try {
+     // 1. Detect circular references
+     const jsonStr = JSON.stringify(schema);
+
+     // 2. Ensure it's not too large (Firestore limit: 1MB, reserve 100KB for metadata)
+     const size = Buffer.byteLength(jsonStr);
+     if (size > 900 * 1024) {
+       return { valid: false, reason: `Schema exceeds 900KB limit (${(size/1024).toFixed(2)} KB)` };
+     }
+
+     return { valid: true };
+   } catch (e) {
+     return { valid: false, reason: `Serialization failed: ${e.message}` };
+   }
+ }
+
  /**
   * Batch store schemas for multiple computations.
-  * This function now expects a fully-formed schema, not sample output.
-  * It strictly stamps a 'lastUpdated' field to support stale-schema filtering in the API.
   *
   * @param {object} dependencies - Contains db, logger
   * @param {object} config - Configuration object
@@ -31,6 +53,13 @@ async function batchStoreSchemas(dependencies, config, schemas) {
      continue;
    }

+   // [IMPROVED] Validate before adding to batch
+   const validation = validateSchema(item.schema);
+   if (!validation.valid) {
+     logger.log('WARN', `[SchemaCapture] Invalid schema for ${item.name}: ${validation.reason}`);
+     continue;
+   }
+
    const docRef = db.collection(schemaCollection).doc(item.name);

    // Critical: Always overwrite 'lastUpdated' to now
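Note: `JSON.stringify` doubles as the circular-reference detector in the new `validateSchema` — it throws a `TypeError` on cycles, which the function converts into a `{ valid: false }` result instead of letting the whole batch write fail. A usage sketch (the function body is a condensed copy of the one added above, inlined so the snippet runs standalone):

```js
function validateSchema(schema) {
  try {
    const size = Buffer.byteLength(JSON.stringify(schema)); // throws on circular refs
    if (size > 900 * 1024) {
      return { valid: false, reason: `Schema exceeds 900KB limit (${(size / 1024).toFixed(2)} KB)` };
    }
    return { valid: true };
  } catch (e) {
    return { valid: false, reason: `Serialization failed: ${e.message}` };
  }
}

const cyclic = { name: 'demo' };
cyclic.self = cyclic; // circular reference

console.log(validateSchema({ fields: { price: 'number' } })); // { valid: true }
console.log(validateSchema(cyclic)); // { valid: false, reason: 'Serialization failed: ...' }
```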
package/index.js CHANGED
@@ -27,7 +27,6 @@ const { handleUpdate } = require('./functions

  // Computation System
  const { build: buildManifest } = require('./functions/computation-system/context/ManifestBuilder');
- // const { runDateComputation: runComputationPass } = require('./functions/computation-system/WorkflowOrchestrator'); Depreciated
  const { dispatchComputationPass } = require('./functions/computation-system/helpers/computation_dispatcher');
  const { handleComputationTask } = require('./functions/computation-system/helpers/computation_worker');
  // [NEW] Import Report Tools
@@ -54,7 +53,7 @@ const { handlePost } = require('./functions

  // NEW

- const { runRootDataIndexer } = require('./functions/root-data-indexer/index'); // <--- IMPORT
+ const { runRootDataIndexer } = require('./functions/root-data-indexer/index');

  const core = {
    IntelligentHeaderManager,
@@ -88,7 +87,6 @@ const taskEngine = {
  };

  const computationSystem = {
-   // runComputationPass, Depreciated
    dispatchComputationPass,
    handleComputationTask,
    dataLoader,
@@ -112,7 +110,7 @@ const maintenance = {
    runSocialOrchestrator,
    handleSocialTask,
    runBackfillAssetPrices,
-   runRootDataIndexer, // <--- EXPORT
+   runRootDataIndexer,
  };

  const proxy = { handlePost };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "bulltrackers-module",
-   "version": "1.0.259",
+   "version": "1.0.260",
    "description": "Helper Functions for Bulltrackers.",
    "main": "index.js",
    "files": [