bulltrackers-module 1.0.263 → 1.0.265
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system/WorkflowOrchestrator.js +58 -22
- package/functions/computation-system/context/ManifestBuilder.js +37 -9
- package/functions/computation-system/executors/StandardExecutor.js +42 -7
- package/functions/computation-system/layers/profiling.js +309 -149
- package/functions/computation-system/persistence/FirestoreUtils.js +2 -10
- package/functions/computation-system/persistence/ResultCommitter.js +106 -199
- package/functions/computation-system/persistence/StatusRepository.js +16 -5
- package/functions/computation-system/tools/BuildReporter.js +9 -19
- package/functions/root-data-indexer/index.js +34 -63
- package/package.json +1 -1
package/functions/computation-system/persistence/ResultCommitter.js

@@ -1,21 +1,17 @@
 /**
  * @fileoverview Handles saving computation results with observability and Smart Cleanup.
- * UPDATED:
- * UPDATED: Stops retrying on non-transient errors.
+ * UPDATED: Stores Hash Composition in status for audit trail.
  */
 const { commitBatchInChunks } = require('./FirestoreUtils');
 const { updateComputationStatus } = require('./StatusRepository');
 const { batchStoreSchemas } = require('../utils/schema_capture');
 const { generateProcessId, PROCESS_TYPES } = require('../logger/logger');
-
 const { HeuristicValidator } = require('./ResultsValidator');
 const validationOverrides = require('../config/validation_overrides');
+const pLimit = require('p-limit');
 
 const NON_RETRYABLE_ERRORS = [
-  'INVALID_ARGUMENT',
-  'PERMISSION_DENIED',    // Auth issue
-  'DATA_LOSS',            // Firestore corruption
-  'FAILED_PRECONDITION'   // Transaction requirements not met
+  'INVALID_ARGUMENT', 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION'
 ];
 
 async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
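Note on the flattened NON_RETRYABLE_ERRORS list: these are standard gRPC/Firestore error codes that will never succeed on retry, so the committer fails fast when it sees them. A minimal sketch of that fail-fast pattern, assuming a caller-supplied attemptCommit function (the helper and its retry loop are illustrative; only the error codes come from this diff):

const NON_RETRYABLE = ['INVALID_ARGUMENT', 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION'];

async function commitWithRetry(attemptCommit, maxAttempts = 3) {
  let lastError;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await attemptCommit(attempt);              // caller performs the actual Firestore write
    } catch (err) {
      if (NON_RETRYABLE.includes(err.code)) throw err;  // auth/corruption errors: retrying cannot help
      lastError = err;                                  // anything else is treated as transient
    }
  }
  throw lastError;                                      // all attempts exhausted
}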
@@ -26,12 +22,11 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
   const { logger, db } = deps;
   const pid = generateProcessId(PROCESS_TYPES.STORAGE, passName, dStr);
 
-  const
+  const fanOutLimit = pLimit(10);
 
   for (const name in stateObj) {
     const calc = stateObj[name];
 
-    // Prep metrics container
     const runMetrics = {
       storage: { sizeBytes: 0, isSharded: false, shardCount: 1, keys: 0 },
       validation: { isValid: true, anomalies: [] }
@@ -39,152 +34,92 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
 
     try {
       const result = await calc.getResult();
-
       const overrides = validationOverrides[calc.manifest.name] || {};
       const healthCheck = HeuristicValidator.analyze(calc.manifest.name, result, overrides);
 
       if (!healthCheck.valid) {
-        // If validation failed, we consider it an anomaly but we BLOCK the write (throw error)
         runMetrics.validation.isValid = false;
         runMetrics.validation.anomalies.push(healthCheck.reason);
         throw { message: healthCheck.reason, stage: 'QUALITY_CIRCUIT_BREAKER' };
       }
 
-      // Check for minor anomalies (validation warnings that didn't fail) - optional implementation
-      // For now, we assume if valid=true, anomalies are empty unless we add warning logic later.
-
       const isEmpty = !result || (typeof result === 'object' && Object.keys(result).length === 0) || (typeof result === 'number' && result === 0);
       if (isEmpty) {
-        // Log empty success
        if (calc.manifest.hash) {
-          successUpdates[name] = {
-            hash:
-            category: calc.manifest.category,
-
+          successUpdates[name] = {
+            hash: calc.manifest.hash,
+            category: calc.manifest.category,
+            composition: calc.manifest.composition, // <--- Added Composition
+            metrics: runMetrics
           };
         }
         continue;
       }
 
-      // Calculate Key Count rough estimate
       if (typeof result === 'object') runMetrics.storage.keys = Object.keys(result).length;
 
-
-
-
-        .doc(calc.manifest.category)
-        .collection(config.computationsSubcollection)
-        .doc(name);
-
-      // --- REACTIVE SELF-HEALING LOOP ---
-      let committed = false;
-      // Strategy: 1=Normal, 2=Safe (Halved), 3=Aggressive (Quartered + Key Limit)
-      const strategies = [
-        { bytes: 900 * 1024, keys: null },   // Attempt 1: Standard
-        { bytes: 450 * 1024, keys: 10000 },  // Attempt 2: High Index usage
-        { bytes: 200 * 1024, keys: 2000 }    // Attempt 3: Extreme fragmentation
-      ];
+      // ... (Fan-out logic remains same) ...
+      const resultKeys = Object.keys(result || {});
+      const isMultiDate = resultKeys.length > 0 && resultKeys.every(k => /^\d{4}-\d{2}-\d{2}$/.test(k));
 
-
-
-      for (let attempt = 0; attempt < strategies.length; attempt++) {
-        if (committed) break;
+      if (isMultiDate) {
+        logger.log('INFO', `[ResultCommitter] 🕰️ Multi-Date Output detected for ${name} (${resultKeys.length} days). Throttled Fan-Out...`);
 
-        const
+        const datePromises = resultKeys.map((historicalDate) => fanOutLimit(async () => {
+          const dailyData = result[historicalDate];
+          if (!dailyData || Object.keys(dailyData).length === 0) return;
+
+          const historicalDocRef = db.collection(config.resultsCollection)
+            .doc(historicalDate)
+            .collection(config.resultsSubcollection)
+            .doc(calc.manifest.category)
+            .collection(config.computationsSubcollection)
+            .doc(name);
+
+          await writeSingleResult(dailyData, historicalDocRef, name, historicalDate, logger, config, deps);
+        }));
 
-
-
-
-
-
-
-
-
-
-
-        runMetrics.storage.sizeBytes = totalSize;
-        runMetrics.storage.isSharded = isSharded;
-        runMetrics.storage.shardCount = shardCount;
-
-        // 2. Audit Ledger (Only add to the first update batch)
-        if (passNum && calc.manifest) {
-          const ledgerRef = db.collection(`computation_audit_ledger/${dStr}/passes/${passNum}/tasks`).doc(name);
-          updates.push({
-            ref: ledgerRef,
-            data: {
-              status: 'COMPLETED',
-              completedAt: new Date(),
-              actualHash: calc.manifest.hash,
-              _verified: true,
-              _shardingStrategy: attempt + 1
-            },
-            options: { merge: true }
-          });
-        }
-
-        // 3. Attempt Commit
-        await commitBatchInChunks(config, deps, updates, `${name} Results (Att ${attempt+1})`);
-
-        // Log Success
-        if (logger && logger.logStorage) {
-          logger.logStorage(pid, name, dStr, mainDocRef.path, totalSize, isSharded);
-        }
-
-        committed = true; // Exit loop
+        await Promise.all(datePromises);
+
+        if (calc.manifest.hash) {
+          successUpdates[name] = {
+            hash: calc.manifest.hash,
+            category: calc.manifest.category,
+            composition: calc.manifest.composition, // <--- Added Composition
+            metrics: runMetrics
+          };
+        }
 
-
-
-
-
-
-
-
-      }
+      } else {
+        // --- STANDARD MODE ---
+        const mainDocRef = db.collection(config.resultsCollection)
+          .doc(dStr)
+          .collection(config.resultsSubcollection)
+          .doc(calc.manifest.category)
+          .collection(config.computationsSubcollection)
+          .doc(name);
 
-
-
+        const writeStats = await writeSingleResult(result, mainDocRef, name, dStr, logger, config, deps);
+
+        runMetrics.storage.sizeBytes = writeStats.totalSize;
+        runMetrics.storage.isSharded = writeStats.isSharded;
+        runMetrics.storage.shardCount = writeStats.shardCount;
 
-
-
-
-
-
-
+        if (calc.manifest.hash) {
+          successUpdates[name] = {
+            hash: calc.manifest.hash,
+            category: calc.manifest.category,
+            composition: calc.manifest.composition, // <--- Added Composition
+            metrics: runMetrics
+          };
+        }
       }
       }
 
-      if (!committed) {
-        throw {
-          message: `Exhausted sharding strategies. Last error: ${lastError?.message}`,
-          stack: lastError?.stack,
-          stage: 'SHARDING_LIMIT_EXCEEDED'
-        };
-      }
-      // ----------------------------------
-
-      // Mark Success & Pass Metrics
-      if (calc.manifest.hash) {
-        successUpdates[name] = {
-          hash: calc.manifest.hash,
-          category: calc.manifest.category,
-          metrics: runMetrics // Pass metrics up
-        };
-      }
-
-      // Capture Schema
       if (calc.manifest.class.getSchema) {
         const { class: _cls, ...safeMetadata } = calc.manifest;
-        schemas.push({
-          name,
-          category: calc.manifest.category,
-          schema: calc.manifest.class.getSchema(),
-          metadata: safeMetadata
-        });
+        schemas.push({ name, category: calc.manifest.category, schema: calc.manifest.class.getSchema(), metadata: safeMetadata });
       }
 
-      // Cleanup Migration
       if (calc.manifest.previousCategory && calc.manifest.previousCategory !== calc.manifest.category) {
         cleanupTasks.push(deleteOldCalculationData(dStr, calc.manifest.previousCategory, name, config, deps));
       }
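The new multi-date branch treats a result whose top-level keys are all YYYY-MM-DD strings as one payload per day and writes each day separately, throttled by pLimit(10). A standalone sketch of that pattern, assuming a caller-supplied writeDay(date, data) function (the regular expression and the concurrency limit of 10 come from the diff; everything else is illustrative):

const pLimit = require('p-limit');

const limit = pLimit(10);                          // at most 10 concurrent per-day writes
const DATE_KEY = /^\d{4}-\d{2}-\d{2}$/;

async function fanOutByDate(result, writeDay) {
  const keys = Object.keys(result || {});
  const isMultiDate = keys.length > 0 && keys.every(k => DATE_KEY.test(k));
  if (!isMultiDate) return false;                  // caller falls back to a single write
  await Promise.all(keys.map(date => limit(async () => {
    const dailyData = result[date];
    if (!dailyData || Object.keys(dailyData).length === 0) return;  // skip empty days
    await writeDay(date, dailyData);
  })));
  return true;
}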
@@ -198,7 +133,7 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
       failureReport.push({
         name,
         error: { message: msg, stack: e.stack, stage },
-        metrics: runMetrics
+        metrics: runMetrics
       });
     }
   }
@@ -210,107 +145,79 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
   return { successUpdates, failureReport };
 }
 
-
- *
-
+async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps) {
+  const strategies = [ { bytes: 900 * 1024, keys: null }, { bytes: 450 * 1024, keys: 10000 }, { bytes: 200 * 1024, keys: 2000 } ];
+  let committed = false; let lastError = null; let finalStats = { totalSize: 0, isSharded: false, shardCount: 1 };
+
+  for (let attempt = 0; attempt < strategies.length; attempt++) {
+    if (committed) break;
+    const constraints = strategies[attempt];
+    try {
+      const updates = await prepareAutoShardedWrites(result, docRef, logger, constraints.bytes, constraints.keys);
+      const pointer = updates.find(u => u.data._completed === true);
+      finalStats.isSharded = pointer && pointer.data._sharded === true;
+      finalStats.shardCount = finalStats.isSharded ? (pointer.data._shardCount || 1) : 1;
+      finalStats.totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
+      await commitBatchInChunks(config, deps, updates, `${name}::${dateContext} (Att ${attempt+1})`);
+      if (logger && logger.logStorage) { logger.logStorage(null, name, dateContext, docRef.path, finalStats.totalSize, finalStats.isSharded); }
+      committed = true;
+    } catch (commitErr) {
+      lastError = commitErr;
+      const msg = commitErr.message || '';
+      if (NON_RETRYABLE_ERRORS.includes(commitErr.code)) { logger.log('ERROR', `[SelfHealing] ${name} FATAL error: ${msg}.`); throw commitErr; }
+      if (msg.includes('Transaction too big') || msg.includes('payload is too large') || msg.includes('too many index entries')) { logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} failed attempt ${attempt+1}. Retrying...`, { error: msg }); continue; }
+      else { logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} unknown error. Retrying...`, { error: msg }); }
+    }
+  }
+  if (!committed) { throw { message: `Exhausted sharding strategies for ${name}. Last error: ${lastError?.message}`, stack: lastError?.stack, stage: 'SHARDING_LIMIT_EXCEEDED' }; }
+  return finalStats;
+}
+
 async function deleteOldCalculationData(dateStr, oldCategory, calcName, config, deps) {
   const { db, logger, calculationUtils } = deps;
   const { withRetry } = calculationUtils || { withRetry: (fn) => fn() };
-
   try {
-    const oldDocRef = db.collection(config.resultsCollection)
-
-      .collection(config.resultsSubcollection)
-      .doc(oldCategory)
-      .collection(config.computationsSubcollection)
-      .doc(calcName);
-
-    const shardsCol = oldDocRef.collection('_shards');
+    const oldDocRef = db.collection(config.resultsCollection).doc(dateStr).collection(config.resultsSubcollection).doc(oldCategory).collection(config.computationsSubcollection).doc(calcName);
+    const shardsCol = oldDocRef.collection('_shards');
     const shardsSnap = await withRetry(() => shardsCol.listDocuments(), 'ListOldShards');
-    const batch
-    let ops = 0;
-
+    const batch = db.batch(); let ops = 0;
     for (const shardDoc of shardsSnap) { batch.delete(shardDoc); ops++; }
-    batch.delete(oldDocRef);
-    ops++;
-
+    batch.delete(oldDocRef); ops++;
     await withRetry(() => batch.commit(), 'CleanupOldCategory');
-    logger.log('INFO', `[Migration] Cleaned up ${ops} docs for ${calcName} in
-
-  } catch (e) {
-    logger.log('WARN', `[Migration] Failed to clean up old data for ${calcName}: ${e.message}`);
-  }
+    logger.log('INFO', `[Migration] Cleaned up ${ops} docs for ${calcName} in '${oldCategory}'`);
+  } catch (e) { logger.log('WARN', `[Migration] Failed to clean up ${calcName}: ${e.message}`); }
 }
 
 function calculateFirestoreBytes(value) {
-  if (value === null) return 1;
-  if (value === undefined) return 0;
-  if (typeof value === 'boolean') return 1;
-  if (typeof value === 'number') return 8;
-  if (typeof value === 'string') return Buffer.byteLength(value, 'utf8') + 1;
-  if (value instanceof Date) return 8;
-  if (value.constructor && value.constructor.name === 'DocumentReference') { return Buffer.byteLength(value.path, 'utf8') + 16; }
+  if (value === null) return 1; if (value === undefined) return 0; if (typeof value === 'boolean') return 1; if (typeof value === 'number') return 8; if (typeof value === 'string') return Buffer.byteLength(value, 'utf8') + 1; if (value instanceof Date) return 8; if (value.constructor && value.constructor.name === 'DocumentReference') { return Buffer.byteLength(value.path, 'utf8') + 16; }
   if (Array.isArray(value)) { let sum = 0; for (const item of value) sum += calculateFirestoreBytes(item); return sum; }
-  if (typeof value === 'object') { let sum = 0; for (const k in value) { if (Object.prototype.hasOwnProperty.call(value, k)) { sum += (Buffer.byteLength(k, 'utf8') + 1) + calculateFirestoreBytes(value[k]); } } return sum; }
-  return 0;
+  if (typeof value === 'object') { let sum = 0; for (const k in value) { if (Object.prototype.hasOwnProperty.call(value, k)) { sum += (Buffer.byteLength(k, 'utf8') + 1) + calculateFirestoreBytes(value[k]); } } return sum; } return 0;
 }
 
 async function prepareAutoShardedWrites(result, docRef, logger, maxBytes = 900 * 1024, maxKeys = null) {
-  const OVERHEAD_ALLOWANCE
-  const
-
-
-  const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16;
-
-  const writes = [];
-  const shardCollection = docRef.collection('_shards');
-  let currentChunk = {};
-  let currentChunkSize = 0;
-  let currentKeyCount = 0;
-  let shardIndex = 0;
+  const OVERHEAD_ALLOWANCE = 20 * 1024; const CHUNK_LIMIT = maxBytes - OVERHEAD_ALLOWANCE;
+  const totalSize = calculateFirestoreBytes(result); const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16;
+  const writes = []; const shardCollection = docRef.collection('_shards');
+  let currentChunk = {}; let currentChunkSize = 0; let currentKeyCount = 0; let shardIndex = 0;
 
-  // Fast path: If small enough AND keys are safe
   if (!maxKeys && (totalSize + docPathSize) < CHUNK_LIMIT) {
-    const data = {
-      ...result,
-      _completed: true,
-      _sharded: false,
-      _lastUpdated: new Date().toISOString()
-    };
+    const data = { ...result, _completed: true, _sharded: false, _lastUpdated: new Date().toISOString() };
     return [{ ref: docRef, data, options: { merge: true } }];
   }
 
   for (const [key, value] of Object.entries(result)) {
     if (key.startsWith('_')) continue;
-    const keySize
-    const
-    const itemSize = keySize + valueSize;
-
-    const byteLimitReached = (currentChunkSize + itemSize > CHUNK_LIMIT);
-    const keyLimitReached = (maxKeys && currentKeyCount + 1 >= maxKeys);
-
+    const keySize = Buffer.byteLength(key, 'utf8') + 1; const valueSize = calculateFirestoreBytes(value); const itemSize = keySize + valueSize;
+    const byteLimitReached = (currentChunkSize + itemSize > CHUNK_LIMIT); const keyLimitReached = (maxKeys && currentKeyCount + 1 >= maxKeys);
     if (byteLimitReached || keyLimitReached) {
       writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } });
-      shardIndex++;
-      currentChunk = {};
-      currentChunkSize = 0;
-      currentKeyCount = 0;
+      shardIndex++; currentChunk = {}; currentChunkSize = 0; currentKeyCount = 0;
     }
-    currentChunk[key] = value;
-    currentChunkSize += itemSize;
-    currentKeyCount++;
-  }
-
-  if (Object.keys(currentChunk).length > 0) {
-    writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } });
+    currentChunk[key] = value; currentChunkSize += itemSize; currentKeyCount++;
   }
+  if (Object.keys(currentChunk).length > 0) { writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } }); }
 
-  const pointerData = {
-    _completed: true,
-    _sharded: true,
-    _shardCount: shardIndex + 1,
-    _lastUpdated: new Date().toISOString()
-  };
+  const pointerData = { _completed: true, _sharded: true, _shardCount: shardIndex + 1, _lastUpdated: new Date().toISOString() };
   writes.push({ ref: docRef, data: pointerData, options: { merge: false } });
   return writes;
 }
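writeSingleResult retries the same payload under progressively tighter budgets (900 KB, then 450 KB with a 10,000-key cap, then 200 KB with a 2,000-key cap) before raising SHARDING_LIMIT_EXCEEDED, and prepareAutoShardedWrites does the actual splitting. The core chunking idea is sketched below under simplified assumptions: JSON.stringify stands in for calculateFirestoreBytes, and the function and variable names are illustrative:

// Split a flat object into chunks that stay under maxBytes and (optionally) maxKeys.
function chunkObject(obj, maxBytes, maxKeys = null) {
  const chunks = [];
  let current = {}, size = 0, count = 0;
  for (const [key, value] of Object.entries(obj)) {
    const itemSize = Buffer.byteLength(JSON.stringify({ [key]: value }), 'utf8');
    const overBytes = size + itemSize > maxBytes;
    const overKeys = maxKeys !== null && count + 1 >= maxKeys;
    if ((overBytes || overKeys) && count > 0) {   // close the current chunk, start a new one
      chunks.push(current);
      current = {}; size = 0; count = 0;
    }
    current[key] = value; size += itemSize; count += 1;
  }
  if (count > 0) chunks.push(current);
  return chunks;
}

// Example: chunkObject(bigResult, 450 * 1024, 10000) yields shard-sized payloads
// that a tighter second-attempt strategy could then commit one document at a time.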
package/functions/computation-system/persistence/StatusRepository.js

@@ -1,6 +1,6 @@
 /**
  * @fileoverview Manages computation status tracking in Firestore.
- * UPDATED: Supports Schema V2 (Object with Category) for
+ * UPDATED: Supports Schema V2 (Object with Category & Composition) for deep auditing.
  */
 
 async function fetchComputationStatus(dateStr, config, { db }) {
@@ -14,8 +14,11 @@ async function fetchComputationStatus(dateStr, config, { db }) {
 
   // Normalize V1 (String) to V2 (Object)
   for (const [name, value] of Object.entries(rawData)) {
-    if (typeof value === 'string') {
-
+    if (typeof value === 'string') {
+      normalized[name] = { hash: value, category: null, composition: null }; // Legacy entry
+    } else {
+      normalized[name] = value;
+    }
   }
 
   return normalized;
@@ -30,8 +33,16 @@ async function updateComputationStatus(dateStr, updates, config, { db }) {
 
   const safeUpdates = {};
   for (const [key, val] of Object.entries(updates)) {
-    if (typeof val === 'string') {
-
+    if (typeof val === 'string') {
+      // Legacy Call Fallback
+      safeUpdates[key] = { hash: val, category: 'unknown', lastUpdated: new Date() };
+    } else {
+      // V2 Call: val should contain { hash, category, composition }
+      safeUpdates[key] = {
+        ...val,
+        lastUpdated: new Date()
+      };
+    }
   }
 
   await docRef.set(safeUpdates, { merge: true });
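Both helpers above bridge the legacy V1 status format (a bare hash string per computation) and the V2 object that also carries category and composition. The normalization itself reduces to one small function; this sketch uses the field names from the diff, with illustrative example values:

function normalizeStatusEntry(value) {
  if (typeof value === 'string') {
    return { hash: value, category: null, composition: null };  // legacy V1 entry
  }
  return value;  // already V2: { hash, category, composition, metrics, ... }
}

// normalizeStatusEntry('abc123')
//   -> { hash: 'abc123', category: null, composition: null }
// normalizeStatusEntry({ hash: 'abc123', category: 'pricing', composition: { deps: 2 } })
//   -> returned unchanged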
package/functions/computation-system/tools/BuildReporter.js

@@ -38,9 +38,6 @@ async function ensureBuildReport(config, dependencies, manifest) {
 
     // Run generation. This function handles writing the 'latest' document with FULL data.
     await generateBuildReport(config, dependencies, manifest, 90, buildId);
-
-    // [FIX] REMOVED: The redundant write that was overwriting the full report with just metadata.
-    // The generateBuildReport function now serves as the single source of truth for writing 'latest'.
 
   } catch (e) {
     logger.log('ERROR', `[BuildReporter] Auto-run check failed: ${e.message}`);
@@ -57,23 +54,23 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
   logger.log('INFO', `[BuildReporter] Generating Build Report: ${buildId} (Scope: ${daysBack} days)...`);
 
   // 1. Determine Date Range
-  const today
+  const today = new Date();
   const startDate = new Date();
   startDate.setDate(today.getDate() - daysBack);
 
   const datesToCheck = getExpectedDateStrings(startDate, today);
-  const manifestMap
+  const manifestMap = new Map(manifest.map(c => [normalizeName(c.name), c]));
 
   const reportData = {
     buildId,
-    packageVersion: packageVersion,
+    packageVersion: packageVersion,
     generatedAt: new Date().toISOString(),
     summary: {},
     dates: {}
   };
 
   let totalReRuns = 0;
-  let totalNew
+  let totalNew = 0;
 
   // 2. PARALLEL PROCESSING
   const limit = pLimit(20);
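The report iterates every day in the window, so it needs the list of YYYY-MM-DD strings from startDate through today. getExpectedDateStrings is not part of this diff; the sketch below is only an assumption of what such a helper does, not the package's implementation:

// Assumed behavior: inclusive list of UTC date strings between two dates.
function getExpectedDateStrings(startDate, endDate) {
  const dates = [];
  const cursor = new Date(Date.UTC(startDate.getUTCFullYear(), startDate.getUTCMonth(), startDate.getUTCDate()));
  const end = new Date(Date.UTC(endDate.getUTCFullYear(), endDate.getUTCMonth(), endDate.getUTCDate()));
  while (cursor <= end) {
    dates.push(cursor.toISOString().slice(0, 10));  // "YYYY-MM-DD"
    cursor.setUTCDate(cursor.getUTCDate() + 1);
  }
  return dates;
}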
@@ -106,20 +103,17 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
       // If we fetched prevStatus, it's at index 2
       const prevDailyStatus = (prevDateStr && results[2]) ? results[2] : (prevDateStr ? {} : null);
 
-      const rootDataStatus = availability ? availability.status : {
-        hasPortfolio: false, hasHistory: false, hasSocial: false, hasInsights: false, hasPrices: false
-      };
+      const rootDataStatus = availability ? availability.status : { hasPortfolio: false, hasHistory: false, hasSocial: false, hasInsights: false, hasPrices: false };
 
       // D. Run Logic Analysis
       const analysis = analyzeDateExecution(dateStr, manifest, rootDataStatus, dailyStatus, manifestMap, prevDailyStatus);
 
       // E. Format Findings
-      const dateSummary = {
-        willRun: [], willReRun: [], blocked: [], impossible: []
-      };
+      const dateSummary = { willRun: [], willReRun: [], blocked: [], impossible: [] };
 
+      // Pass the generated "Reason" string through to the report
       analysis.runnable.forEach(item => dateSummary.willRun.push({ name: item.name, reason: "New / No Previous Record" }));
-      analysis.reRuns.forEach(item => dateSummary.willReRun.push({ name: item.name, reason: item.
+      analysis.reRuns.forEach(item => dateSummary.willReRun.push({ name: item.name, reason: item.reason || "Hash Mismatch" }));
       analysis.impossible.forEach(item => dateSummary.impossible.push({ name: item.name, reason: item.reason }));
       [...analysis.blocked, ...analysis.failedDependency].forEach(item => dateSummary.blocked.push({ name: item.name, reason: item.reason || 'Dependency' }));
@@ -156,11 +150,7 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
   await reportRef.set(reportData);
 
   // 5. Update 'latest' pointer
-
-  await db.collection('computation_build_records').doc('latest').set({
-    ...reportData,
-    note: "Latest build report pointer."
-  });
+  await db.collection('computation_build_records').doc('latest').set({ ...reportData, note: "Latest build report pointer." });
 
   logger.log('SUCCESS', `[BuildReporter] Report ${buildId} saved. Re-runs: ${totalReRuns}, New: ${totalNew}.`);
 
|