bulltrackers-module 1.0.264 → 1.0.266
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system/WorkflowOrchestrator.js +60 -24
- package/functions/computation-system/context/ManifestBuilder.js +37 -10
- package/functions/computation-system/executors/MetaExecutor.js +15 -3
- package/functions/computation-system/executors/StandardExecutor.js +30 -9
- package/functions/computation-system/helpers/computation_dispatcher.js +3 -5
- package/functions/computation-system/helpers/computation_worker.js +15 -8
- package/functions/computation-system/persistence/ResultCommitter.js +56 -163
- package/functions/computation-system/persistence/ResultsValidator.js +3 -7
- package/functions/computation-system/persistence/RunRecorder.js +20 -61
- package/functions/computation-system/persistence/StatusRepository.js +16 -5
- package/functions/computation-system/tools/BuildReporter.js +3 -1
- package/package.json +1 -1
package/functions/computation-system/WorkflowOrchestrator.js

@@ -1,6 +1,6 @@
 /**
  * @fileoverview Main Orchestrator. Coordinates the topological execution.
- * UPDATED:
+ * UPDATED: Implements Smart Audit logic to detect WHY a hash mismatch occurred.
  */
 const { normalizeName, DEFINITIVE_EARLIEST_DATES } = require('./utils/utils');
 const { checkRootDataAvailability, checkRootDependencies } = require('./data/AvailabilityChecker');
@@ -10,13 +10,13 @@ const { StandardExecutor } = require('./executor
 const { MetaExecutor } = require('./executors/MetaExecutor');
 const { generateProcessId, PROCESS_TYPES } = require('./logger/logger');

-// [FIX] Split IMPOSSIBLE into semantic categories
 const STATUS_IMPOSSIBLE_PREFIX = 'IMPOSSIBLE';

 function groupByPass(manifest) { return manifest.reduce((acc, calc) => { (acc[calc.pass] = acc[calc.pass] || []).push(calc); return acc; }, {}); }

 /**
  * Analyzes whether calculations should run, be skipped, or are blocked.
+ * Now performs Deep Hash Analysis to explain Re-Runs.
  */
 function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus, manifestMap, prevDailyStatus = null) {
 const report = { runnable: [], blocked: [], impossible: [], failedDependency: [], reRuns: [], skipped: [] };
@@ -28,7 +28,6 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 const stored = currentStatusMap[norm];
 const depManifest = manifestMap.get(norm);
 if (!stored) return false;
-// [FIX] Check for any IMPOSSIBLE variant
 if (typeof stored.hash === 'string' && stored.hash.startsWith(STATUS_IMPOSSIBLE_PREFIX)) return false;
 if (!depManifest) return false;
 if (stored.hash !== depManifest.hash) return false;
@@ -42,7 +41,6 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 const storedCategory = stored ? stored.category : null;
 const currentHash = calc.hash;

-// [FIX] Granular impossible marking
 const markImpossible = (reason, type = 'GENERIC') => {
 report.impossible.push({ name: cName, reason });
 const statusHash = `${STATUS_IMPOSSIBLE_PREFIX}:${type}`;
@@ -51,14 +49,13 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,

 const markRunnable = (isReRun = false, reRunDetails = null) => {
 if (isReRun) report.reRuns.push(reRunDetails);
-else report.runnable.push(
-simulationStatus[cName] = { hash: currentHash, category: calc.category };
+else report.runnable.push({ name: cName, ...reRunDetails });
+simulationStatus[cName] = { hash: currentHash, category: calc.category, composition: calc.composition };
 };

 let migrationOldCategory = null;
 if (storedCategory && storedCategory !== calc.category) { migrationOldCategory = storedCategory; }

-// [FIX] Check for any IMPOSSIBLE variant in storage
 if (typeof storedHash === 'string' && storedHash.startsWith(STATUS_IMPOSSIBLE_PREFIX)) {
 report.skipped.push({ name: cName, reason: `Permanently Impossible (${storedHash})` });
 continue;
@@ -69,7 +66,6 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 if (!rootCheck.canRun) {
 const missingStr = rootCheck.missing.join(', ');
 if (!isTargetToday) {
-// [FIX] Mark specifically as NO_DATA
 markImpossible(`Missing Root Data: ${missingStr} (Historical)`, 'NO_DATA');
 } else {
 report.blocked.push({ name: cName, reason: `Missing Root Data: ${missingStr} (Waiting)` });
@@ -83,7 +79,6 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 for (const dep of calc.dependencies) {
 const normDep = normalizeName(dep);
 const depStored = simulationStatus[normDep];
-// [FIX] Check for any IMPOSSIBLE variant in dependencies
 if (depStored && typeof depStored.hash === 'string' && depStored.hash.startsWith(STATUS_IMPOSSIBLE_PREFIX)) {
 dependencyIsImpossible = true;
 break;
@@ -93,7 +88,6 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 }

 if (dependencyIsImpossible) {
-// [FIX] Mark specifically as UPSTREAM failure
 markImpossible('Dependency is Impossible', 'UPSTREAM');
 continue;
 }
@@ -111,44 +105,88 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
 }
 }

-
-
-
-
+// --- HASH CHECK LOGIC ---
+if (!storedHash) {
+markRunnable(false, { reason: "New Calculation" });
+}
+else if (storedHash !== currentHash) {
+// Smart Logic: Why did it change?
+let changeReason = "Hash Mismatch (Unknown)";
+const oldComp = stored.composition;
+const newComp = calc.composition;
+
+if (oldComp && newComp) {
+// 1. Check Code
+if (oldComp.code !== newComp.code) {
+changeReason = "Code Changed";
+}
+// 2. Check Layers
+else if (JSON.stringify(oldComp.layers) !== JSON.stringify(newComp.layers)) {
+// Find specific layer
+const changedLayers = [];
+for(const lKey in newComp.layers) {
+if (newComp.layers[lKey] !== oldComp.layers[lKey]) changedLayers.push(lKey);
+}
+changeReason = `Layer Update: [${changedLayers.join(', ')}]`;
+}
+// 3. Check Dependencies
+else if (JSON.stringify(oldComp.deps) !== JSON.stringify(newComp.deps)) {
+// Find specific dep
+const changedDeps = [];
+for(const dKey in newComp.deps) {
+if (newComp.deps[dKey] !== oldComp.deps[dKey]) changedDeps.push(dKey);
+}
+changeReason = `Upstream Change: [${changedDeps.join(', ')}]`;
+}
+else {
+changeReason = "Logic/Epoch Change";
+}
+} else {
+changeReason = "Hash Mismatch (No prior composition)";
+}
+
+markRunnable(true, {
+name: cName,
+oldHash: storedHash,
+newHash: currentHash,
+previousCategory: migrationOldCategory,
+reason: changeReason // <--- Passed to Reporter
+});
+}
+else if (migrationOldCategory) {
+markRunnable(true, { name: cName, reason: 'Category Migration', previousCategory: migrationOldCategory, newCategory: calc.category });
+}
+else {
+report.skipped.push({ name: cName });
+simulationStatus[cName] = { hash: currentHash, category: calc.category, composition: calc.composition };
+}
 }
 return report;
 }

 /**
  * DIRECT EXECUTION PIPELINE (For Workers)
- * Skips analysis. Assumes the calculation is valid and runnable.
- * [UPDATED] Accepted previousCategory argument to handle migrations.
  */
 async function executeDispatchTask(dateStr, pass, targetComputation, config, dependencies, computationManifest, previousCategory = null) {
 const { logger } = dependencies;
 const pid = generateProcessId(PROCESS_TYPES.EXECUTOR, targetComputation, dateStr);

-// 1. Get Calculation Manifest
 const manifestMap = new Map(computationManifest.map(c => [normalizeName(c.name), c]));
 const calcManifest = manifestMap.get(normalizeName(targetComputation));

 if (!calcManifest) { throw new Error(`Calculation '${targetComputation}' not found in manifest.`); }

-// [UPDATED] Attach migration context if present
 if (previousCategory) {
 calcManifest.previousCategory = previousCategory;
 logger.log('INFO', `[Executor] Migration detected for ${calcManifest.name}. Old data will be cleaned from: ${previousCategory}`);
 }

-// 2. Fetch Root Data Availability
 const rootData = await checkRootDataAvailability(dateStr, config, dependencies, DEFINITIVE_EARLIEST_DATES);
-
 if (!rootData) {
-logger.log('ERROR', `[Executor] FATAL: Root data check failed for ${targetComputation} on ${dateStr}
+logger.log('ERROR', `[Executor] FATAL: Root data check failed for ${targetComputation} on ${dateStr}.`);
 return;
 }

-// 3. Fetch Dependencies
 const calcsToRun = [calcManifest];
 const existingResults = await fetchExistingResults(dateStr, calcsToRun, computationManifest, config, dependencies, false);

@@ -160,7 +198,6 @@ async function executeDispatchTask(dateStr, pass, targetComputation, config, dep
 previousResults = await fetchExistingResults(prevDateStr, calcsToRun, computationManifest, config, dependencies, true);
 }

-// 4. Execute
 logger.log('INFO', `[Executor] Running ${calcManifest.name} for ${dateStr}`, { processId: pid });
 let resultUpdates = {};

@@ -176,5 +213,4 @@ async function executeDispatchTask(dateStr, pass, targetComputation, config, dep
 }
 }

-
 module.exports = { executeDispatchTask, groupByPass, analyzeDateExecution };
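The new hash-check block above compares the stored `composition` object against the manifest's current one to explain why a hash changed. Below is a minimal standalone sketch of that classification, assuming composition objects of the shape `{ code, layers, deps }` produced by the ManifestBuilder; the helper name `explainHashMismatch` is illustrative and not part of the package.

```js
// Illustrative sketch only: mirrors the orchestrator's change-reason logic
// for composition objects of the assumed shape { code, layers, deps }.
function explainHashMismatch(oldComp, newComp) {
  if (!oldComp || !newComp) return 'Hash Mismatch (No prior composition)';
  if (oldComp.code !== newComp.code) return 'Code Changed';

  // Per-layer hashes: report the layers whose hash moved.
  const changedLayers = Object.keys(newComp.layers || {})
    .filter(k => newComp.layers[k] !== (oldComp.layers || {})[k]);
  if (changedLayers.length) return `Layer Update: [${changedLayers.join(', ')}]`;

  // Upstream dependency hashes: report the dependencies that changed.
  const changedDeps = Object.keys(newComp.deps || {})
    .filter(k => newComp.deps[k] !== (oldComp.deps || {})[k]);
  if (changedDeps.length) return `Upstream Change: [${changedDeps.join(', ')}]`;

  return 'Logic/Epoch Change';
}
```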
package/functions/computation-system/context/ManifestBuilder.js

@@ -1,5 +1,6 @@
 /**
  * @fileoverview Dynamic Manifest Builder - Handles Topological Sort and Auto-Discovery.
+ * UPDATED: Generates Granular Hash Composition for Audit Trails.
  */
 const { generateCodeHash, LEGACY_MAPPING } = require('../topology/HashManager.js');
 const { normalizeName } = require('../utils/utils');
@@ -84,7 +85,6 @@ function getDependencySet(endpoints, adjacencyList) {
 function buildManifest(productLinesToRun = [], calculations) {
 log.divider('Building Dynamic Manifest');

-// [LOG VERIFICATION] Log the Input Request
 const requestedLog = (!productLinesToRun || productLinesToRun.length === 0)
 ? "ALL (Wildcard/Empty)"
 : productLinesToRun.join(', ');
@@ -106,9 +106,13 @@ function buildManifest(productLinesToRun = [], calculations) {
 const metadata = Class.getMetadata();
 const dependencies = Class.getDependencies().map(normalizeName);
 const codeStr = Class.toString();
+const selfCodeHash = generateCodeHash(codeStr);
+
+let compositeHashString = selfCodeHash + `|EPOCH:${SYSTEM_EPOCH}`;

-let compositeHashString = generateCodeHash(codeStr) + `|EPOCH:${SYSTEM_EPOCH}`; // Here we build the hash
 const usedDeps = [];
+// Track layer hashes for composition analysis
+const usedLayerHashes = {};

 for (const [layerName, exportsMap] of Object.entries(LAYER_TRIGGERS)) {
 const layerHashes = LAYER_HASHES[layerName];
@@ -118,19 +122,30 @@ function buildManifest(productLinesToRun = [], calculations) {
 if (exportHash) {
 compositeHashString += exportHash;
 usedDeps.push(`${layerName}.${exportName}`);
+
+// Group hashes by layer for the composition report
+if (!usedLayerHashes[layerName]) usedLayerHashes[layerName] = '';
+usedLayerHashes[layerName] += exportHash;
 }
 }
 }

+// Simplify layer hashes to one hash per layer for the report
+const layerComposition = {};
+for(const [lName, lStr] of Object.entries(usedLayerHashes)) {
+layerComposition[lName] = generateCodeHash(lStr);
+}
+
 // Safe Mode Fallback
 let isSafeMode = false;
 if (usedDeps.length === 0) {
 isSafeMode = true;
 Object.values(LAYER_HASHES).forEach(layerObj => { Object.values(layerObj).forEach(h => compositeHashString += h); });
+layerComposition['ALL_SAFE_MODE'] = 'ALL';
 }

-const
+const intrinsicHash = generateCodeHash(compositeHashString);

 const manifestEntry = {
 name: normalizedName,
@@ -143,7 +158,16 @@ function buildManifest(productLinesToRun = [], calculations) {
 userType: metadata.userType,
 dependencies: dependencies,
 pass: 0,
-hash:
+hash: intrinsicHash, // Will be updated with deps
+
+// [NEW] Composition Object for Audit
+composition: {
+epoch: SYSTEM_EPOCH,
+code: selfCodeHash,
+layers: layerComposition,
+deps: {} // Will be populated after topo sort
+},
+
 debugUsedLayers: isSafeMode ? ['ALL (Safe Mode)'] : usedDeps
 };

@@ -174,8 +198,6 @@ function buildManifest(productLinesToRun = [], calculations) {
 }

 const productLineEndpoints = [];
-
-// [UPDATE] Check if we should run ALL product lines (if empty or wildcard)
 const runAll = !productLinesToRun || productLinesToRun.length === 0 || productLinesToRun.includes('*');

 for (const [name, entry] of manifestMap.entries()) {
@@ -187,7 +209,6 @@ function buildManifest(productLinesToRun = [], calculations) {
 const requiredCalcs = getDependencySet(productLineEndpoints, adjacency);
 log.info(`Filtered down to ${requiredCalcs.size} active calculations.`);

-// [LOG VERIFICATION] Final Proof of Active Lines
 const activePackages = new Set();
 requiredCalcs.forEach(name => {
 const entry = manifestMap.get(name);
@@ -240,11 +261,17 @@ function buildManifest(productLinesToRun = [], calculations) {

 // --- Cascading Hash (Phase 2) ---
 for (const entry of sortedManifest) {
-let dependencySignature = entry.hash;
+let dependencySignature = entry.hash; // Start with intrinsic
+
 if (entry.dependencies && entry.dependencies.length > 0) {
 const depHashes = entry.dependencies.map(depName => {
-const depEntry = filteredManifestMap.get(depName);
-
+const depEntry = filteredManifestMap.get(depName);
+if (depEntry) {
+// Populate Composition
+entry.composition.deps[depName] = depEntry.hash;
+return depEntry.hash;
+}
+return '';
 }).join('|');
 dependencySignature += `|DEPS:${depHashes}`;
 }
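Each manifest entry now records the inputs to its hash in a `composition` object so a later mismatch can be attributed to the calculation's own code, a shared layer, or an upstream dependency. A condensed sketch of how that object could be assembled, assuming the `generateCodeHash(str)` string-hash helper used above (the function name `buildComposition` is illustrative, not part of the package):

```js
// Sketch: assemble the composition audit object for one manifest entry.
// generateCodeHash(str) -> string is assumed, as required above.
function buildComposition(selfCodeHash, usedLayerHashes, SYSTEM_EPOCH, generateCodeHash) {
  // One hash per layer, derived from the concatenated export hashes that were used.
  const layers = {};
  for (const [layerName, concatenated] of Object.entries(usedLayerHashes)) {
    layers[layerName] = generateCodeHash(concatenated);
  }
  return {
    epoch: SYSTEM_EPOCH,   // global cache-busting epoch
    code: selfCodeHash,    // hash of the calculation's own source
    layers,                // per-layer hashes actually consumed
    deps: {}               // filled with { depName: depHash } after the topological sort
  };
}
```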
package/functions/computation-system/executors/MetaExecutor.js

@@ -1,6 +1,7 @@
 /**
  * @fileoverview Executor for "Meta" (global) calculations.
  * UPDATED: Uses CachedDataLoader for all data access.
+ * UPDATED: Tracks processed shard/item counts.
  */
 const { normalizeName } = require('../utils/utils');
 const { CachedDataLoader } = require('../data/CachedDataLoader');
@@ -19,8 +20,6 @@ class MetaExecutor {
 const inst = new mCalc.class();
 inst.manifest = mCalc;

-// We do not pass 'rootData' (which has null refs) to execution.
-// The Executor fetches its own data via loader.
 await MetaExecutor.executeOncePerDay(inst, mCalc, dStr, fetchedDeps, previousFetchedDeps, config, deps, cachedLoader);
 state[normalizeName(mCalc.name)] = inst;
 } catch (e) {
@@ -33,6 +32,7 @@ class MetaExecutor {
 static async executeOncePerDay(calcInstance, metadata, dateStr, computedDeps, prevDeps, config, deps, loader) {
 const mappings = await loader.loadMappings();
 const { logger } = deps;
+const stats = { processedShards: 0, processedItems: 0 };

 // Lazy fetch insights/social using the loader
 const insights = metadata.rootDataDependencies?.includes('insights') ? { today: await loader.loadInsights(dateStr) } : null;
@@ -55,9 +55,16 @@ class MetaExecutor {
 await calcInstance.process(partialContext);
 partialContext.prices = null;
 processedCount++;
+
+stats.processedShards++;
+stats.processedItems += Object.keys(shardData).length;
+
 if (processedCount % 10 === 0 && global.gc) { global.gc(); }
 }
 logger.log('INFO', `[Executor] Finished Batched Execution for ${metadata.name} (${processedCount} shards).`);
+
+// Attach stats
+calcInstance._executionStats = stats;
 return calcInstance.getResult ? await calcInstance.getResult() : {};
 } else {
 const context = ContextFactory.buildMetaContext({
@@ -65,7 +72,12 @@ class MetaExecutor {
 prices: {}, computedDependencies: computedDeps,
 previousComputedDependencies: prevDeps, config, deps
 });
-
+const res = await calcInstance.process(context);
+
+stats.processedItems = 1; // "Global" item
+calcInstance._executionStats = stats;
+
+return res;
 }
 }
 }
package/functions/computation-system/executors/StandardExecutor.js

@@ -2,6 +2,7 @@
  * @fileoverview Executor for "Standard" (per-user) calculations.
  * UPDATED: Handles lazy loading of data references.
  * UPDATED: Supports Multi-Date Fan-Out Aggregation (Time Machine Mode).
+ * UPDATED: Tracks processed vs skipped users for telemetry.
  */
 const { normalizeName } = require('../utils/utils');
 const { streamPortfolioData, streamHistoryData, getPortfolioPartRefs } = require('../utils/data_loader');
@@ -41,10 +42,6 @@ class StandardExecutor {
 await StandardExecutor.streamAndProcess(dStr, state, passName, config, deps, fullRoot, rootData.portfolioRefs, rootData.historyRefs, fetchedDeps, previousFetchedDeps);

 // 4. Pre-Commit Transformation for Fan-Out
-// If a calc produced multi-date output per user, we must transpose it:
-// FROM: UserA -> { "2024-01-01": data, "2024-01-02": data }
-// TO: "2024-01-01" -> { UserA: data }, "2024-01-02" -> { UserA: data }
-
 const transformedState = {};
 for (const [name, inst] of Object.entries(state)) {
 const result = await inst.getResult(); // { userId: { date: data } } or { userId: data }
@@ -69,7 +66,8 @@
 // Mock a "getResult" for the committer that returns the Transposed Map
 transformedState[name] = {
 manifest: inst.manifest,
-getResult: async () => transposed
+getResult: async () => transposed,
+_executionStats: inst._executionStats // Preserve stats
 };
 continue;
 }
@@ -91,6 +89,12 @@ class StandardExecutor {

 logger.log('INFO', `[${passName}] Streaming for ${streamingCalcs.length} computations...`);

+// [NEW] Execution Metrics Container
+const executionStats = {};
+Object.keys(state).forEach(name => {
+executionStats[name] = { processedUsers: 0, skippedUsers: 0 };
+});
+
 const cachedLoader = new CachedDataLoader(config, deps);
 await cachedLoader.loadMappings();

@@ -113,7 +117,13 @@ class StandardExecutor {
 if (tH_iter) tH_chunk = (await tH_iter.next()).value || {};

 // Execute chunk for all calcs
-const promises = streamingCalcs.map(calc =>
+const promises = streamingCalcs.map(calc =>
+StandardExecutor.executePerUser(
+calc, calc.manifest, dateStr, tP_chunk, yP_chunk, tH_chunk,
+fetchedDeps, previousFetchedDeps, config, deps, cachedLoader,
+executionStats[normalizeName(calc.manifest.name)]
+)
+);
 await Promise.all(promises);
 }
 } finally {
@@ -122,10 +132,15 @@ class StandardExecutor {
 if (tH_iter && tH_iter.return) await tH_iter.return();
 }

+// Attach stats to the instances so ResultCommitter can find them
+for(const name in state) {
+if(state[name]) state[name]._executionStats = executionStats[name];
+}
+
 logger.log('INFO', `[${passName}] Streaming complete.`);
 }

-static async executePerUser(calcInstance, metadata, dateStr, portfolioData, yesterdayPortfolioData, historyData, computedDeps, prevDeps, config, deps, loader) {
+static async executePerUser(calcInstance, metadata, dateStr, portfolioData, yesterdayPortfolioData, historyData, computedDeps, prevDeps, config, deps, loader, stats) {
 const { logger } = deps;
 const targetUserType = metadata.userType;
 const mappings = await loader.loadMappings();
@@ -139,7 +154,10 @@ class StandardExecutor {

 if (targetUserType !== 'all') {
 const mappedTarget = (targetUserType === 'speculator') ? SCHEMAS.USER_TYPES.SPECULATOR : SCHEMAS.USER_TYPES.NORMAL;
-if (mappedTarget !== actualUserType)
+if (mappedTarget !== actualUserType) {
+if (stats) stats.skippedUsers++;
+continue;
+}
 }

 const context = ContextFactory.buildPerUserContext({
@@ -149,7 +167,10 @@ class StandardExecutor {
 config, deps
 });

-try {
+try {
+await calcInstance.process(context);
+if (stats) stats.processedUsers++;
+}
 catch (e) { logger.log('WARN', `Calc ${metadata.name} failed for user ${userId}: ${e.message}`); }
 }
 }
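The removed comment above described the fan-out transpose: a per-user result of the form `{ userId: { 'YYYY-MM-DD': data } }` has to become `{ 'YYYY-MM-DD': { userId: data } }` before the committer can write one document per date. A minimal sketch of that transpose (the function name is illustrative, not the package's actual implementation):

```js
// Illustrative transpose: user -> date map becomes date -> user map.
function transposeUserDates(resultByUser) {
  const byDate = {};
  for (const [userId, dates] of Object.entries(resultByUser || {})) {
    for (const [date, data] of Object.entries(dates || {})) {
      (byDate[date] = byDate[date] || {})[userId] = data;
    }
  }
  return byDate;
}

// e.g. { UserA: { '2024-01-01': 1, '2024-01-02': 2 } }
//   -> { '2024-01-01': { UserA: 1 }, '2024-01-02': { UserA: 2 } }
```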
package/functions/computation-system/helpers/computation_dispatcher.js

@@ -4,6 +4,7 @@
  * UPDATED: Implements Audit Ledger creation with Transactions to prevent Race Conditions.
  * UPDATED: Added Preemptive Hash Check.
  * UPDATED: Added Parallel Status Fetching.
+ * UPDATED: Include triggerReason in Pub/Sub payload.
  */

 const { getExpectedDateStrings, normalizeName, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils.js');
@@ -35,14 +36,9 @@ async function dispatchComputationPass(config, dependencies, computationManifest
 if (!calcsInThisPass.length) { return logger.log('WARN', `[Dispatcher] No calcs for Pass ${passToRun}. Exiting.`); }

 // --- [NEW] OPTIMIZATION 1: PREEMPTIVE HASH CHECK ---
-// If the combined hash of all calculations hasn't changed, we might not need to do anything.
-// Note: This optimization assumes external data (root data) hasn't changed.
-// To be safe, we only use this to skip code-change re-runs, but root data might have arrived.
-// For now, we calculate it but rely on the deep check.
 const currentManifestHash = generateCodeHash(
 computationManifest.map(c => c.hash).sort().join('|')
 );
-// TODO: Implement metadata storage for this hash to skip "Analysis" phase if needed.
 // ---------------------------------------------------

 const calcNames = calcsInThisPass.map(c => c.name);
@@ -122,6 +118,7 @@ async function dispatchComputationPass(config, dependencies, computationManifest
 computation: normalizeName(item.name),
 hash: item.hash || item.newHash,
 previousCategory: item.previousCategory || null,
+triggerReason: item.reason || "Unknown", // <--- THE KEY ADDITION
 timestamp: Date.now()
 });
 });
@@ -157,6 +154,7 @@ async function dispatchComputationPass(config, dependencies, computationManifest
 expectedHash: task.hash || 'unknown',
 createdAt: new Date(),
 dispatcherHash: currentManifestHash, // Tracking source
+triggerReason: task.triggerReason, // Track trigger in ledger too
 retries: 0
 }, { merge: true });
 return true;
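With the addition above, each dispatched Pub/Sub task carries the reason it was triggered alongside the identifying fields. A hedged sketch of the payload shape a worker can expect, with the field list inferred from this diff and the values purely illustrative:

```js
// Example task payload as assembled by the dispatcher (values illustrative).
const examplePayload = {
  action: 'RUN_COMPUTATION_DATE',
  date: '2024-01-01',
  pass: 1,
  computation: 'portfolio-value',         // normalized calculation name (illustrative)
  hash: 'abc123',                          // expected manifest hash
  previousCategory: null,                  // set when a category migration was detected
  triggerReason: 'Layer Update: [math]',   // NEW: why this run was scheduled
  timestamp: Date.now()
};
```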
package/functions/computation-system/helpers/computation_worker.js

@@ -3,6 +3,7 @@
  * PURPOSE: Consumes computation tasks from Pub/Sub and executes them.
  * UPDATED: Integrated Run Ledger for per-run/per-date success/failure tracking.
  * UPDATED: Added Dead Letter Queue logic for Poison Pills.
+ * UPDATED: Now logs the trigger reason.
  */

 const { executeDispatchTask } = require('../WorkflowOrchestrator.js');
@@ -35,17 +36,21 @@ async function handleComputationTask(message, config, dependencies) {

 // ----------------------------------- Validate & Execute -----------------------------------
 if (!data || data.action !== 'RUN_COMPUTATION_DATE') { return; }
-
+
+// Extract Trigger Reason
+const { date, pass, computation, previousCategory, triggerReason } = data;
+
 if (!date || !pass || !computation) { logger.log('ERROR', `[Worker] Invalid payload: Missing date, pass, or computation.`, data); return; }
 let computationManifest;
 try { computationManifest = getManifest(config.activeProductLines || [], calculations, runDependencies);
 } catch (manifestError) { logger.log('FATAL', `[Worker] Failed to load Manifest: ${manifestError.message}`);
-
+// FIX: Passing { durationMs: 0 } instead of {} to satisfy type requirements
+await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: manifestError.message, stage: 'MANIFEST_LOAD' }, { durationMs: 0 }, triggerReason);
 return;
 }

 try {
-logger.log('INFO', `[Worker] 📥 Received: ${computation} for ${date}`);
+logger.log('INFO', `[Worker] 📥 Received: ${computation} for ${date} [Reason: ${triggerReason || 'Unknown'}]`);

 const startTime = Date.now();
 const result = await executeDispatchTask(
@@ -67,19 +72,21 @@ async function handleComputationTask(message, config, dependencies) {
 logger.log('ERROR', `[Worker] ❌ Failed logic/storage for ${computation}`, failReason.error);
 const metrics = failReason.metrics || {};
 metrics.durationMs = duration;
-await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', failReason.error, metrics);
+await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', failReason.error, metrics, triggerReason);
 throw new Error(failReason.error.message || 'Computation Logic Failed');
 }
 else if (Object.keys(successUpdates).length > 0) {
 const successData = successUpdates[computation];
 const metrics = successData.metrics || {};
 metrics.durationMs = duration;
-
-
+
+logger.log('INFO', `[Worker] ✅ Stored: ${computation}. Processed: ${metrics.execution?.processedUsers || metrics.execution?.processedItems || '?'} items.`);
+
+await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', null, metrics, triggerReason);
 }
 else {
 logger.log('WARN', `[Worker] ⚠️ No results produced for ${computation} (Empty?)`);
-await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', { message: 'Empty Result' }, { durationMs: duration });
+await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', { message: 'Empty Result' }, { durationMs: duration }, triggerReason);
 }
 } catch (err) {
 const retryCount = message.deliveryAttempt || 0;
@@ -96,7 +103,7 @@ async function handleComputationTask(message, config, dependencies) {
 } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
 }
 logger.log('ERROR', `[Worker] ❌ Crash: ${computation} for ${date}: ${err.message}`);
-await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' });
+await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' }, { durationMs: 0 }, triggerReason);
 throw err;
 }
 }
package/functions/computation-system/persistence/ResultCommitter.js

@@ -1,8 +1,7 @@
 /**
  * @fileoverview Handles saving computation results with observability and Smart Cleanup.
- * UPDATED:
- * UPDATED:
- * UPDATED: Supports Multi-Date Fan-Out (Time Machine Mode) with CONCURRENCY THROTTLING.
+ * UPDATED: Stores Hash Composition in status for audit trail.
+ * UPDATED: Captures execution metrics.
  */
 const { commitBatchInChunks } = require('./FirestoreUtils');
 const { updateComputationStatus } = require('./StatusRepository');
@@ -10,13 +9,10 @@ const { batchStoreSchemas } = require('../utils/schema_capture');
 const { generateProcessId, PROCESS_TYPES } = require('../logger/logger');
 const { HeuristicValidator } = require('./ResultsValidator');
 const validationOverrides = require('../config/validation_overrides');
-const pLimit = require('p-limit');
+const pLimit = require('p-limit');

 const NON_RETRYABLE_ERRORS = [
-'INVALID_ARGUMENT',
-'PERMISSION_DENIED', // Auth issue
-'DATA_LOSS', // Firestore corruption
-'FAILED_PRECONDITION' // Transaction requirements not met
+'INVALID_ARGUMENT', 'PERMISSION_DENIED', 'DATA_LOSS', 'FAILED_PRECONDITION'
 ];

 async function commitResults(stateObj, dStr, passName, config, deps, skipStatusWrite = false) {
@@ -27,21 +23,22 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 const { logger, db } = deps;
 const pid = generateProcessId(PROCESS_TYPES.STORAGE, passName, dStr);

-// SAFETY LIMIT: Only allow 10 concurrent daily writes to prevent network saturation during Fan-Out
 const fanOutLimit = pLimit(10);

 for (const name in stateObj) {
 const calc = stateObj[name];

-//
+// [NEW] Check for execution stats attached by Executor
+const execStats = calc._executionStats || { processedUsers: 0, skippedUsers: 0 };
+
 const runMetrics = {
 storage: { sizeBytes: 0, isSharded: false, shardCount: 1, keys: 0 },
-validation: { isValid: true, anomalies: [] }
+validation: { isValid: true, anomalies: [] },
+execution: execStats // <--- Pass this to RunRecorder
 };

 try {
 const result = await calc.getResult();
-
 const overrides = validationOverrides[calc.manifest.name] || {};
 const healthCheck = HeuristicValidator.analyze(calc.manifest.name, result, overrides);

@@ -54,52 +51,52 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 const isEmpty = !result || (typeof result === 'object' && Object.keys(result).length === 0) || (typeof result === 'number' && result === 0);
 if (isEmpty) {
 if (calc.manifest.hash) {
-successUpdates[name] = {
+successUpdates[name] = {
+hash: calc.manifest.hash,
+category: calc.manifest.category,
+composition: calc.manifest.composition,
+metrics: runMetrics
+};
 }
 continue;
 }

 if (typeof result === 'object') runMetrics.storage.keys = Object.keys(result).length;

-//
-// If the result keys are ALL date strings (YYYY-MM-DD), we split the writes.
+// ... (Fan-out logic remains same) ...
 const resultKeys = Object.keys(result || {});
 const isMultiDate = resultKeys.length > 0 && resultKeys.every(k => /^\d{4}-\d{2}-\d{2}$/.test(k));

 if (isMultiDate) {
 logger.log('INFO', `[ResultCommitter] 🕰️ Multi-Date Output detected for ${name} (${resultKeys.length} days). Throttled Fan-Out...`);

-// Group updates by DATE. result is { "2024-01-01": { user1: ... }, "2024-01-02": { user1: ... } }
-// We execute a fan-out commit for each date using p-limit.
-
 const datePromises = resultKeys.map((historicalDate) => fanOutLimit(async () => {
 const dailyData = result[historicalDate];
 if (!dailyData || Object.keys(dailyData).length === 0) return;

 const historicalDocRef = db.collection(config.resultsCollection)
-.doc(historicalDate)
+.doc(historicalDate)
 .collection(config.resultsSubcollection)
 .doc(calc.manifest.category)
 .collection(config.computationsSubcollection)
 .doc(name);

-// Re-use the existing sharding logic for this specific date payload
 await writeSingleResult(dailyData, historicalDocRef, name, historicalDate, logger, config, deps);
 }));

 await Promise.all(datePromises);

-// Mark success for the Target Date (dStr) so the workflow continues
 if (calc.manifest.hash) {
 successUpdates[name] = {
-hash:
-category:
-
+hash: calc.manifest.hash,
+category: calc.manifest.category,
+composition: calc.manifest.composition,
 metrics: runMetrics
 };
 }

 } else {
-// --- STANDARD MODE
+// --- STANDARD MODE ---
 const mainDocRef = db.collection(config.resultsCollection)
 .doc(dStr)
 .collection(config.resultsSubcollection)
@@ -107,30 +104,27 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 .collection(config.computationsSubcollection)
 .doc(name);

-// Use the encapsulated write function
 const writeStats = await writeSingleResult(result, mainDocRef, name, dStr, logger, config, deps);

-runMetrics.storage.sizeBytes
-runMetrics.storage.isSharded
+runMetrics.storage.sizeBytes = writeStats.totalSize;
+runMetrics.storage.isSharded = writeStats.isSharded;
 runMetrics.storage.shardCount = writeStats.shardCount;

-// Mark Success & Pass Metrics
 if (calc.manifest.hash) {
 successUpdates[name] = {
-hash:
-category:
+hash: calc.manifest.hash,
+category: calc.manifest.category,
+composition: calc.manifest.composition,
 metrics: runMetrics
 };
 }
 }

-// Capture Schema
 if (calc.manifest.class.getSchema) {
 const { class: _cls, ...safeMetadata } = calc.manifest;
 schemas.push({ name, category: calc.manifest.category, schema: calc.manifest.class.getSchema(), metadata: safeMetadata });
 }

-// Cleanup Migration
 if (calc.manifest.previousCategory && calc.manifest.previousCategory !== calc.manifest.category) {
 cleanupTasks.push(deleteOldCalculationData(dStr, calc.manifest.previousCategory, name, config, deps));
 }
@@ -144,7 +138,7 @@
 failureReport.push({
 name,
 error: { message: msg, stack: e.stack, stage },
-metrics: runMetrics
+metrics: runMetrics
 });
 }
 }
@@ -156,181 +150,80 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
 return { successUpdates, failureReport };
 }

-
-* Encapsulated write logic for reuse in Fan-Out.
-* Handles sharding strategy and retries.
-*/
+// ... (Helper functions remain unchanged from context) ...
 async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps) {
-
-
-{ bytes: 900 * 1024, keys: null }, // Attempt 1: Standard
-{ bytes: 450 * 1024, keys: 10000 }, // Attempt 2: High Index usage
-{ bytes: 200 * 1024, keys: 2000 } // Attempt 3: Extreme fragmentation
-];
-
-let committed = false;
-let lastError = null;
-let finalStats = { totalSize: 0, isSharded: false, shardCount: 1 };
+const strategies = [ { bytes: 900 * 1024, keys: null }, { bytes: 450 * 1024, keys: 10000 }, { bytes: 200 * 1024, keys: 2000 } ];
+let committed = false; let lastError = null; let finalStats = { totalSize: 0, isSharded: false, shardCount: 1 };

 for (let attempt = 0; attempt < strategies.length; attempt++) {
 if (committed) break;
-
 const constraints = strategies[attempt];
-
 try {
-// 1. Prepare Shards with current constraints
 const updates = await prepareAutoShardedWrites(result, docRef, logger, constraints.bytes, constraints.keys);
-
-// Stats
 const pointer = updates.find(u => u.data._completed === true);
 finalStats.isSharded = pointer && pointer.data._sharded === true;
 finalStats.shardCount = finalStats.isSharded ? (pointer.data._shardCount || 1) : 1;
 finalStats.totalSize = updates.reduce((acc, u) => acc + (u.data ? JSON.stringify(u.data).length : 0), 0);
-
-// 2. Attempt Commit
 await commitBatchInChunks(config, deps, updates, `${name}::${dateContext} (Att ${attempt+1})`);
-
-
-if (logger && logger.logStorage) {
-logger.logStorage(null, name, dateContext, docRef.path, finalStats.totalSize, finalStats.isSharded);
-}
-
-committed = true; // Exit loop
-
+if (logger && logger.logStorage) { logger.logStorage(null, name, dateContext, docRef.path, finalStats.totalSize, finalStats.isSharded); }
+committed = true;
 } catch (commitErr) {
 lastError = commitErr;
 const msg = commitErr.message || '';
-
-
-
-logger.log('ERROR', `[SelfHealing] ${name} encountered FATAL error (Attempt ${attempt + 1}): ${msg}. Aborting.`);
-throw commitErr;
-}
-
-const isSizeError = msg.includes('Transaction too big') || msg.includes('payload is too large');
-const isIndexError = msg.includes('too many index entries') || msg.includes('INVALID_ARGUMENT');
-
-if (isSizeError || isIndexError) {
-logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} failed write attempt ${attempt + 1}. Retrying with tighter constraints...`, { error: msg });
-continue; // Try next strategy
-} else {
-logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} unknown error (Attempt ${attempt + 1}). Retrying...`, { error: msg });
-}
+if (NON_RETRYABLE_ERRORS.includes(commitErr.code)) { logger.log('ERROR', `[SelfHealing] ${name} FATAL error: ${msg}.`); throw commitErr; }
+if (msg.includes('Transaction too big') || msg.includes('payload is too large') || msg.includes('too many index entries')) { logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} failed attempt ${attempt+1}. Retrying...`, { error: msg }); continue; }
+else { logger.log('WARN', `[SelfHealing] ${name} on ${dateContext} unknown error. Retrying...`, { error: msg }); }
 }
 }
-
-if (!committed) {
-throw {
-message: `Exhausted sharding strategies for ${name} on ${dateContext}. Last error: ${lastError?.message}`,
-stack: lastError?.stack,
-stage: 'SHARDING_LIMIT_EXCEEDED'
-};
-}
-
+if (!committed) { throw { message: `Exhausted sharding strategies for ${name}. Last error: ${lastError?.message}`, stack: lastError?.stack, stage: 'SHARDING_LIMIT_EXCEEDED' }; }
 return finalStats;
 }

-/**
-* Deletes result documents from a previous category location.
-*/
 async function deleteOldCalculationData(dateStr, oldCategory, calcName, config, deps) {
 const { db, logger, calculationUtils } = deps;
 const { withRetry } = calculationUtils || { withRetry: (fn) => fn() };
-
 try {
-const oldDocRef = db.collection(config.resultsCollection)
-
-.collection(config.resultsSubcollection)
-.doc(oldCategory)
-.collection(config.computationsSubcollection)
-.doc(calcName);
-
-const shardsCol = oldDocRef.collection('_shards');
+const oldDocRef = db.collection(config.resultsCollection).doc(dateStr).collection(config.resultsSubcollection).doc(oldCategory).collection(config.computationsSubcollection).doc(calcName);
+const shardsCol = oldDocRef.collection('_shards');
 const shardsSnap = await withRetry(() => shardsCol.listDocuments(), 'ListOldShards');
-const batch
-let ops = 0;
-
+const batch = db.batch(); let ops = 0;
 for (const shardDoc of shardsSnap) { batch.delete(shardDoc); ops++; }
-batch.delete(oldDocRef);
-ops++;
-
+batch.delete(oldDocRef); ops++;
 await withRetry(() => batch.commit(), 'CleanupOldCategory');
-logger.log('INFO', `[Migration] Cleaned up ${ops} docs for ${calcName} in
-
-} catch (e) {
-logger.log('WARN', `[Migration] Failed to clean up old data for ${calcName}: ${e.message}`);
-}
+logger.log('INFO', `[Migration] Cleaned up ${ops} docs for ${calcName} in '${oldCategory}'`);
+} catch (e) { logger.log('WARN', `[Migration] Failed to clean up ${calcName}: ${e.message}`); }
 }

 function calculateFirestoreBytes(value) {
-if (value === null) return 1;
-if (value === undefined) return 0;
-if (typeof value === 'boolean') return 1;
-if (typeof value === 'number') return 8;
-if (typeof value === 'string') return Buffer.byteLength(value, 'utf8') + 1;
-if (value instanceof Date) return 8;
-if (value.constructor && value.constructor.name === 'DocumentReference') { return Buffer.byteLength(value.path, 'utf8') + 16; }
+if (value === null) return 1; if (value === undefined) return 0; if (typeof value === 'boolean') return 1; if (typeof value === 'number') return 8; if (typeof value === 'string') return Buffer.byteLength(value, 'utf8') + 1; if (value instanceof Date) return 8; if (value.constructor && value.constructor.name === 'DocumentReference') { return Buffer.byteLength(value.path, 'utf8') + 16; }
 if (Array.isArray(value)) { let sum = 0; for (const item of value) sum += calculateFirestoreBytes(item); return sum; }
-if (typeof value === 'object') { let sum = 0; for (const k in value) { if (Object.prototype.hasOwnProperty.call(value, k)) { sum += (Buffer.byteLength(k, 'utf8') + 1) + calculateFirestoreBytes(value[k]); } } return sum; }
-return 0;
+if (typeof value === 'object') { let sum = 0; for (const k in value) { if (Object.prototype.hasOwnProperty.call(value, k)) { sum += (Buffer.byteLength(k, 'utf8') + 1) + calculateFirestoreBytes(value[k]); } } return sum; } return 0;
 }

 async function prepareAutoShardedWrites(result, docRef, logger, maxBytes = 900 * 1024, maxKeys = null) {
-const OVERHEAD_ALLOWANCE
-const
-
-
-const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16;
-
-const writes = [];
-const shardCollection = docRef.collection('_shards');
-let currentChunk = {};
-let currentChunkSize = 0;
-let currentKeyCount = 0;
-let shardIndex = 0;
+const OVERHEAD_ALLOWANCE = 20 * 1024; const CHUNK_LIMIT = maxBytes - OVERHEAD_ALLOWANCE;
+const totalSize = calculateFirestoreBytes(result); const docPathSize = Buffer.byteLength(docRef.path, 'utf8') + 16;
+const writes = []; const shardCollection = docRef.collection('_shards');
+let currentChunk = {}; let currentChunkSize = 0; let currentKeyCount = 0; let shardIndex = 0;

-// Fast path: If small enough AND keys are safe
 if (!maxKeys && (totalSize + docPathSize) < CHUNK_LIMIT) {
-const data = {
-...result,
-_completed: true,
-_sharded: false,
-_lastUpdated: new Date().toISOString()
-};
+const data = { ...result, _completed: true, _sharded: false, _lastUpdated: new Date().toISOString() };
 return [{ ref: docRef, data, options: { merge: true } }];
 }

 for (const [key, value] of Object.entries(result)) {
 if (key.startsWith('_')) continue;
-const keySize
-const
-const itemSize = keySize + valueSize;
-
-const byteLimitReached = (currentChunkSize + itemSize > CHUNK_LIMIT);
-const keyLimitReached = (maxKeys && currentKeyCount + 1 >= maxKeys);
-
+const keySize = Buffer.byteLength(key, 'utf8') + 1; const valueSize = calculateFirestoreBytes(value); const itemSize = keySize + valueSize;
+const byteLimitReached = (currentChunkSize + itemSize > CHUNK_LIMIT); const keyLimitReached = (maxKeys && currentKeyCount + 1 >= maxKeys);
 if (byteLimitReached || keyLimitReached) {
 writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } });
-shardIndex++;
-currentChunk = {};
-currentChunkSize = 0;
-currentKeyCount = 0;
+shardIndex++; currentChunk = {}; currentChunkSize = 0; currentKeyCount = 0;
 }
-currentChunk[key] = value;
-currentChunkSize += itemSize;
-currentKeyCount++;
-}
-
-if (Object.keys(currentChunk).length > 0) {
-writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } });
+currentChunk[key] = value; currentChunkSize += itemSize; currentKeyCount++;
 }
+if (Object.keys(currentChunk).length > 0) { writes.push({ ref: shardCollection.doc(`shard_${shardIndex}`), data: currentChunk, options: { merge: false } }); }

-const pointerData = {
-_completed: true,
-_sharded: true,
-_shardCount: shardIndex + 1,
-_lastUpdated: new Date().toISOString()
-};
+const pointerData = { _completed: true, _sharded: true, _shardCount: shardIndex + 1, _lastUpdated: new Date().toISOString() };
 writes.push({ ref: docRef, data: pointerData, options: { merge: false } });
 return writes;
 }
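The sharded write path above splits an oversized result map into `_shards/shard_N` documents plus a small pointer document on the main ref, escalating to tighter byte and key limits on each retry. A minimal sketch of the key-packing decision, where `sizeOf` is a stand-in approximation for the package's `calculateFirestoreBytes` and `packIntoShards` is an illustrative name only:

```js
// Sketch of the packing loop used when a result exceeds the chunk limit.
// sizeOf is a rough stand-in; the package uses calculateFirestoreBytes above.
const sizeOf = (v) => Buffer.byteLength(JSON.stringify(v ?? null), 'utf8');

function packIntoShards(result, chunkLimitBytes, maxKeys = null) {
  const shards = [];
  let chunk = {}, bytes = 0, keys = 0;
  for (const [k, v] of Object.entries(result)) {
    const itemBytes = Buffer.byteLength(k, 'utf8') + 1 + sizeOf(v);
    if (bytes + itemBytes > chunkLimitBytes || (maxKeys && keys + 1 >= maxKeys)) {
      shards.push(chunk); chunk = {}; bytes = 0; keys = 0; // start a new shard
    }
    chunk[k] = v; bytes += itemBytes; keys++;
  }
  if (Object.keys(chunk).length) shards.push(chunk);
  return shards; // the parent doc then only stores { _sharded: true, _shardCount: shards.length }
}
```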
package/functions/computation-system/persistence/ResultsValidator.js

@@ -59,11 +59,7 @@ class HeuristicValidator {
 // Vector/Profile Empty Check (Specific to your System)
 // If result contains 'profile', 'history', 'sparkline', or 'buckets' arrays
 const arrayProps = ['profile', 'history', 'sparkline', 'buckets', 'prices'];
-for (const prop of arrayProps) {
-if (Array.isArray(val[prop]) && val[prop].length === 0) {
-emptyVectorCount++;
-}
-}
+for (const prop of arrayProps) { if (Array.isArray(val[prop]) && val[prop].length === 0) { emptyVectorCount++; } }

 // Extract primary numeric score for Flatline check (heuristically guessing the 'main' metric)
 const numericProp = subValues.find(v => typeof v === 'number' && v !== 0);
@@ -71,8 +67,8 @@ class HeuristicValidator {
 }
 // --- TYPE B: Scalar / Primitive Result ---
 if (typeof val === 'number') {
-if (isNaN(val) || !isFinite(val))
-nanCount++;
+if (isNaN(val) || !isFinite(val))
+{ nanCount++;
 } else {
 numericValues.push(val); // Include zeros
 if (val === 0) zeroCount++;
package/functions/computation-system/persistence/RunRecorder.js

@@ -1,142 +1,101 @@
 /**
  * @fileoverview Utility for recording computation run attempts (The Audit Logger).
- *
- * Implements aggregated error stats and advanced performance metrics.
+ * UPDATED: Stores 'trigger' reason and 'execution' stats.
  */

 const { FieldValue } = require('../utils/utils');
 const os = require('os');

-// Root collection for the new audit system
 const AUDIT_COLLECTION = 'computation_audit_logs';

-/**
-* Sanitizes error messages to be used as Firestore Map keys.
-* Replaces invalid characters (. / [ ] *) with underscores.
-*/
 function sanitizeErrorKey(message) {
 if (!message) return 'Unknown_Error';
-// Take first 100 chars to avoid key limit issues
 const shortMsg = message.toString().substring(0, 100);
 return shortMsg.replace(/[./\[\]*`]/g, '_').trim();
 }

 /**
  * Records a run attempt with detailed metrics and aggregated stats.
- *
- * @param {Object} context - Context object
- * @param {string} context.date - The "Target Date" of the computation
- * @param {string} context.computation - The name of the calculation
- * @param {string} context.pass - The topology pass number
- * @param {string} status - 'SUCCESS', 'FAILURE', 'CRASH', or 'SKIPPED'
- * @param {Object|null} error - Error object if failed
- * @param {Object} detailedMetrics - Expanded metrics object (Optional, defaults provided)
- * @param {number} [detailedMetrics.durationMs] - Execution time
- * @param {Object} [detailedMetrics.storage] - { sizeBytes, isSharded, shardCount }
- * @param {Object} [detailedMetrics.validation] - { isValid, anomalies: [] }
+ * ADDED: 'triggerReason' param.
  */
-async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }) {
+async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }, triggerReason = 'Unknown') {
 if (!db || !context) return;

 const { date: targetDate, computation, pass } = context;
 const now = new Date();
 const triggerTimestamp = now.getTime();

-// 1. Construct Paths
-// Parent Doc: Stores global aggregates for this computation
 const computationDocRef = db.collection(AUDIT_COLLECTION).doc(computation);
-
-// History Doc: Stores this specific run
-// ID Format: targetDate_triggerTimestamp (Sortable by data date, then execution time)
 const runId = `${targetDate}_${triggerTimestamp}`;
 const runDocRef = computationDocRef.collection('history').doc(runId);

-// 2. Prepare Metrics & Environment Info
 const workerId = process.env.FUNCTION_TARGET || process.env.K_REVISION || os.hostname();

-// Calculate size in MB
 let sizeMB = 0;
 if (detailedMetrics.storage && detailedMetrics.storage.sizeBytes) { sizeMB = Number((detailedMetrics.storage.sizeBytes / (1024 * 1024)).toFixed(4)); }

-// Extract Validation Anomalies (Unusual Keys/Values)
 const anomalies = detailedMetrics.validation?.anomalies || [];
 if (error && error.message && error.message.includes('Data Integrity')) { anomalies.push(error.message); }

-// 3. Construct the Run Log Entry
 const runEntry = {
-// Identity
 runId: runId,
 computationName: computation,
 pass: String(pass),
 workerId: workerId,
-
-
-targetDate: targetDate, // The date the data belongs to
-triggerTime: now.toISOString(), // The date the code ran
+targetDate: targetDate,
+triggerTime: now.toISOString(),
 durationMs: detailedMetrics.durationMs || 0,
-
-// Status
 status: status,

-//
+// [NEW] Trigger Context
+trigger: {
+reason: triggerReason || 'Unknown',
+type: (triggerReason && triggerReason.includes('Layer')) ? 'CASCADE' : ((triggerReason && triggerReason.includes('New')) ? 'INIT' : 'UPDATE')
+},
+
+// [NEW] Execution Stats (Internal Loop Data)
+executionStats: detailedMetrics.execution || {},
+
 outputStats: {
 sizeMB: sizeMB,
 isSharded: !!detailedMetrics.storage?.isSharded,
 shardCount: detailedMetrics.storage?.shardCount || 1,
-keysWritten: detailedMetrics.storage?.keys || 0
+keysWritten: detailedMetrics.storage?.keys || 0
 },
-
-
-
-// Metadata
-_schemaVersion: '2.0'
+anomalies: anomalies,
+_schemaVersion: '2.1'
 };

-// Attach Error Details if present
 if (error) {
 runEntry.error = {
 message: error.message || 'Unknown Error',
 stage: error.stage || 'UNKNOWN',
-stack: error.stack ? error.stack.substring(0, 1000) : null,
+stack: error.stack ? error.stack.substring(0, 1000) : null,
 code: error.code || null
 };
 }

-// 4. Prepare Aggregation Update (Atomic Increments)
 const statsUpdate = {
 lastRunAt: now,
 lastRunStatus: status,
 totalRuns: FieldValue.increment(1)
 };

-if (status === 'SUCCESS') {
-
-} else {
-statsUpdate.failureCount = FieldValue.increment(1);
-// Increment specific error type counter
+if (status === 'SUCCESS') { statsUpdate.successCount = FieldValue.increment(1);
+} else { statsUpdate.failureCount = FieldValue.increment(1);
 if (error) {
 const safeKey = sanitizeErrorKey(error.message);
 statsUpdate[`errorCounts.${safeKey}`] = FieldValue.increment(1);
 }
 }

-// 5. Execute as Batch
 try {
 const batch = db.batch();
-
-// Set the specific run log
 batch.set(runDocRef, runEntry);
-
-// Merge updates into the parent computation document
-// We use { merge: true } implicitly with set or explicit update.
-// Using set({ merge: true }) ensures doc creation if it doesn't exist.
 batch.set(computationDocRef, statsUpdate, { merge: true });
-
 await batch.commit();
-
 } catch (e) {
-// Fallback logging if Firestore fails (prevents infinite loop crashing)
 console.error(`[RunRecorder] ❌ CRITICAL: Failed to write audit log for ${computation}`, e);
 }
 }
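The `trigger.type` field in the run entry above is derived from the free-text reason with a simple substring heuristic. A small illustration of that mapping, with example reasons only:

```js
// Illustration of the trigger.type heuristic used in the run entry above.
const classifyTrigger = (reason) =>
  (reason && reason.includes('Layer')) ? 'CASCADE'
  : (reason && reason.includes('New')) ? 'INIT'
  : 'UPDATE';

classifyTrigger('Layer Update: [math]');   // 'CASCADE'
classifyTrigger('New Calculation');        // 'INIT'
classifyTrigger('Code Changed');           // 'UPDATE'
```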
package/functions/computation-system/persistence/StatusRepository.js

@@ -1,6 +1,6 @@
 /**
  * @fileoverview Manages computation status tracking in Firestore.
- * UPDATED: Supports Schema V2 (Object with Category) for
+ * UPDATED: Supports Schema V2 (Object with Category & Composition) for deep auditing.
  */

 async function fetchComputationStatus(dateStr, config, { db }) {
@@ -14,8 +14,11 @@ async function fetchComputationStatus(dateStr, config, { db }) {

 // Normalize V1 (String) to V2 (Object)
 for (const [name, value] of Object.entries(rawData)) {
-if (typeof value === 'string') {
-
+if (typeof value === 'string') {
+normalized[name] = { hash: value, category: null, composition: null }; // Legacy entry
+} else {
+normalized[name] = value;
+}
 }

 return normalized;
@@ -30,8 +33,16 @@ async function updateComputationStatus(dateStr, updates, config, { db }) {

 const safeUpdates = {};
 for (const [key, val] of Object.entries(updates)) {
-if (typeof val === 'string') {
-
+if (typeof val === 'string') {
+// Legacy Call Fallback
+safeUpdates[key] = { hash: val, category: 'unknown', lastUpdated: new Date() };
+} else {
+// V2 Call: val should contain { hash, category, composition }
+safeUpdates[key] = {
+...val,
+lastUpdated: new Date()
+};
+}
 }

 await docRef.set(safeUpdates, { merge: true });

package/functions/computation-system/tools/BuildReporter.js

@@ -2,6 +2,7 @@
  * @fileoverview Build Reporter & Auto-Runner.
  * Generates a "Pre-Flight" report of what the computation system WILL do.
  * UPDATED: Fixed 'latest' document overwrite bug.
+ * UPDATED: Now reports specific reasons for Re-Runs.
  */

 const { analyzeDateExecution } = require('../WorkflowOrchestrator');
@@ -111,8 +112,9 @@ async function generateBuildReport(config, dependencies, manifest, daysBack = 90
 // E. Format Findings
 const dateSummary = { willRun: [], willReRun: [], blocked: [], impossible: [] };

+// Pass the generated "Reason" string through to the report
 analysis.runnable.forEach (item => dateSummary.willRun.push ({ name: item.name, reason: "New / No Previous Record" }));
-analysis.reRuns.forEach (item => dateSummary.willReRun.push ({ name: item.name, reason: item.
+analysis.reRuns.forEach (item => dateSummary.willReRun.push ({ name: item.name, reason: item.reason || "Hash Mismatch" }));
 analysis.impossible.forEach (item => dateSummary.impossible.push ({ name: item.name, reason: item.reason }));
 [...analysis.blocked, ...analysis.failedDependency].forEach(item => dateSummary.blocked.push({ name: item.name, reason: item.reason || 'Dependency' }));