bulltrackers-module 1.0.265 → 1.0.267
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system/WorkflowOrchestrator.js +15 -9
- package/functions/computation-system/context/ManifestBuilder.js +0 -1
- package/functions/computation-system/executors/MetaExecutor.js +15 -3
- package/functions/computation-system/executors/StandardExecutor.js +30 -9
- package/functions/computation-system/helpers/computation_dispatcher.js +3 -5
- package/functions/computation-system/helpers/computation_worker.js +15 -8
- package/functions/computation-system/persistence/ResultCommitter.js +18 -12
- package/functions/computation-system/persistence/ResultsValidator.js +3 -7
- package/functions/computation-system/persistence/RunRecorder.js +20 -61
- package/functions/computation-system/tools/BuildReporter.js +1 -0
- package/package.json +1 -1
package/functions/computation-system/WorkflowOrchestrator.js

@@ -1,6 +1,7 @@
 /**
  * @fileoverview Main Orchestrator. Coordinates the topological execution.
  * UPDATED: Implements Smart Audit logic to detect WHY a hash mismatch occurred.
+ * FIX: Added 'Audit Upgrade' check to force re-run if composition metadata is missing.
  */
 const { normalizeName, DEFINITIVE_EARLIEST_DATES } = require('./utils/utils');
 const { checkRootDataAvailability, checkRootDependencies } = require('./data/AvailabilityChecker');

@@ -49,8 +50,9 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,

     const markRunnable = (isReRun = false, reRunDetails = null) => {
         if (isReRun) report.reRuns.push(reRunDetails);
-        else report.runnable.push(
-
+        else report.runnable.push({ name: cName, ...reRunDetails });
+        // Simulate success so dependents can pass their check
+        simulationStatus[cName] = { hash: currentHash, category: calc.category, composition: calc.composition };
     };

     let migrationOldCategory = null;

@@ -107,7 +109,7 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,

     // --- HASH CHECK LOGIC ---
     if (!storedHash) {
-        markRunnable(false
+        markRunnable(false, { reason: "New Calculation" });
     }
     else if (storedHash !== currentHash) {
         // Smart Logic: Why did it change?

@@ -116,22 +118,17 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
         const newComp = calc.composition;

         if (oldComp && newComp) {
-            // 1. Check Code
             if (oldComp.code !== newComp.code) {
                 changeReason = "Code Changed";
             }
-            // 2. Check Layers
             else if (JSON.stringify(oldComp.layers) !== JSON.stringify(newComp.layers)) {
-                // Find specific layer
                 const changedLayers = [];
                 for(const lKey in newComp.layers) {
                     if (newComp.layers[lKey] !== oldComp.layers[lKey]) changedLayers.push(lKey);
                 }
                 changeReason = `Layer Update: [${changedLayers.join(', ')}]`;
             }
-            // 3. Check Dependencies
             else if (JSON.stringify(oldComp.deps) !== JSON.stringify(newComp.deps)) {
-                // Find specific dep
                 const changedDeps = [];
                 for(const dKey in newComp.deps) {
                     if (newComp.deps[dKey] !== oldComp.deps[dKey]) changedDeps.push(dKey);

@@ -150,12 +147,21 @@ function analyzeDateExecution(dateStr, calcsInPass, rootDataStatus, dailyStatus,
             oldHash: storedHash,
             newHash: currentHash,
             previousCategory: migrationOldCategory,
-            reason: changeReason
+            reason: changeReason
         });
     }
     else if (migrationOldCategory) {
         markRunnable(true, { name: cName, reason: 'Category Migration', previousCategory: migrationOldCategory, newCategory: calc.category });
     }
+    // [CRITICAL FIX] Audit Upgrade Check: Force re-run if hash matches but composition is missing (Legacy Record)
+    else if (!stored.composition) {
+        markRunnable(true, {
+            name: cName,
+            oldHash: storedHash,
+            newHash: currentHash,
+            reason: 'Audit Upgrade (Populating Composition Metadata)'
+        });
+    }
     else {
         report.skipped.push({ name: cName });
         simulationStatus[cName] = { hash: currentHash, category: calc.category, composition: calc.composition };
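Note: the new 'Audit Upgrade' branch fires only when the hash matches but the stored status record predates composition tracking. A minimal sketch of the two record shapes involved (field values hypothetical; shapes inferred from this diff):

const legacyStored = { hash: 'abc123', category: 'meta' }; // pre-audit record: no composition field
const calc = {
    hash: 'abc123',
    category: 'meta',
    composition: { code: 'c0ffee', layers: { prices: 'v2' }, deps: { alpha: 'h9' } }
};

// Hash is unchanged, so the old logic would have skipped this date.
// The new branch instead forces one re-run to backfill composition metadata:
if (legacyStored.hash === calc.hash && !legacyStored.composition) {
    // markRunnable(true, { reason: 'Audit Upgrade (Populating Composition Metadata)' });
}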
package/functions/computation-system/context/ManifestBuilder.js

@@ -85,7 +85,6 @@ function getDependencySet(endpoints, adjacencyList) {
 function buildManifest(productLinesToRun = [], calculations) {
     log.divider('Building Dynamic Manifest');

-    // [LOG VERIFICATION] Log the Input Request
     const requestedLog = (!productLinesToRun || productLinesToRun.length === 0)
         ? "ALL (Wildcard/Empty)"
         : productLinesToRun.join(', ');
package/functions/computation-system/executors/MetaExecutor.js

@@ -1,6 +1,7 @@
 /**
  * @fileoverview Executor for "Meta" (global) calculations.
  * UPDATED: Uses CachedDataLoader for all data access.
+ * UPDATED: Tracks processed shard/item counts.
  */
 const { normalizeName } = require('../utils/utils');
 const { CachedDataLoader } = require('../data/CachedDataLoader');

@@ -19,8 +20,6 @@ class MetaExecutor {
             const inst = new mCalc.class();
             inst.manifest = mCalc;

-            // We do not pass 'rootData' (which has null refs) to execution.
-            // The Executor fetches its own data via loader.
             await MetaExecutor.executeOncePerDay(inst, mCalc, dStr, fetchedDeps, previousFetchedDeps, config, deps, cachedLoader);
             state[normalizeName(mCalc.name)] = inst;
         } catch (e) {

@@ -33,6 +32,7 @@ class MetaExecutor {
     static async executeOncePerDay(calcInstance, metadata, dateStr, computedDeps, prevDeps, config, deps, loader) {
         const mappings = await loader.loadMappings();
         const { logger } = deps;
+        const stats = { processedShards: 0, processedItems: 0 };

         // Lazy fetch insights/social using the loader
         const insights = metadata.rootDataDependencies?.includes('insights') ? { today: await loader.loadInsights(dateStr) } : null;

@@ -55,9 +55,16 @@ class MetaExecutor {
                 await calcInstance.process(partialContext);
                 partialContext.prices = null;
                 processedCount++;
+
+                stats.processedShards++;
+                stats.processedItems += Object.keys(shardData).length;
+
                 if (processedCount % 10 === 0 && global.gc) { global.gc(); }
             }
             logger.log('INFO', `[Executor] Finished Batched Execution for ${metadata.name} (${processedCount} shards).`);
+
+            // Attach stats
+            calcInstance._executionStats = stats;
             return calcInstance.getResult ? await calcInstance.getResult() : {};
         } else {
             const context = ContextFactory.buildMetaContext({

@@ -65,7 +72,12 @@ class MetaExecutor {
                 prices: {}, computedDependencies: computedDeps,
                 previousComputedDependencies: prevDeps, config, deps
             });
-
+            const res = await calcInstance.process(context);
+
+            stats.processedItems = 1; // "Global" item
+            calcInstance._executionStats = stats;
+
+            return res;
         }
     }
 }
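Note: both executors now hang their counters off the calculation instance under `_executionStats`, which ResultCommitter later reads. A sketch of the meta-side convention, with a hypothetical shard payload and a stand-in instance:

const calcInstance = {}; // stand-in for a meta calculation instance
const stats = { processedShards: 0, processedItems: 0 };
const shardData = { userA: { value: 1 }, userB: { value: 2 } }; // hypothetical shard

// Batched path: one increment per shard, items counted from the shard's keys.
stats.processedShards++;
stats.processedItems += Object.keys(shardData).length; // 2

// Single-shot path: the whole run counts as one "Global" item.
// Either way the counters ride along on the instance:
calcInstance._executionStats = stats;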
package/functions/computation-system/executors/StandardExecutor.js

@@ -2,6 +2,7 @@
  * @fileoverview Executor for "Standard" (per-user) calculations.
  * UPDATED: Handles lazy loading of data references.
  * UPDATED: Supports Multi-Date Fan-Out Aggregation (Time Machine Mode).
+ * UPDATED: Tracks processed vs skipped users for telemetry.
  */
 const { normalizeName } = require('../utils/utils');
 const { streamPortfolioData, streamHistoryData, getPortfolioPartRefs } = require('../utils/data_loader');

@@ -41,10 +42,6 @@ class StandardExecutor {
         await StandardExecutor.streamAndProcess(dStr, state, passName, config, deps, fullRoot, rootData.portfolioRefs, rootData.historyRefs, fetchedDeps, previousFetchedDeps);

         // 4. Pre-Commit Transformation for Fan-Out
-        // If a calc produced multi-date output per user, we must transpose it:
-        // FROM: UserA -> { "2024-01-01": data, "2024-01-02": data }
-        // TO: "2024-01-01" -> { UserA: data }, "2024-01-02" -> { UserA: data }
-
         const transformedState = {};
         for (const [name, inst] of Object.entries(state)) {
             const result = await inst.getResult(); // { userId: { date: data } } or { userId: data }

@@ -69,7 +66,8 @@ class StandardExecutor {
                 // Mock a "getResult" for the committer that returns the Transposed Map
                 transformedState[name] = {
                     manifest: inst.manifest,
-                    getResult: async () => transposed
+                    getResult: async () => transposed,
+                    _executionStats: inst._executionStats // Preserve stats
                 };
                 continue;
             }

@@ -91,6 +89,12 @@ class StandardExecutor {

         logger.log('INFO', `[${passName}] Streaming for ${streamingCalcs.length} computations...`);

+        // [NEW] Execution Metrics Container
+        const executionStats = {};
+        Object.keys(state).forEach(name => {
+            executionStats[name] = { processedUsers: 0, skippedUsers: 0 };
+        });
+
         const cachedLoader = new CachedDataLoader(config, deps);
         await cachedLoader.loadMappings();

@@ -113,7 +117,13 @@ class StandardExecutor {
                 if (tH_iter) tH_chunk = (await tH_iter.next()).value || {};

                 // Execute chunk for all calcs
-                const promises = streamingCalcs.map(calc =>
+                const promises = streamingCalcs.map(calc =>
+                    StandardExecutor.executePerUser(
+                        calc, calc.manifest, dateStr, tP_chunk, yP_chunk, tH_chunk,
+                        fetchedDeps, previousFetchedDeps, config, deps, cachedLoader,
+                        executionStats[normalizeName(calc.manifest.name)]
+                    )
+                );
                 await Promise.all(promises);
             }
         } finally {

@@ -122,10 +132,15 @@ class StandardExecutor {
             if (tH_iter && tH_iter.return) await tH_iter.return();
         }

+        // Attach stats to the instances so ResultCommitter can find them
+        for(const name in state) {
+            if(state[name]) state[name]._executionStats = executionStats[name];
+        }
+
         logger.log('INFO', `[${passName}] Streaming complete.`);
     }

-    static async executePerUser(calcInstance, metadata, dateStr, portfolioData, yesterdayPortfolioData, historyData, computedDeps, prevDeps, config, deps, loader) {
+    static async executePerUser(calcInstance, metadata, dateStr, portfolioData, yesterdayPortfolioData, historyData, computedDeps, prevDeps, config, deps, loader, stats) {
         const { logger } = deps;
         const targetUserType = metadata.userType;
         const mappings = await loader.loadMappings();

@@ -139,7 +154,10 @@ class StandardExecutor {

             if (targetUserType !== 'all') {
                 const mappedTarget = (targetUserType === 'speculator') ? SCHEMAS.USER_TYPES.SPECULATOR : SCHEMAS.USER_TYPES.NORMAL;
-                if (mappedTarget !== actualUserType)
+                if (mappedTarget !== actualUserType) {
+                    if (stats) stats.skippedUsers++;
+                    continue;
+                }
             }

             const context = ContextFactory.buildPerUserContext({

@@ -149,7 +167,10 @@ class StandardExecutor {
                 config, deps
             });

-            try {
+            try {
+                await calcInstance.process(context);
+                if (stats) stats.processedUsers++;
+            }
             catch (e) { logger.log('WARN', `Calc ${metadata.name} failed for user ${userId}: ${e.message}`); }
         }
     }
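Note: the per-user counters follow the same `_executionStats` convention, but with one slot per computation so concurrently streaming calcs do not share counters. A condensed sketch of the plumbing added above (names from this diff; data hypothetical):

const state = { mycalc: { manifest: { name: 'MyCalc' } } }; // hypothetical instance map

// One counter object per computation:
const executionStats = {};
Object.keys(state).forEach(name => {
    executionStats[name] = { processedUsers: 0, skippedUsers: 0 };
});

// executePerUser receives its own slot and increments processedUsers on a
// successful process() or skippedUsers on a userType mismatch; afterwards
// the counters are attached so ResultCommitter (and the fan-out wrapper,
// which copies _executionStats across) can find them:
for (const name in state) {
    if (state[name]) state[name]._executionStats = executionStats[name];
}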
package/functions/computation-system/helpers/computation_dispatcher.js

@@ -4,6 +4,7 @@
  * UPDATED: Implements Audit Ledger creation with Transactions to prevent Race Conditions.
  * UPDATED: Added Preemptive Hash Check.
  * UPDATED: Added Parallel Status Fetching.
+ * UPDATED: Include triggerReason in Pub/Sub payload.
  */

 const { getExpectedDateStrings, normalizeName, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils.js');

@@ -35,14 +36,9 @@ async function dispatchComputationPass(config, dependencies, computationManifest
     if (!calcsInThisPass.length) { return logger.log('WARN', `[Dispatcher] No calcs for Pass ${passToRun}. Exiting.`); }

     // --- [NEW] OPTIMIZATION 1: PREEMPTIVE HASH CHECK ---
-    // If the combined hash of all calculations hasn't changed, we might not need to do anything.
-    // Note: This optimization assumes external data (root data) hasn't changed.
-    // To be safe, we only use this to skip code-change re-runs, but root data might have arrived.
-    // For now, we calculate it but rely on the deep check.
     const currentManifestHash = generateCodeHash(
         computationManifest.map(c => c.hash).sort().join('|')
     );
-    // TODO: Implement metadata storage for this hash to skip "Analysis" phase if needed.
     // ---------------------------------------------------

     const calcNames = calcsInThisPass.map(c => c.name);

@@ -122,6 +118,7 @@ async function dispatchComputationPass(config, dependencies, computationManifest
             computation: normalizeName(item.name),
             hash: item.hash || item.newHash,
             previousCategory: item.previousCategory || null,
+            triggerReason: item.reason || "Unknown", // <--- THE KEY ADDITION
             timestamp: Date.now()
         });
     });

@@ -157,6 +154,7 @@ async function dispatchComputationPass(config, dependencies, computationManifest
         expectedHash: task.hash || 'unknown',
         createdAt: new Date(),
         dispatcherHash: currentManifestHash, // Tracking source
+        triggerReason: task.triggerReason, // Track trigger in ledger too
         retries: 0
     }, { merge: true });
     return true;
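Note: with this change a dispatched task carries the reason computed by the orchestrator's analysis. An illustrative payload (values hypothetical; field names taken from this diff and the worker's destructuring):

const payload = {
    action: 'RUN_COMPUTATION_DATE',
    date: '2024-01-01',
    pass: '1',
    computation: 'mycalc',
    hash: 'abc123',
    previousCategory: null,
    triggerReason: 'Layer Update: [prices]', // <-- the new field
    timestamp: Date.now()
};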
package/functions/computation-system/helpers/computation_worker.js

@@ -3,6 +3,7 @@
  * PURPOSE: Consumes computation tasks from Pub/Sub and executes them.
  * UPDATED: Integrated Run Ledger for per-run/per-date success/failure tracking.
  * UPDATED: Added Dead Letter Queue logic for Poison Pills.
+ * UPDATED: Now logs the trigger reason.
  */

 const { executeDispatchTask } = require('../WorkflowOrchestrator.js');

@@ -35,17 +36,21 @@ async function handleComputationTask(message, config, dependencies) {

     // ----------------------------------- Validate & Execute -----------------------------------
     if (!data || data.action !== 'RUN_COMPUTATION_DATE') { return; }
-
+
+    // Extract Trigger Reason
+    const { date, pass, computation, previousCategory, triggerReason } = data;
+
     if (!date || !pass || !computation) { logger.log('ERROR', `[Worker] Invalid payload: Missing date, pass, or computation.`, data); return; }
     let computationManifest;
     try { computationManifest = getManifest(config.activeProductLines || [], calculations, runDependencies);
     } catch (manifestError) { logger.log('FATAL', `[Worker] Failed to load Manifest: ${manifestError.message}`);
-
+        // FIX: Passing { durationMs: 0 } instead of {} to satisfy type requirements
+        await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: manifestError.message, stage: 'MANIFEST_LOAD' }, { durationMs: 0 }, triggerReason);
         return;
     }

     try {
-        logger.log('INFO', `[Worker] 📥 Received: ${computation} for ${date}`);
+        logger.log('INFO', `[Worker] 📥 Received: ${computation} for ${date} [Reason: ${triggerReason || 'Unknown'}]`);

         const startTime = Date.now();
         const result = await executeDispatchTask(

@@ -67,19 +72,21 @@ async function handleComputationTask(message, config, dependencies) {
             logger.log('ERROR', `[Worker] ❌ Failed logic/storage for ${computation}`, failReason.error);
             const metrics = failReason.metrics || {};
             metrics.durationMs = duration;
-            await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', failReason.error, metrics);
+            await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', failReason.error, metrics, triggerReason);
             throw new Error(failReason.error.message || 'Computation Logic Failed');
         }
         else if (Object.keys(successUpdates).length > 0) {
             const successData = successUpdates[computation];
             const metrics = successData.metrics || {};
             metrics.durationMs = duration;
-
-
+
+            logger.log('INFO', `[Worker] ✅ Stored: ${computation}. Processed: ${metrics.execution?.processedUsers || metrics.execution?.processedItems || '?'} items.`);
+
+            await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', null, metrics, triggerReason);
         }
         else {
             logger.log('WARN', `[Worker] ⚠️ No results produced for ${computation} (Empty?)`);
-            await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', { message: 'Empty Result' }, { durationMs: duration });
+            await recordRunAttempt(db, { date, computation, pass }, 'SUCCESS', { message: 'Empty Result' }, { durationMs: duration }, triggerReason);
         }
     } catch (err) {
         const retryCount = message.deliveryAttempt || 0;

@@ -96,7 +103,7 @@ async function handleComputationTask(message, config, dependencies) {
         } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
         }
         logger.log('ERROR', `[Worker] ❌ Crash: ${computation} for ${date}: ${err.message}`);
-        await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' });
+        await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' }, { durationMs: 0 }, triggerReason);
         throw err;
     }
 }
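Note: every audit write in the worker now threads the trigger reason through as a sixth argument. A sketch of the updated call convention (inside the async handler; argument values hypothetical):

await recordRunAttempt(
    db,
    { date: '2024-01-01', computation: 'mycalc', pass: '1' }, // context
    'SUCCESS',                                                // or 'FAILURE' / 'CRASH'
    null,                                                     // error object when not SUCCESS
    { durationMs: 1234, execution: { processedUsers: 42, skippedUsers: 3 } },
    triggerReason                                             // forwarded from the Pub/Sub payload
);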
package/functions/computation-system/persistence/ResultCommitter.js

@@ -1,6 +1,7 @@
 /**
  * @fileoverview Handles saving computation results with observability and Smart Cleanup.
  * UPDATED: Stores Hash Composition in status for audit trail.
+ * UPDATED: Captures execution metrics.
  */
 const { commitBatchInChunks } = require('./FirestoreUtils');
 const { updateComputationStatus } = require('./StatusRepository');

@@ -27,9 +28,13 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
     for (const name in stateObj) {
         const calc = stateObj[name];

+        // [NEW] Check for execution stats attached by Executor
+        const execStats = calc._executionStats || { processedUsers: 0, skippedUsers: 0 };
+
         const runMetrics = {
             storage: { sizeBytes: 0, isSharded: false, shardCount: 1, keys: 0 },
-            validation: { isValid: true, anomalies: [] }
+            validation: { isValid: true, anomalies: [] },
+            execution: execStats // <--- Pass this to RunRecorder
         };

         try {

@@ -47,9 +52,9 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
             if (isEmpty) {
                 if (calc.manifest.hash) {
                     successUpdates[name] = {
-                        hash:
-                        category:
-                        composition: calc.manifest.composition,
+                        hash: calc.manifest.hash,
+                        category: calc.manifest.category,
+                        composition: calc.manifest.composition,
                         metrics: runMetrics
                     };
                 }

@@ -83,9 +88,9 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW

                 if (calc.manifest.hash) {
                     successUpdates[name] = {
-                        hash:
-                        category:
-                        composition: calc.manifest.composition,
+                        hash: calc.manifest.hash,
+                        category: calc.manifest.category,
+                        composition: calc.manifest.composition,
                         metrics: runMetrics
                     };
                 }

@@ -101,15 +106,15 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW

             const writeStats = await writeSingleResult(result, mainDocRef, name, dStr, logger, config, deps);

-            runMetrics.storage.sizeBytes
-            runMetrics.storage.isSharded
+            runMetrics.storage.sizeBytes = writeStats.totalSize;
+            runMetrics.storage.isSharded = writeStats.isSharded;
             runMetrics.storage.shardCount = writeStats.shardCount;

             if (calc.manifest.hash) {
                 successUpdates[name] = {
-                    hash:
-                    category:
-                    composition: calc.manifest.composition,
+                    hash: calc.manifest.hash,
+                    category: calc.manifest.category,
+                    composition: calc.manifest.composition,
                     metrics: runMetrics
                 };
             }

@@ -145,6 +150,7 @@ async function commitResults(stateObj, dStr, passName, config, deps, skipStatusW
     return { successUpdates, failureReport };
 }

+// ... (Helper functions remain unchanged from context) ...
 async function writeSingleResult(result, docRef, name, dateContext, logger, config, deps) {
     const strategies = [ { bytes: 900 * 1024, keys: null }, { bytes: 450 * 1024, keys: 10000 }, { bytes: 200 * 1024, keys: 2000 } ];
     let committed = false; let lastError = null; let finalStats = { totalSize: 0, isSharded: false, shardCount: 1 };
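Note: after this change each computation's runMetrics bundles storage, validation, and the executor-supplied counters, and RunRecorder stores the `execution` slice verbatim. The assembled shape (counter values hypothetical):

const runMetrics = {
    storage:    { sizeBytes: 0, isSharded: false, shardCount: 1, keys: 0 },
    validation: { isValid: true, anomalies: [] },
    execution:  { processedUsers: 42, skippedUsers: 3 } // from calc._executionStats
};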
package/functions/computation-system/persistence/ResultsValidator.js

@@ -59,11 +59,7 @@ class HeuristicValidator {
             // Vector/Profile Empty Check (Specific to your System)
             // If result contains 'profile', 'history', 'sparkline', or 'buckets' arrays
             const arrayProps = ['profile', 'history', 'sparkline', 'buckets', 'prices'];
-            for (const prop of arrayProps) {
-                if (Array.isArray(val[prop]) && val[prop].length === 0) {
-                    emptyVectorCount++;
-                }
-            }
+            for (const prop of arrayProps) { if (Array.isArray(val[prop]) && val[prop].length === 0) { emptyVectorCount++; } }

             // Extract primary numeric score for Flatline check (heuristically guessing the 'main' metric)
             const numericProp = subValues.find(v => typeof v === 'number' && v !== 0);

@@ -71,8 +67,8 @@
             }
             // --- TYPE B: Scalar / Primitive Result ---
             if (typeof val === 'number') {
-                if (isNaN(val) || !isFinite(val))
-                    nanCount++;
+                if (isNaN(val) || !isFinite(val))
+                { nanCount++;
                 } else {
                     numericValues.push(val); // Include zeros
                     if (val === 0) zeroCount++;
package/functions/computation-system/persistence/RunRecorder.js

@@ -1,142 +1,101 @@
 /**
  * @fileoverview Utility for recording computation run attempts (The Audit Logger).
- *
- * Implements aggregated error stats and advanced performance metrics.
+ * UPDATED: Stores 'trigger' reason and 'execution' stats.
  */

 const { FieldValue } = require('../utils/utils');
 const os = require('os');

-// Root collection for the new audit system
 const AUDIT_COLLECTION = 'computation_audit_logs';

-/**
- * Sanitizes error messages to be used as Firestore Map keys.
- * Replaces invalid characters (. / [ ] *) with underscores.
- */
 function sanitizeErrorKey(message) {
     if (!message) return 'Unknown_Error';
-    // Take first 100 chars to avoid key limit issues
     const shortMsg = message.toString().substring(0, 100);
     return shortMsg.replace(/[./\[\]*`]/g, '_').trim();
 }

 /**
  * Records a run attempt with detailed metrics and aggregated stats.
- *
- * @param {Object} context - Context object
- * @param {string} context.date - The "Target Date" of the computation
- * @param {string} context.computation - The name of the calculation
- * @param {string} context.pass - The topology pass number
- * @param {string} status - 'SUCCESS', 'FAILURE', 'CRASH', or 'SKIPPED'
- * @param {Object|null} error - Error object if failed
- * @param {Object} detailedMetrics - Expanded metrics object (Optional, defaults provided)
- * @param {number} [detailedMetrics.durationMs] - Execution time
- * @param {Object} [detailedMetrics.storage] - { sizeBytes, isSharded, shardCount }
- * @param {Object} [detailedMetrics.validation] - { isValid, anomalies: [] }
+ * ADDED: 'triggerReason' param.
  */
-async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }) {
+async function recordRunAttempt(db, context, status, error = null, detailedMetrics = { durationMs: 0 }, triggerReason = 'Unknown') {
     if (!db || !context) return;

     const { date: targetDate, computation, pass } = context;
     const now = new Date();
     const triggerTimestamp = now.getTime();

-    // 1. Construct Paths
-    // Parent Doc: Stores global aggregates for this computation
     const computationDocRef = db.collection(AUDIT_COLLECTION).doc(computation);
-
-    // History Doc: Stores this specific run
-    // ID Format: targetDate_triggerTimestamp (Sortable by data date, then execution time)
     const runId = `${targetDate}_${triggerTimestamp}`;
     const runDocRef = computationDocRef.collection('history').doc(runId);

-    // 2. Prepare Metrics & Environment Info
     const workerId = process.env.FUNCTION_TARGET || process.env.K_REVISION || os.hostname();

-    // Calculate size in MB
     let sizeMB = 0;
     if (detailedMetrics.storage && detailedMetrics.storage.sizeBytes) { sizeMB = Number((detailedMetrics.storage.sizeBytes / (1024 * 1024)).toFixed(4)); }

-    // Extract Validation Anomalies (Unusual Keys/Values)
     const anomalies = detailedMetrics.validation?.anomalies || [];
     if (error && error.message && error.message.includes('Data Integrity')) { anomalies.push(error.message); }

-    // 3. Construct the Run Log Entry
     const runEntry = {
-        // Identity
         runId: runId,
         computationName: computation,
         pass: String(pass),
         workerId: workerId,
-
-
-        targetDate: targetDate, // The date the data belongs to
-        triggerTime: now.toISOString(), // The date the code ran
+        targetDate: targetDate,
+        triggerTime: now.toISOString(),
         durationMs: detailedMetrics.durationMs || 0,
-
-        // Status
         status: status,

-        //
+        // [NEW] Trigger Context
+        trigger: {
+            reason: triggerReason || 'Unknown',
+            type: (triggerReason && triggerReason.includes('Layer')) ? 'CASCADE' : ((triggerReason && triggerReason.includes('New')) ? 'INIT' : 'UPDATE')
+        },
+
+        // [NEW] Execution Stats (Internal Loop Data)
+        executionStats: detailedMetrics.execution || {},
+
         outputStats: {
             sizeMB: sizeMB,
             isSharded: !!detailedMetrics.storage?.isSharded,
             shardCount: detailedMetrics.storage?.shardCount || 1,
-            keysWritten: detailedMetrics.storage?.keys || 0
+            keysWritten: detailedMetrics.storage?.keys || 0
         },

-
-
-
-        // Metadata
-        _schemaVersion: '2.0'
+        anomalies: anomalies,
+        _schemaVersion: '2.1'
     };

-    // Attach Error Details if present
     if (error) {
         runEntry.error = {
             message: error.message || 'Unknown Error',
             stage: error.stage || 'UNKNOWN',
-            stack: error.stack ? error.stack.substring(0, 1000) : null,
+            stack: error.stack ? error.stack.substring(0, 1000) : null,
             code: error.code || null
         };
     }

-    // 4. Prepare Aggregation Update (Atomic Increments)
     const statsUpdate = {
         lastRunAt: now,
         lastRunStatus: status,
         totalRuns: FieldValue.increment(1)
     };

-    if (status === 'SUCCESS') {
-
-    } else {
-        statsUpdate.failureCount = FieldValue.increment(1);
-        // Increment specific error type counter
+    if (status === 'SUCCESS') { statsUpdate.successCount = FieldValue.increment(1);
+    } else { statsUpdate.failureCount = FieldValue.increment(1);
         if (error) {
             const safeKey = sanitizeErrorKey(error.message);
             statsUpdate[`errorCounts.${safeKey}`] = FieldValue.increment(1);
         }
     }

-    // 5. Execute as Batch
     try {
         const batch = db.batch();
-
-        // Set the specific run log
         batch.set(runDocRef, runEntry);
-
-        // Merge updates into the parent computation document
-        // We use { merge: true } implicitly with set or explicit update.
-        // Using set({ merge: true }) ensures doc creation if it doesn't exist.
         batch.set(computationDocRef, statsUpdate, { merge: true });
-
         await batch.commit();
-
     } catch (e) {
-        // Fallback logging if Firestore fails (prevents infinite loop crashing)
         console.error(`[RunRecorder] ❌ CRITICAL: Failed to write audit log for ${computation}`, e);
     }
 }
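Note: the new trigger.type field is a substring heuristic over the reason string. A sketch of how the reasons produced elsewhere in this diff would be bucketed:

const classify = (reason) =>
    (reason && reason.includes('Layer')) ? 'CASCADE'
    : (reason && reason.includes('New')) ? 'INIT'
    : 'UPDATE';

classify('Layer Update: [prices]');                          // 'CASCADE'
classify('New Calculation');                                 // 'INIT'
classify('Code Changed');                                    // 'UPDATE'
classify('Audit Upgrade (Populating Composition Metadata)'); // 'UPDATE'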
package/functions/computation-system/tools/BuildReporter.js

@@ -2,6 +2,7 @@
  * @fileoverview Build Reporter & Auto-Runner.
  * Generates a "Pre-Flight" report of what the computation system WILL do.
  * UPDATED: Fixed 'latest' document overwrite bug.
+ * UPDATED: Now reports specific reasons for Re-Runs.
  */

 const { analyzeDateExecution } = require('../WorkflowOrchestrator');
|