bulltrackers-module 1.0.346 → 1.0.348
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system/helpers/computation_dispatcher.js +31 -2
- package/functions/computation-system/tools/FinalSweepReporter.js +304 -0
- package/functions/computation-system/workflows/bulltrackers_pipeline.yaml +35 -16
- package/functions/generic-api/helpers/api_helpers.js +23 -1
- package/package.json +1 -1
package/functions/computation-system/helpers/computation_dispatcher.js

@@ -11,8 +11,8 @@ const { groupByPass, analyzeDateExecution } = require('../WorkflowOrchestrator.j
 const { PubSubUtils } = require('../../core/utils/pubsub_utils');
 const { fetchComputationStatus } = require('../persistence/StatusRepository');
 const { checkRootDataAvailability } = require('../data/AvailabilityChecker');
+const { runFinalSweepCheck } = require('../tools/FinalSweepReporter'); // [NEW]
 const crypto = require('crypto');
-const monConfig = require('../config/monitoring_config');
 
 const OOM_THRESHOLD_MB = 1500; // Unused
 const BASE_SECONDS_PER_WEIGHT_UNIT = 3;

@@ -148,7 +148,7 @@ async function getStableDateSession(config, dependencies, passToRun, dateLimitSt
 // MAIN ENTRY POINT
 // =============================================================================
 async function dispatchComputationPass(config, dependencies, computationManifest, reqBody = {}) {
-  const action = reqBody.action || 'DISPATCH';
+  const action = reqBody.action || 'DISPATCH';
 
   if (action === 'VERIFY') {
     return handlePassVerification(config, dependencies, computationManifest, reqBody);

@@ -156,10 +156,39 @@ async function dispatchComputationPass(config, dependencies, computationManifest
   else if (action === 'SWEEP') {
     return handleSweepDispatch(config, dependencies, computationManifest, reqBody);
   }
+  // [NEW] Handler for Final Forensics Reporting
+  else if (action === 'REPORT') {
+    return handleFinalSweepReporting(config, dependencies, computationManifest, reqBody);
+  }
 
   return handleStandardDispatch(config, dependencies, computationManifest, reqBody);
 }
 
+// =============================================================================
+// NEW: Final Sweep Reporting Handler
+// =============================================================================
+async function handleFinalSweepReporting(config, dependencies, computationManifest, reqBody) {
+  const { logger } = dependencies;
+  const passToRun = String(reqBody.pass || "1");
+  // Target date is required for detailed forensics
+  const date = reqBody.date || new Date().toISOString().slice(0, 10);
+
+  logger.log('INFO', `[Dispatcher] 🔍 Triggering Final Sweep Forensics for Pass ${passToRun} on ${date}...`);
+
+  try {
+    const result = await runFinalSweepCheck(config, dependencies, date, passToRun, computationManifest);
+    return {
+      status: 'COMPLETED',
+      date: date,
+      pass: passToRun,
+      issuesFound: result.issuesCount
+    };
+  } catch (e) {
+    logger.log('ERROR', `[Dispatcher] Forensics failed: ${e.message}`);
+    return { status: 'ERROR', error: e.message };
+  }
+}
+
 // =============================================================================
 // LOGIC: Verify Pass Completion
 // =============================================================================
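The new REPORT action gives the dispatcher a fourth verb alongside DISPATCH, VERIFY, and SWEEP. A minimal invocation sketch follows; the endpoint URL and project are placeholders, and auth handling is an assumption (the workflow below POSTs to computation-pass-<N> Cloud Functions with OIDC auth), not something defined by this diff.

// Hypothetical trigger for the new REPORT action (Node 18+ global fetch).
// The URL is a placeholder; production calls would also need an OIDC
// identity token, matching the workflow's `auth: { type: OIDC }`.
async function triggerForensics(pass, date) {
  const res = await fetch(
    `https://europe-west1-my-project.cloudfunctions.net/computation-pass-${pass}`,
    {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      // Per the handler above, 'pass' defaults to "1" and 'date' to today (UTC).
      body: JSON.stringify({ action: 'REPORT', pass, date }),
    }
  );
  return res.json(); // { status: 'COMPLETED', date, pass, issuesFound } on success
}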
package/functions/computation-system/tools/FinalSweepReporter.js

@@ -0,0 +1,304 @@
+/**
+ * FILENAME: computation-system/tools/FinalSweepReporter.js
+ * PURPOSE: Comprehensive forensic tool to diagnose why computations failed.
+ * EXECUTION: Triggered by Dispatcher (via Workflow) after execution passes complete.
+ */
+
+const { normalizeName, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils');
+const { fetchComputationStatus } = require('../persistence/StatusRepository');
+const { checkRootDataAvailability } = require('../data/AvailabilityChecker');
+const { analyzeDateExecution } = require('../WorkflowOrchestrator');
+
+const REPORT_COLLECTION = 'Final_sweep_check_report';
+const LEDGER_BASE = 'computation_audit_ledger';
+const STALE_THRESHOLD_MS = 1000 * 60 * 20; // 20 minutes
+
+class FinalSweepReporter {
+  constructor(config, dependencies) {
+    this.config = config;
+    this.db = dependencies.db;
+    this.logger = dependencies.logger;
+  }
+
+  /**
+   * Main Entry Point
+   * @param {string} dateStr - Target date (YYYY-MM-DD)
+   * @param {string} passId - The pass to analyze ("1", "2"...)
+   * @param {Array} manifest - Full computation manifest
+   */
+  async runSweep(dateStr, passId, manifest) {
+    this.logger.log('INFO', `[FinalSweep] 🕵️ Starting forensic analysis for ${dateStr} (Pass ${passId})`);
+
+    // 1. Filter Manifest for Target Pass
+    const passCalcs = manifest.filter(c => String(c.pass) === passId);
+    if (passCalcs.length === 0) {
+      this.logger.log('WARN', `[FinalSweep] No calculations found for Pass ${passId}.`);
+      return { issuesCount: 0 };
+    }
+
+    const manifestMap = new Map(manifest.map(c => [normalizeName(c.name), c]));
+
+    // 2. Fetch Global State
+    const [dailyStatus, availability] = await Promise.all([
+      fetchComputationStatus(dateStr, this.config, { db: this.db }),
+      checkRootDataAvailability(dateStr, this.config, { db: this.db, logger: this.logger }, DEFINITIVE_EARLIEST_DATES)
+    ]);
+
+    // 3. Fetch Yesterday (if needed for historical checks)
+    let prevDailyStatus = null;
+    if (passCalcs.some(c => c.isHistorical)) {
+      const prevDate = new Date(dateStr + 'T00:00:00Z');
+      prevDate.setUTCDate(prevDate.getUTCDate() - 1);
+      if (prevDate >= DEFINITIVE_EARLIEST_DATES.absoluteEarliest) {
+        prevDailyStatus = await fetchComputationStatus(prevDate.toISOString().slice(0, 10), this.config, { db: this.db });
+      }
+    }
+
+    const rootDataStatus = availability ? availability.status : {};
+
+    // 4. Run Logic Analysis
+    // We use the Orchestrator's logic to see who SHOULD have run but didn't
+    const analysis = analyzeDateExecution(
+      dateStr,
+      passCalcs,
+      rootDataStatus,
+      dailyStatus,
+      manifestMap,
+      prevDailyStatus
+    );
+
+    // Collect all potential issues
+    const problematicItems = [
+      ...analysis.impossible.map(x => ({ ...x, category: 'IMPOSSIBLE' })),
+      ...analysis.blocked.map(x => ({ ...x, category: 'BLOCKED' })),
+      ...analysis.failedDependency.map(x => ({ ...x, category: 'DEPENDENCY_FAILURE' })),
+      // If it's still 'RUNNABLE' after the final pass, it means it was skipped/missed/failed silently
+      ...analysis.runnable.map(x => ({ ...x, category: 'STUCK_RUNNABLE' })),
+      ...analysis.reRuns.map(x => ({ ...x, category: 'STUCK_RERUN' }))
+    ];
+
+    // 5. Deep Investigation & Reporting
+    let issuesCount = 0;
+    const batch = this.db.batch();
+    let opCount = 0;
+
+    // A. Analyze Standard Problems
+    for (const item of problematicItems) {
+      const forensics = await this.investigateComputation(
+        item.name, dateStr, passId, item.category, manifestMap,
+        dailyStatus, prevDailyStatus, rootDataStatus
+      );
+
+      if (forensics) {
+        this._queueReportWrite(batch, dateStr, item.name, forensics);
+        opCount++;
+        issuesCount++;
+      }
+    }
+
+    // B. Analyze "Ghost" Completions (Marked Complete but Empty)
+    for (const calc of passCalcs) {
+      const name = normalizeName(calc.name);
+      const status = dailyStatus[name];
+
+      if (status && status.hash === calc.hash) {
+        const hasData = await this.verifyDataExists(name, calc.category, dateStr);
+        if (!hasData) {
+          const forensics = {
+            computation: name,
+            date: dateStr,
+            category: 'GHOST_COMPLETION',
+            rootCause: 'DATA_WRITE_FAILURE',
+            reason: 'Status index says COMPLETE, but storage document is missing.',
+            severity: 'HIGH',
+            recommendations: ['Force Re-run', 'Check Write Permissions']
+          };
+          this._queueReportWrite(batch, dateStr, name, forensics);
+          opCount++;
+          issuesCount++;
+        }
+      }
+    }
+
+    if (opCount > 0) await batch.commit();
+
+    this.logger.log('SUCCESS', `[FinalSweep] Pass ${passId}: Generated ${issuesCount} forensic reports.`);
+    return { issuesCount };
+  }
+
+  _queueReportWrite(batch, dateStr, calcName, forensics) {
+    // Path: Final_sweep_check_report/YYYY-MM-DD/Computation_Name/document
+    const reportRef = this.db.collection(REPORT_COLLECTION)
+      .doc(dateStr)
+      .collection(normalizeName(calcName))
+      .doc('document');
+
+    batch.set(reportRef, {
+      ...forensics,
+      generatedAt: new Date().toISOString()
+    });
+  }
+
+  /**
+   * Deep dive investigation logic
+   */
+  async investigateComputation(calcName, dateStr, passId, category, manifestMap, dailyStatus, prevDailyStatus, rootDataStatus) {
+    const manifest = manifestMap.get(calcName);
+    if (!manifest) return null;
+
+    const forensics = {
+      computation: calcName,
+      pass: passId,
+      date: dateStr,
+      category: category,
+      rootCause: null,
+      reason: null,
+      ledgerState: null,
+      recommendations: [],
+      severity: 'MEDIUM'
+    };
+
+    // 1. CHECK AUDIT LEDGER
+    const ledgerPath = `${LEDGER_BASE}/${dateStr}/passes/${passId}/tasks/${normalizeName(calcName)}`;
+    const ledgerSnap = await this.db.doc(ledgerPath).get();
+
+    if (ledgerSnap.exists) {
+      const data = ledgerSnap.data();
+      forensics.ledgerState = { status: data.status, workerId: data.workerId, error: data.error };
+
+      if (['PENDING', 'IN_PROGRESS'].includes(data.status)) {
+        const lastHb = data.telemetry?.lastHeartbeat ? new Date(data.telemetry.lastHeartbeat).getTime() : 0;
+        if (Date.now() - lastHb > STALE_THRESHOLD_MS) {
+          forensics.rootCause = 'ZOMBIE_PROCESS';
+          forensics.reason = `Worker ${data.workerId} stopped heartbeating. Likely crashed/timeout.`;
+          forensics.severity = 'CRITICAL';
+        } else {
+          forensics.rootCause = 'STILL_RUNNING'; // Rare if sweep runs after everything
+        }
+      } else if (data.status === 'FAILED') {
+        if (data.resourceTier === 'high-mem') {
+          forensics.rootCause = 'CRASH_HIGH_MEM';
+          forensics.reason = 'Failed even on High-Memory tier. Code optimization required.';
+          forensics.severity = 'CRITICAL';
+        } else {
+          forensics.rootCause = 'FAILED_STANDARD';
+          forensics.reason = data.error?.message || 'Unknown Error';
+        }
+
+        if (data.error?.stage === 'OOM' || (data.error?.message || '').includes('memory')) {
+          forensics.rootCause = 'OUT_OF_MEMORY';
+        }
+      }
+    } else {
+      forensics.ledgerState = 'NEVER_DISPATCHED';
+      if (category === 'STUCK_RUNNABLE') {
+        forensics.rootCause = 'DISPATCHER_MISS';
+        forensics.reason = 'Logic says runnable, but Dispatcher never queued it.';
+        forensics.severity = 'HIGH';
+      }
+    }
+
+    // 2. ROOT DATA ANALYSIS
+    if (category === 'IMPOSSIBLE') {
+      const missing = [];
+      const userType = manifest.userType || 'all';
+      const deps = manifest.rootDataDependencies || [];
+
+      if (deps.includes('portfolio')) {
+        if (userType === 'speculator' && !rootDataStatus.speculatorPortfolio) missing.push('speculatorPortfolio');
+        else if (userType === 'normal' && !rootDataStatus.normalPortfolio) missing.push('normalPortfolio');
+        else if (userType === 'all' && !rootDataStatus.hasPortfolio) missing.push('portfolio');
+      }
+      if (deps.includes('history')) {
+        if (userType === 'speculator' && !rootDataStatus.speculatorHistory) missing.push('speculatorHistory');
+        else if (userType === 'normal' && !rootDataStatus.normalHistory) missing.push('normalHistory');
+        else if (userType === 'all' && !rootDataStatus.hasHistory) missing.push('history');
+      }
+      if (deps.includes('price') && !rootDataStatus.hasPrices) missing.push('price');
+      if (deps.includes('insights') && !rootDataStatus.hasInsights) missing.push('insights');
+
+      forensics.rootCause = 'MISSING_ROOT_DATA';
+      forensics.reason = `Missing: ${missing.join(', ')}`;
+      forensics.severity = 'LOW'; // Expected behavior
+    }
+
+    // 3. DEPENDENCY ANALYSIS
+    if (category === 'DEPENDENCY_FAILURE' || category === 'BLOCKED') {
+      const chain = await this.traceDependencyChain(calcName, manifestMap, dailyStatus, prevDailyStatus, manifest.isHistorical);
+      if (chain.length > 0) {
+        const root = chain[chain.length - 1];
+        forensics.rootCause = 'UPSTREAM_FAILURE';
+        forensics.reason = `Blocked by ${root.name} (${root.reason})`;
+        forensics.chain = chain;
+      }
+    }
+
+    return forensics;
+  }
+
+  async traceDependencyChain(calcName, manifestMap, dailyStatus, prevDailyStatus, isHistorical) {
+    const chain = [];
+    const visited = new Set();
+
+    const trace = (name, isPrev) => {
+      if (visited.has(name)) return;
+      visited.add(name);
+
+      const m = manifestMap.get(name);
+      if (!m) return; // Unknown calc
+
+      const status = isPrev ? (prevDailyStatus?.[name]) : (dailyStatus?.[name]);
+
+      if (!status) {
+        chain.push({ name, reason: 'MISSING_STATUS', isPrev });
+        return;
+      }
+      if (String(status.hash).startsWith('IMPOSSIBLE')) {
+        chain.push({ name, reason: 'IMPOSSIBLE', isPrev });
+        return;
+      }
+      if (m.hash !== status.hash) {
+        chain.push({ name, reason: 'VERSION_MISMATCH', isPrev });
+        return;
+      }
+
+      if (m.dependencies) {
+        for (const dep of m.dependencies) trace(normalizeName(dep), false);
+      }
+    };
+
+    const m = manifestMap.get(calcName);
+    if (m) {
+      if (m.dependencies) m.dependencies.forEach(d => trace(normalizeName(d), false));
+      if (isHistorical) trace(calcName, true);
+    }
+    return chain;
+  }
+
+  async verifyDataExists(calcName, category, dateStr) {
+    try {
+      const docRef = this.db.collection(this.config.resultsCollection)
+        .doc(dateStr)
+        .collection(this.config.resultsSubcollection)
+        .doc(category)
+        .collection(this.config.computationsSubcollection)
+        .doc(calcName);
+
+      const snap = await docRef.get();
+      if (!snap.exists) return false;
+
+      const data = snap.data();
+      if (data._completed === true) return true;
+      return false;
+    } catch (e) {
+      return false;
+    }
+  }
+}
+
+async function runFinalSweepCheck(config, dependencies, dateStr, pass, manifest) {
+  const reporter = new FinalSweepReporter(config, dependencies);
+  return await reporter.runSweep(dateStr, pass, manifest);
+}
+
+module.exports = { runFinalSweepCheck };
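A usage sketch for the new module's single export follows. The dependency shapes are inferred from the class above (db is a Firestore-style client exposing collection/doc/batch; logger exposes log(level, message)); the config collection names are placeholders, not values shipped in this package.

// Sketch only: wiring runFinalSweepCheck with inferred dependency shapes.
const { runFinalSweepCheck } = require('./FinalSweepReporter');

async function runForensicsExample(db, manifest) {
  const dependencies = {
    db, // Firestore-like client, as used by the class above
    logger: { log: (level, msg) => console.log(`[${level}] ${msg}`) },
  };
  const config = {
    // Placeholder names; verifyDataExists() reads these three keys.
    resultsCollection: 'computation_results',
    resultsSubcollection: 'categories',
    computationsSubcollection: 'computations',
  };
  const { issuesCount } = await runFinalSweepCheck(config, dependencies, '2024-01-15', '1', manifest);
  console.log(`Forensic reports written: ${issuesCount}`);
}

Reports land at Final_sweep_check_report/<date>/<computation>/document, one document per diagnosed computation.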
package/functions/computation-system/workflows/bulltrackers_pipeline.yaml

@@ -1,7 +1,4 @@
 # Cloud Workflows: Precision Cursor-Based Orchestrator
-# FAST-FORWARD MODE: Dispatch -> (Internal Loop) -> Wait ETA -> Jump to Next Valid Cursor
-# UPGRADED: Supports 'nextCursor' to skip empty date ranges instantly.
-
 main:
   params: [input]
   steps:

@@ -12,6 +9,7 @@ main:
         - current_date: '${text.split(time.format(sys.now()), "T")[0]}'
         - date_to_run: '${default(map.get(input, "date"), current_date)}'
 
+    # --- PHASE 1: EXECUTION (Standard + High Mem Retry) ---
     - run_sequential_passes:
         for:
          value: pass_id

@@ -34,28 +32,29 @@ main:
                     pass: '${pass_id}'
                     cursorIndex: '${n_cursor}'
                     date: '${date_to_run}'
+                    action: 'DISPATCH'
                   auth: { type: OIDC }
                 result: dispatch_res
 
             - evaluate_dispatch:
                 switch:
-                # 1. End of Session
+                  # 1. End of Session
                   - condition: '${dispatch_res.body.status == "MOVE_TO_NEXT_PASS"}'
                     assign:
                       - pass_complete: true
 
-                # 2. Satiation Check
+                  # 2. Satiation Check
                   - condition: '${dispatch_res.body.status == "CONTINUE_PASS" and dispatch_res.body.remainingDates == 0 and dispatch_res.body.dispatched == 0}'
                     steps:
                       - log_satiation:
                          call: sys.log
                          args:
-                           text: '${"Pass " + pass_id + " - ✅ Pass satiated"}'
+                           text: '${"Pass " + pass_id + " - ✅ Pass satiated. Starting Verification."}'
                       - mark_complete:
                          assign:
                            - pass_complete: true
 
-                # 3. Work Dispatched
+                  # 3. Work Dispatched
                   - condition: '${dispatch_res.body.dispatched > 0}'
                     steps:
                       - wait_for_completion:

@@ -64,26 +63,24 @@ main:
                            seconds: '${int(dispatch_res.body.etaSeconds)}'
                       - update_cursor:
                          assign:
-                           # CRITICAL CHANGE: Use dispatcher's specific jump target
                            - n_cursor: '${default(dispatch_res.body.nextCursor, n_cursor + 1)}'
                       - next_loop_work:
                          next: sequential_date_loop
 
-                # 4. No Work (Fast-Forward
+                  # 4. No Work (Fast-Forward)
                   - condition: '${dispatch_res.body.dispatched == 0}'
                     steps:
                       - wait_short:
                          call: sys.sleep
                          args:
-                          seconds: 2
+                           seconds: 2
                       - update_cursor_retry:
                          assign:
-                           # CRITICAL CHANGE: Resume exactly where the Fast-Forward loop stopped
                            - n_cursor: '${default(dispatch_res.body.nextCursor, n_cursor + 1)}'
                       - next_loop_retry:
                          next: sequential_date_loop
 
-            # --- VERIFICATION & SWEEP
+            # --- VERIFICATION & SWEEP ---
             - verify_pass_completion:
                 call: http.post
                 args:

@@ -103,8 +100,7 @@ main:
             - log_sweep:
                 call: sys.log
                 args:
-                  text: '${"🧹 SWEEP: Disposing " + sweep_task.taskCount + " high-mem tasks for " + sweep_task.date
-
+                  text: '${"🧹 SWEEP: Disposing " + sweep_task.taskCount + " high-mem tasks for " + sweep_task.date}'
             - dispatch_force_sweep:
                 call: http.post
                 args:

@@ -114,11 +110,34 @@ main:
                     pass: '${pass_id}'
                     date: '${sweep_task.date}'
                   auth: { type: OIDC }
-
             - wait_sweep_completion:
                 call: sys.sleep
                 args:
                   seconds: '${int(sweep_task.eta)}'
 
+    # --- PHASE 2: FINAL FORENSIC REPORTING ---
+    # Triggered after ALL execution attempts for this pass (Standard -> Verify -> HighMem Sweep)
+    # We ask the dispatcher to run the FinalSweepReporter for the target date.
+    - run_final_forensics:
+        for:
+          value: pass_id
+          in: ${passes}
+          steps:
+            - generate_final_report:
+                call: http.post
+                args:
+                  url: '${"https://europe-west1-" + project + ".cloudfunctions.net/computation-pass-" + pass_id}'
+                  body:
+                    action: 'REPORT'
+                    pass: '${pass_id}'
+                    date: '${date_to_run}'
+                  auth: { type: OIDC }
+                result: report_res
+
+            - log_forensics:
+                call: sys.log
+                args:
+                  text: '${"📊 FINAL REPORT: Pass " + pass_id + " -> " + report_res.body.issuesFound + " detailed forensic documents created."}'
+
     - finish:
-        return: "Pipeline
+        return: "Pipeline Complete with Forensic Analysis"
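Both cursor-update steps apply the same fast-forward rule: take the dispatcher's nextCursor jump target when it is present, otherwise advance one slot. Expressed as plain JavaScript for clarity; the function is illustrative only, with field names mirroring the response body the workflow reads.

// Sketch of the cursor-advance rule used by update_cursor and
// update_cursor_retry above. 'dispatchRes' mirrors dispatch_res.body
// (status, dispatched, remainingDates, etaSeconds, nextCursor).
function nextCursorIndex(dispatchRes, currentCursor) {
  // Fast-forward: trust the dispatcher's jump target when supplied,
  // matching default(dispatch_res.body.nextCursor, n_cursor + 1).
  return dispatchRes.nextCursor ?? currentCursor + 1;
}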
package/functions/generic-api/helpers/api_helpers.js

@@ -77,16 +77,38 @@ class AvailabilityCache {
 
 /**
  * Helper: Resolve which dates to fetch based on mode and availability.
+ * UPDATED: Added "Inferred Availability" for backfilled computations (user-history-reconstructor).
  */
 async function resolveTargetDates(availabilityCache, computationKeys, mode, limit) {
   const map = await availabilityCache.getMap();
-
   const relevantDatesSet = new Set();
+
+  // [NEW] Special Handling for Backfilled Computations
+  // These computations write to past dates but only update status for the current run date.
+  // If we see a recent status, we infer that history exists backward from that point.
+  const BACKFILL_COMPUTATIONS = ['user-history-reconstructor'];
+  const hasBackfillCalc = computationKeys.some(k => BACKFILL_COMPUTATIONS.includes(k));
+
   computationKeys.forEach(key => {
     const dates = map[key] || [];
     dates.forEach(d => relevantDatesSet.add(d));
   });
 
+  // If we are requesting a backfill-capable computation and have at least one valid date
+  if (hasBackfillCalc && relevantDatesSet.size > 0 && mode === 'series') {
+    const sortedExisting = Array.from(relevantDatesSet).sort((a, b) => b.localeCompare(a));
+    const anchorDate = sortedExisting[0]; // Latest available date acts as the anchor
+
+    // Generate 'limit' days backwards from the anchor
+    const anchorTime = new Date(anchorDate).getTime();
+    for (let i = 0; i < limit; i++) {
+      const d = new Date(anchorTime - (i * 86400000));
+      const dateStr = d.toISOString().slice(0, 10);
+      relevantDatesSet.add(dateStr);
+    }
+  }
+
+  // Standard sorting and slicing
   const sortedDates = Array.from(relevantDatesSet).sort((a, b) => b.localeCompare(a));
 
   if (sortedDates.length === 0) return [];