bulltrackers-module 1.0.283 → 1.0.285
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/functions/computation-system/helpers/computation_dispatcher.js +20 -15
- package/functions/computation-system/helpers/computation_worker.js +52 -21
- package/functions/computation-system/helpers/monitor.js +63 -0
- package/functions/computation-system/workflows/bulltrackers_pipeline.yaml +143 -0
- package/index.js +5 -4
- package/package.json +1 -1
package/functions/computation-system/helpers/computation_dispatcher.js
CHANGED

@@ -1,7 +1,8 @@
 /**
  * FILENAME: computation-system/helpers/computation_dispatcher.js
- * PURPOSE: "Smart Dispatcher" - Analyzes state and
- * UPDATED:
+ * PURPOSE: "Smart Dispatcher" - Analyzes state and dispatches tasks.
+ * UPDATED: Removed "Zombie" logic. Now forcefully dispatches any task
+ * that is not explicitly COMPLETED, ensuring reliability for one-shot execution.
  */
 
 const { getExpectedDateStrings, normalizeName, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils.js');

@@ -116,7 +117,6 @@ async function dispatchComputationPass(config, dependencies, computationManifest
       hash: item.hash || item.newHash,
       previousCategory: item.previousCategory || null,
       triggerReason: item.reason || "Unknown",
-      // [NEW] Pass Content-Based hashes provided by analyzeDateExecution
       dependencyResultHashes: item.dependencyResultHashes || {},
       timestamp: Date.now()
     });

@@ -142,21 +142,25 @@ async function dispatchComputationPass(config, dependencies, computationManifest
     await db.runTransaction(async (t) => {
       const doc = await t.get(ledgerRef);
 
-      // [
+      // [UPDATED] Robust One-Shot Dispatch Logic
+      // We REMOVED the "Zombie Timeout" check.
+      // If the Dispatcher is running, we assume the user intends to ensure these tasks are dispatched.
+
       if (doc.exists) {
         const data = doc.data();
-
-
-
-
-        // Fallback: If no lease exists, assume 1 hour timeout for legacy zombie detection
-        const isLegacyZombie = !data.leaseExpiresAt && data.createdAt && (now - data.createdAt.toMillis() > 3600000);
-
-        if (isPending && !isLeaseExpired && !isLegacyZombie) {
-          return false; // Valid active pending task, do not double dispatch
+
+        // 1. If it's already COMPLETED, do not re-run (Strict idempotency).
+        if (data.status === 'COMPLETED') {
+          return false;
         }
+
+        // 2. If it is PENDING or IN_PROGRESS:
+        // Since the Dispatcher runs ONCE per day, seeing PENDING here means
+        // the *previous* run failed to complete, or the worker died.
+        // We overwrite it to force a restart.
       }
 
+      // Create/Overwrite entry with PENDING to start the cycle
       t.set(ledgerRef, {
         status: 'PENDING',
         dispatchId: task.dispatchId,

@@ -165,8 +169,9 @@ async function dispatchComputationPass(config, dependencies, computationManifest
         createdAt: new Date(),
         dispatcherHash: currentManifestHash,
         triggerReason: task.triggerReason,
-        retries: 0
+        retries: 0 // Reset retries for the new attempt
       }, { merge: true });
+
       return true;
     });
 

@@ -191,7 +196,7 @@ async function dispatchComputationPass(config, dependencies, computationManifest
 
     return { dispatched: finalDispatched.length };
   } else {
-    logger.log('INFO', `[Dispatcher] All tasks were already
+    logger.log('INFO', `[Dispatcher] All tasks were already COMPLETED (Double Dispatch avoided).`);
     return { dispatched: 0 };
   }
 
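In practice, the new dispatch contract is: skip a task only if its ledger entry is COMPLETED; anything else (missing, PENDING, or IN_PROGRESS) is reclaimed and re-queued. A minimal sketch of that transaction pattern, assuming the firebase-admin Firestore API and a hypothetical `ledgerRef`/`task` pair (the real function also writes the hash and trigger metadata shown in the hunks above):

```js
// Minimal sketch of the one-shot dispatch guard (illustrative only).
// Assumes `db` is a firebase-admin Firestore instance and `ledgerRef` points at
// computation_audit_ledger/{date}/passes/{pass}/tasks/{computation}.
async function claimTask(db, ledgerRef, task) {
  return db.runTransaction(async (t) => {
    const doc = await t.get(ledgerRef);

    // Strict idempotency: a COMPLETED task is never re-dispatched.
    if (doc.exists && doc.data().status === 'COMPLETED') {
      return false;
    }

    // Missing, PENDING, or IN_PROGRESS entries are overwritten to force a restart.
    t.set(ledgerRef, {
      status: 'PENDING',
      dispatchId: task.dispatchId,
      createdAt: new Date(),
      retries: 0,
    }, { merge: true });

    return true; // Caller publishes the Pub/Sub task only when this is true.
  });
}
```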
package/functions/computation-system/helpers/computation_worker.js
CHANGED

@@ -1,7 +1,8 @@
 /**
  * FILENAME: computation-system/helpers/computation_worker.js
  * PURPOSE: Consumes computation tasks from Pub/Sub.
- * UPDATED:
+ * UPDATED: Fixed "Silent Failure" bug where tasks got stuck in PENDING.
+ * Increased MAX_RETRIES and ensured Ledger is updated on poison messages.
  */
 
 const { executeDispatchTask } = require('../WorkflowOrchestrator.js');

@@ -13,7 +14,10 @@ let calculationPackage;
 try { calculationPackage = require('aiden-shared-calculations-unified');
 } catch (e) {console.error("FATAL: Could not load 'aiden-shared-calculations-unified'."); throw e; }
 const calculations = calculationPackage.calculations;
-
+
+// [FIX] Increased from 0 to 3.
+// 0 caused "retryCount >= MAX_RETRIES" to trigger immediately on the first run.
+const MAX_RETRIES = 3;
 
 async function handleComputationTask(message, config, dependencies) {
   const systemLogger = new StructuredLogger({ minLevel: config.minLevel || 'INFO', enableStructured: true, ...config });

@@ -35,24 +39,49 @@ async function handleComputationTask(message, config, dependencies) {
 
   if (!date || !pass || !computation) { logger.log('ERROR', `[Worker] Invalid payload.`, data); return; }
 
-
+  // [FIX] Ensure retryCount defaults to 1 (PubSub usually sends 1 for the first attempt)
+  const retryCount = message.deliveryAttempt || 1;
+
+  // [FIX] Changed condition to '>' so attempts 1, 2, and 3 are allowed to run.
+  if (retryCount > MAX_RETRIES) {
+    logger.log('ERROR', `[Worker] ☠️ Task POISONED. Moved to DLQ: ${computation}`);
+    try {
+      await db.collection('computation_dead_letter_queue').add({
+        originalData: data,
+        dispatchId: dispatchId,
+        error: { message: 'Max Retries Exceeded', stack: 'PubSub delivery limit reached' },
+        finalAttemptAt: new Date(),
+        failureReason: 'MAX_RETRIES_EXCEEDED'
+      });
+
+      // [FIX] CRITICAL: Update Ledger to FAILED.
+      // Previously, this returned without updating, leaving the Ledger stuck in 'PENDING'.
+      // Now we explicitly mark it FAILED so the pipeline knows it's dead.
+      await db.collection(`computation_audit_ledger/${date}/passes/${pass}/tasks`).doc(computation).set({
+        status: 'FAILED',
+        error: 'Max Retries Exceeded (Poison Message)',
+        failedAt: new Date()
+      }, { merge: true });
+
+      return;
+    } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
+  }
+
+  logger.log('INFO', `[Worker] 📥 Received Task: ${computation} (${date}) [Attempt ${retryCount}/${MAX_RETRIES}]`, {
     dispatchId: dispatchId || 'legacy',
     reason: triggerReason
   });
 
-  //
-  // Mark task as IN_PROGRESS and set a lease timeout (e.g., 20 minutes) to prevent Zombies
+  // Mark task as IN_PROGRESS (Visual only, dispatcher does not use this for logic anymore)
   try {
-    const leaseTimeMs = (config.workerLeaseMinutes || 20) * 60 * 1000;
     await db.collection(`computation_audit_ledger/${date}/passes/${pass}/tasks`).doc(computation).set({
       status: 'IN_PROGRESS',
       workerId: process.env.K_REVISION || 'unknown',
       startedAt: new Date(),
-      leaseExpiresAt: Date.now() + leaseTimeMs,
       dispatchId: dispatchId
     }, { merge: true });
   } catch (leaseErr) {
-    logger.log('WARN', `[Worker] Failed to
+    logger.log('WARN', `[Worker] Failed to update status to IN_PROGRESS for ${computation}. Continuing...`, leaseErr);
   }
 
   let computationManifest;

@@ -73,7 +102,7 @@ async function handleComputationTask(message, config, dependencies) {
     runDependencies,
     computationManifest,
     previousCategory,
-    dependencyResultHashes
+    dependencyResultHashes
   );
   const duration = Date.now() - startTime;
 

@@ -121,28 +150,30 @@ async function handleComputationTask(message, config, dependencies) {
         finalAttemptAt: new Date(),
         failureReason: 'PERMANENT_DETERMINISTIC_ERROR'
       });
+
+      // [FIX] Update Ledger to FAILED immediately for deterministic errors
+      await db.collection(`computation_audit_ledger/${date}/passes/${pass}/tasks`).doc(computation).set({
+        status: 'FAILED',
+        error: err.message || 'Permanent Deterministic Error',
+        failedAt: new Date()
+      }, { merge: true });
+
       await recordRunAttempt(db, { date, computation, pass }, 'FAILURE', { message: err.message, stage: err.stage || 'PERMANENT_FAIL' }, { durationMs: 0 }, triggerReason);
       return;
     } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
   }
 
-
+  // Standard Retryable Error (Crash)
   if (retryCount >= MAX_RETRIES) {
-
-
-
-      originalData: data,
-      dispatchId: dispatchId,
-      error: { message: err.message, stack: err.stack },
-      finalAttemptAt: new Date(),
-      failureReason: 'MAX_RETRIES_EXCEEDED'
-    });
-    return;
-    } catch (dlqErr) { logger.log('FATAL', `[Worker] Failed to write to DLQ`, dlqErr); }
+    // We throw here, PubSub will retry, and the "Poison Logic" at the top
+    // will catch it on the NEXT attempt to finalize the failure.
+    throw err;
   }
 
   logger.log('ERROR', `[Worker] ❌ Crash: ${computation}: ${err.message}`);
+
   await recordRunAttempt(db, { date, computation, pass }, 'CRASH', { message: err.message, stack: err.stack, stage: 'SYSTEM_CRASH' }, { durationMs: 0 }, triggerReason);
+  // Throwing triggers Pub/Sub retry
   throw err;
 }
 }
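The worker now resolves every delivery to one of three outcomes: finalize as poison (DLQ plus a FAILED ledger entry) once Pub/Sub's delivery attempts are exhausted, finalize immediately for deterministic errors, or rethrow so Pub/Sub redelivers. An illustrative decision helper; names such as `isDeterministic` are hypothetical, and the real worker inlines these checks:

```js
// Illustrative summary of the worker's three outcomes (not part of the package).
const MAX_RETRIES = 3;

function classifyAttempt({ deliveryAttempt, err }) {
  const attempt = deliveryAttempt || 1;

  // Attempt 4+ means attempts 1-3 already failed: finalize as FAILED + DLQ, then ack.
  if (attempt > MAX_RETRIES) return 'POISON';

  // Deterministic errors (bad input, logic bugs) will never succeed on retry,
  // so finalize immediately instead of burning redeliveries.
  if (err && err.isDeterministic) return 'PERMANENT_FAIL';

  // Anything else is treated as transient: throw so Pub/Sub redelivers, and the
  // POISON branch finalizes the task if retries run out.
  return 'RETRY';
}

console.log(classifyAttempt({ deliveryAttempt: 4 }));                                 // POISON
console.log(classifyAttempt({ deliveryAttempt: 2, err: { isDeterministic: true } })); // PERMANENT_FAIL
console.log(classifyAttempt({ deliveryAttempt: 1, err: new Error('boom') }));         // RETRY
```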
package/functions/computation-system/helpers/monitor.js
ADDED

@@ -0,0 +1,63 @@
+/**
+ * @fileoverview Monitor helper for Cloud Workflows.
+ * Checks the state of the Audit Ledger to determine if a pass is complete.
+ * This function is stateless and receives dependencies via injection.
+ */
+
+/**
+ * Checks the status of a specific computation pass.
+ * @param {object} req - Express request object (query: date, pass).
+ * @param {object} res - Express response object.
+ * @param {object} dependencies - Contains db (Firestore), logger.
+ */
+async function checkPassStatus(req, res, dependencies) {
+  const { db, logger } = dependencies;
+  const { date, pass } = req.query;
+
+  if (!date || !pass) {
+    return res.status(400).json({ error: "Missing 'date' or 'pass' query parameters." });
+  }
+
+  const ledgerPath = `computation_audit_ledger/${date}/passes/${pass}/tasks`;
+  logger.log('INFO', `[Monitor] Checking status for ${date} Pass ${pass} at ${ledgerPath}`);
+
+  try {
+    const tasksRef = db.collection(ledgerPath);
+
+    // 1. Check for Active Tasks (Blocking)
+    // If anything is PENDING or IN_PROGRESS, the system is still working.
+    const runningSnap = await tasksRef.where('status', 'in', ['PENDING', 'IN_PROGRESS']).get();
+
+    if (!runningSnap.empty) {
+      logger.log('INFO', `[Monitor] Pass ${pass} is RUNNING. Active tasks: ${runningSnap.size}`);
+      return res.status(200).json({
+        state: 'RUNNING',
+        activeCount: runningSnap.size
+      });
+    }
+
+    // 2. Check for Failures (Retry Condition)
+    // If nothing is running, we check if anything ended in FAILED state.
+    // We consider these "retryable" by re-triggering the dispatcher.
+    const failedSnap = await tasksRef.where('status', '==', 'FAILED').get();
+
+    if (!failedSnap.empty) {
+      logger.log('WARN', `[Monitor] Pass ${pass} finished with FAILURES. Count: ${failedSnap.size}`);
+      return res.status(200).json({
+        state: 'HAS_FAILURES',
+        failureCount: failedSnap.size
+      });
+    }
+
+    // 3. Clean Success
+    // No running tasks, no failed tasks.
+    logger.log('INFO', `[Monitor] Pass ${pass} COMPLETED successfully.`);
+    return res.status(200).json({ state: 'SUCCESS' });
+
+  } catch (error) {
+    logger.log('ERROR', `[Monitor] Failed to check status: ${error.message}`);
+    return res.status(500).json({ error: error.message });
+  }
+}
+
+module.exports = { checkPassStatus };
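The monitor reduces a pass to one of three states: RUNNING, HAS_FAILURES, or SUCCESS. A minimal polling sketch against the endpoint, assuming Node 18+ (global `fetch`) and that the caller is authorized to invoke the deployed computation-monitor function (the workflow itself attaches an OIDC token); the project placeholder and sample date are illustrative:

```js
// Illustrative poller for the monitor endpoint (not part of the package).
const MONITOR_URL = 'https://europe-west1-<project>.cloudfunctions.net/computation-monitor';

async function waitForPass(date, pass) {
  for (;;) {
    const res = await fetch(`${MONITOR_URL}?date=${date}&pass=${pass}`);
    const body = await res.json();

    if (body.state === 'RUNNING') {
      console.log(`Pass ${pass}: ${body.activeCount} tasks still active...`);
      await new Promise((r) => setTimeout(r, 60_000)); // poll every minute
      continue;
    }
    // Either { state: 'SUCCESS' } or { state: 'HAS_FAILURES', failureCount }
    return body;
  }
}

waitForPass('2024-01-01', '1').then(console.log).catch(console.error);
```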
package/functions/computation-system/workflows/bulltrackers_pipeline.yaml
ADDED

@@ -0,0 +1,143 @@
+# Cloud Workflows Definition for BullTrackers Computation Pipeline
+# Orchestrates 5 sequential passes with Self-Healing (Retry) logic.
+
+main:
+  params: [input]
+  steps:
+    - init:
+        assign:
+          - project: ${sys.get_env("GOOGLE_CLOUD_PROJECT_ID")}
+          - location: "europe-west1"
+          # If 'date' is provided in input, use it. Otherwise default to today (YYYY-MM-DD).
+          - date_to_run: ${default(map.get(input, "date"), text.substring(time.format(sys.now()), 0, 10))}
+          - passes: ["1", "2", "3", "4", "5"]
+          - max_retries: 3
+          - propagation_wait_seconds: 300 # 5 Minutes
+          # URL of the new Monitor Function
+          - monitor_url: ${"https://europe-west1-" + project + ".cloudfunctions.net/computation-monitor"}
+
+    # ======================================================
+    # MAIN LOOP: Iterate through Passes 1 to 5
+    # ======================================================
+    - run_passes:
+        for:
+          value: pass_id
+          in: ${passes}
+          steps:
+            - init_pass_vars:
+                assign:
+                  - attempt_count: 0
+                  - pass_success: false
+                  # Construct URL for the specific pass function (e.g. computation-pass-1)
+                  - dispatcher_url: ${"https://europe-west1-" + project + ".cloudfunctions.net/computation-pass-" + pass_id}
+
+            # -----------------------------------------------
+            # RETRY LOOP: Try to complete the pass up to 3 times
+            # -----------------------------------------------
+            - pass_retry_loop:
+                switch:
+                  - condition: ${attempt_count < max_retries and not pass_success}
+                    steps:
+                      - increment_attempt:
+                          assign:
+                            - attempt_count: ${attempt_count + 1}
+
+                      - log_start:
+                          call: sys.log
+                          args:
+                            text: ${"Starting Pass " + pass_id + " (Attempt " + attempt_count + ") for " + date_to_run}
+                            severity: "INFO"
+
+                      # 1. TRIGGER DISPATCHER (Fire and Forget mechanism via HTTP)
+                      # The dispatcher analyzes missing data and queues tasks.
+                      - trigger_dispatcher:
+                          call: http.get
+                          args:
+                            url: ${dispatcher_url}
+                            query:
+                              date: ${date_to_run}
+                            auth:
+                              type: OIDC
+                            timeout: 1800 # 30 mins max for dispatch analysis
+                          result: dispatch_response
+
+                      # 2. PROPAGATION WAIT
+                      # Wait for dispatcher to queue tasks and workers to start/finish
+                      - wait_for_propagation:
+                          call: sys.log
+                          args:
+                            text: ${"Pass " + pass_id + " dispatched. Waiting " + propagation_wait_seconds + "s for propagation..."}
+                          next: sleep_propagation
+
+                      - sleep_propagation:
+                          call: sys.sleep
+                          args:
+                            seconds: ${propagation_wait_seconds}
+
+                      # 3. MONITORING LOOP
+                      # Poll until RUNNING state clears
+                      - monitor_loop:
+                          call: http.get
+                          args:
+                            url: ${monitor_url}
+                            query:
+                              date: ${date_to_run}
+                              pass: ${pass_id}
+                            auth:
+                              type: OIDC
+                          result: status_resp
+
+                      - evaluate_status:
+                          switch:
+                            # CASE A: Still Running -> Sleep and Poll Again
+                            - condition: ${status_resp.body.state == "RUNNING"}
+                              steps:
+                                - log_running:
+                                    call: sys.log
+                                    args:
+                                      text: ${"Pass " + pass_id + " is RUNNING (" + status_resp.body.activeCount + " active). Waiting..."}
+                                - sleep_polling:
+                                    call: sys.sleep
+                                    args:
+                                      seconds: 60
+                                - next: monitor_loop
+
+                            # CASE B: Clean Success -> Mark done, Break Retry Loop
+                            - condition: ${status_resp.body.state == "SUCCESS"}
+                              steps:
+                                - log_success:
+                                    call: sys.log
+                                    args:
+                                      text: ${"Pass " + pass_id + " COMPLETED successfully."}
+                                      severity: "INFO"
+                                - mark_success:
+                                    assign:
+                                      - pass_success: true
+                                - next: pass_retry_loop # Will exit loop due to pass_success=true
+
+                            # CASE C: Failures Found -> Continue Retry Loop (will trigger dispatcher again)
+                            - condition: ${status_resp.body.state == "HAS_FAILURES"}
+                              steps:
+                                - log_failure:
+                                    call: sys.log
+                                    args:
+                                      text: ${"Pass " + pass_id + " has " + status_resp.body.failureCount + " FAILURES. Attempting Retry."}
+                                      severity: "WARNING"
+                                - next: pass_retry_loop
+
+            # -----------------------------------------------
+            # END RETRY LOOP
+            # -----------------------------------------------
+
+            - check_final_status:
+                switch:
+                  - condition: ${not pass_success}
+                    steps:
+                      - log_giving_up:
+                          call: sys.log
+                          args:
+                            text: ${"Pass " + pass_id + " failed after " + max_retries + " attempts. Proceeding to next pass with potential gaps."}
+                            severity: "ERROR"
+
+    - finish:
+        return: "Pipeline Execution Complete"
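The workflow defaults `date_to_run` to today, but accepts a `date` argument, so it can also be started manually, for example to backfill a specific day. A sketch using the @google-cloud/workflows execution client; the workflow name `bulltrackers-pipeline`, project id, and sample date are assumptions:

```js
// Illustrative manual trigger for the pipeline (not part of the package).
const { ExecutionsClient } = require('@google-cloud/workflows');

async function runPipeline(project, date) {
  const client = new ExecutionsClient();
  const parent = `projects/${project}/locations/europe-west1/workflows/bulltrackers-pipeline`;

  // The workflow reads input.date; omit it to default to "today".
  const [execution] = await client.createExecution({
    parent,
    execution: { argument: JSON.stringify({ date }) },
  });

  console.log(`Started execution: ${execution.name}`);
}

runPipeline('my-gcp-project', '2024-01-01').catch(console.error);
```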
package/index.js
CHANGED

@@ -29,8 +29,9 @@ const { handleUpdate } = require('./functions
 const { build: buildManifest } = require('./functions/computation-system/context/ManifestBuilder');
 const { dispatchComputationPass } = require('./functions/computation-system/helpers/computation_dispatcher');
 const { handleComputationTask } = require('./functions/computation-system/helpers/computation_worker');
-// [NEW] Import Report Tools
 const { ensureBuildReport, generateBuildReport } = require('./functions/computation-system/tools/BuildReporter');
+// [NEW] Import Monitor
+const { checkPassStatus } = require('./functions/computation-system/helpers/monitor');
 
 const dataLoader = require('./functions/computation-system/utils/data_loader');
 const computationUtils = require('./functions/computation-system/utils/utils');

@@ -51,8 +52,7 @@ const { runBackfillAssetPrices } = require('./functions
 // Proxy
 const { handlePost } = require('./functions/appscript-api/index');
 
-//
-
+// Root Indexer
 const { runRootDataIndexer } = require('./functions/root-data-indexer/index');
 
 const core = {

@@ -92,9 +92,10 @@ const computationSystem = {
   dataLoader,
   computationUtils,
   buildManifest,
-  // [NEW] Export Tools
   ensureBuildReport,
   generateBuildReport,
+  // [NEW] Export Monitor Pipe
+  checkPassStatus
 };
 
 const api = {
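index.js now surfaces checkPassStatus on the computationSystem namespace, but the HTTP entrypoint itself is not shown in this diff. A hypothetical wiring that would serve it at the computation-monitor URL the workflow polls, assuming the package's main export exposes computationSystem as the object above suggests:

```js
// Hypothetical wiring for the monitor endpoint (not shown in this diff).
// Demonstrates only the expected dependency injection ({ db, logger }).
const functions = require('@google-cloud/functions-framework');
const admin = require('firebase-admin');
const { checkPassStatus } = require('bulltrackers-module').computationSystem;

admin.initializeApp();
const db = admin.firestore();

// Minimal stand-in for the package's StructuredLogger.
const logger = { log: (level, msg, meta) => console.log(level, msg, meta || '') };

// Deployed as the HTTP function that monitor_url in the workflow points at.
functions.http('computation-monitor', (req, res) => checkPassStatus(req, res, { db, logger }));
```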