bulltrackers-module 1.0.769 → 1.0.770
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,17 +6,17 @@ class RiskScoreIncrease extends Computation {
     return {
       name: 'RiskScoreIncrease',
       // V2 Migration: Switch to per-entity for parallel processing
-      type: 'per-entity',
+      type: 'per-entity',
       category: 'alerts',
       isHistorical: true,
-
+
       // V2 Migration: Define strict data requirements
       requires: {
-        '
+        'pi_rankings': {
           lookback: 1, // Fetches Today + Yesterday
           mandatory: true,
           // Optimization: Only fetch needed fields
-          fields: ['
+          fields: ['pi_id', 'rankings_data', 'date'],
           // Filter applied at data fetch level (replacing V1 userType logic)
           filter: { user_type: 'POPULAR_INVESTOR' }
         }
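The `requires` block above is a declarative data contract: the framework performs the fetch before `process` runs, so the computation never queries storage itself. A minimal sketch of the fetch plan such an entry implies; `planFetch` is a hypothetical helper invented for illustration, and only the spec shape comes from this diff:

// Hypothetical sketch: the fetch plan a `requires` entry implies.
// `planFetch` is invented for illustration and is not a package API.
function planFetch(tableName, spec, targetDate) {
  const end = new Date(targetDate);
  const start = new Date(end);
  start.setDate(start.getDate() - spec.lookback); // lookback: 1 -> today + yesterday
  return {
    table: tableName,
    select: spec.fields,        // only the needed columns
    where: { ...spec.filter },  // e.g. user_type = 'POPULAR_INVESTOR'
    dateRange: [start.toISOString().split('T')[0], end.toISOString().split('T')[0]],
    required: spec.mandatory
  };
}

// planFetch('pi_rankings', { lookback: 1, mandatory: true,
//   fields: ['pi_id', 'rankings_data', 'date'],
//   filter: { user_type: 'POPULAR_INVESTOR' } }, '2024-01-02')
// -> dateRange ['2024-01-01', '2024-01-02']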
@@ -28,7 +28,7 @@ class RiskScoreIncrease extends Computation {
       firestore: {
         enabled: true,
         // Ensures V1 alert system can find the document
-        path: 'alerts/{date}/RiskScoreIncrease/{entityId}',
+        path: 'alerts/{date}/RiskScoreIncrease/{entityId}',
         merge: true
       }
     },
@@ -44,7 +44,7 @@ class RiskScoreIncrease extends Computation {
       description: 'Alert when a Popular Investor\'s risk score increases',
       messageTemplate: '{piUsername}\'s risk score increased by {change} points (from {previous} to {current})',
       severity: 'high',
-      configKey: 'increasedRisk',
+      configKey: 'increasedRisk',
       isDynamic: true,
       dynamicConfig: {
         thresholds: [
@@ -72,9 +72,9 @@ class RiskScoreIncrease extends Computation {
         }
       ],
       resultFields: {
-        change: 'change',
-        newValue: 'currentRisk',
-        oldValue: 'previousRisk'
+        change: 'change',
+        newValue: 'currentRisk',
+        oldValue: 'previousRisk'
       }
     }
   }
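`resultFields` renames keys on the computation result into the generic fields the alert layer reads; the `messageTemplate` placeholders from the earlier hunk then resolve against them. A sketch of that interpolation — the placeholder-to-field pairing ({current} via newValue, {previous} via oldValue) is an assumption for illustration, not confirmed by this diff:

// Sketch only: maps a result through resultFields into the message template.
// The placeholder-to-field pairing is assumed for illustration.
function renderMessage(template, result, resultFields) {
  return template
    .replace('{change}', result[resultFields.change])     // result.change
    .replace('{current}', result[resultFields.newValue])  // result.currentRisk
    .replace('{previous}', result[resultFields.oldValue]) // result.previousRisk
    .replace('{piUsername}', result.piUsername);
}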
@@ -87,15 +87,15 @@ class RiskScoreIncrease extends Computation {
     // 1. Data Access
     // In V2 with lookback: 1, this is an array of entries sorted by date
     const rankingHistory = data['rankings'] || [];
-
+
     // Find today's and yesterday's entries specifically by date string
     const todayEntry = rankingHistory.find(d => d.date === date);
-
+
     // Calculate yesterday's date string for lookup
     const yesterdayDate = new Date(date);
     yesterdayDate.setDate(yesterdayDate.getDate() - 1);
     const yesterdayStr = yesterdayDate.toISOString().split('T')[0];
-
+
     const yesterdayEntry = rankingHistory.find(d => d.date === yesterdayStr);
 
     // 2. Business Logic (Using Rules)
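With `lookback: 1` the computation receives at most two dated rows per entity, and the lookups above pick them by exact date string rather than trusting array order. A self-contained illustration with invented values (the nested result shape is a guess; only the `date` key is shown by this diff):

// Invented sample data showing the shape the date lookups expect.
const date = '2024-01-02';
const rankingHistory = [
  { date: '2024-01-01', rankings_data: { riskScore: 4 } },
  { date: '2024-01-02', rankings_data: { riskScore: 6 } }
];
const todayEntry = rankingHistory.find(d => d.date === date);             // second row
const yesterdayDate = new Date(date);
yesterdayDate.setDate(yesterdayDate.getDate() - 1);
const yesterdayStr = yesterdayDate.toISOString().split('T')[0];           // '2024-01-01'
const yesterdayEntry = rankingHistory.find(d => d.date === yesterdayStr); // first row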
@@ -112,7 +112,7 @@ class RiskScoreIncrease extends Computation {
     // If today's risk data is missing, we cannot evaluate
     if (currentRisk === null || currentRisk === undefined) {
       this.log('WARN', `No current risk data for ${entityId}`);
-      return;
+      return;
     }
 
     // 4. Comparison Logic
@@ -1,106 +1,130 @@
 /**
- * @fileoverview Scheduler
- * * 1.
- *
- *
- * - Enqueues Cloud Tasks with `scheduleTime` and `configHash`.
- * * 2. runWatchdog: Runs frequently (e.g. every 15 mins).
- * - Detects Zombies (stuck running tasks).
- * - Re-queues them immediately.
+ * @fileoverview Scheduler V3.1: Reconciler with Active Garbage Collection
+ * * * 1. Reconcile: Iterates a time window (Past -> Future) to ensure valid tasks exist.
+ * * 2. Purge: Scans the queue for "Orphans" (tasks for deleted computations) and deletes them.
+ * * 3. Watchdog: Recovers "Zombie" tasks (running but stuck).
  */
 
 const { CloudTasksClient } = require('@google-cloud/tasks');
 const crypto = require('crypto');
 const pLimit = require('p-limit');
+
+// Framework
 const { ManifestBuilder } = require('../framework');
 const { StorageManager } = require('../framework/storage/StorageManager');
+const { StateRepository } = require('../framework/storage/StateRepository');
 const config = require('../config/bulltrackers.config');
 
-
+// Config
+const CLOUD_TASKS_CONCURRENCY = 20;
+const PLANNING_LOOKBACK_DAYS = 7;    // Look back to ensure recent history is correct
+const PLANNING_LOOKAHEAD_HOURS = 24; // Schedule future tasks
 const ZOMBIE_THRESHOLD_MINUTES = 15;
-const PLANNING_WINDOW_HOURS = 24; // Look ahead window
 
 // Cache singleton instances
 let manifest = null;
 let tasksClient = null;
 let storageManager = null;
+let stateRepository = null;
 
 async function initialize() {
   if (manifest) return;
   console.log('[Scheduler] Initializing services...');
-
-  //
-  const builder = new ManifestBuilder(config, { log: () => {} });
+
+  // Use a no-op logger for manifest builder to reduce noise
+  const builder = new ManifestBuilder(config, { log: () => { } });
   manifest = builder.build(config.computations || []);
-
+
   tasksClient = new CloudTasksClient();
   storageManager = new StorageManager(config, console);
-
+  stateRepository = new StateRepository(config, console);
+
   console.log(`[Scheduler] Loaded ${manifest.length} computations.`);
 }
 
 /**
- * ENTRY POINT 1: The
+ * ENTRY POINT 1: The Reconciler & Garbage Collector
  * Trigger: Cloud Scheduler -> "0 * * * *" (Every Hour)
- * Goals: Ensure all future tasks for the next 24h are in the queue.
  */
 async function planComputations(req, res) {
-  const startTime = Date.now();
   try {
     await initialize();
 
+    // --- PHASE 1: RECONCILIATION (Ensure valid tasks exist) ---
    const now = new Date();
-    const
+    const windowStart = new Date(now);
+    windowStart.setDate(now.getDate() - PLANNING_LOOKBACK_DAYS);
+    windowStart.setHours(0, 0, 0, 0);
 
-
+    const windowEnd = new Date(now);
+    windowEnd.setTime(now.getTime() + (PLANNING_LOOKAHEAD_HOURS * 60 * 60 * 1000));
 
-
+    console.log(`[Planner] Reconciling window: ${windowStart.toISOString()} to ${windowEnd.toISOString()}`);
 
-
-
-
-
-
-
-
-
-
-
-    // Generate Tasks for each occurrence
-    for (const dateObj of occurrences) {
-      // Resilience: Generate a hash of the critical scheduling config.
-      // If schedule OR pass changes, this hash changes, creating a new Task ID.
-      const configHash = generateConfigHash(entry);
-      const targetDateStr = dateObj.toISOString().split('T')[0];
-
-      tasksToSchedule.push({
-        computation: entry.originalName,
-        targetDate: targetDateStr,
-        runAtSeconds: dateObj.getTime() / 1000,
-        configHash: configHash,
-        queuePath: getQueuePath(entry)
-      });
-    }
+    const tasksToSchedule = [];
+    const stats = { checked: 0, scheduled: 0, mismatched: 0, missing: 0 };
+
+    // Iterate dates in window
+    const targetDates = [];
+    let cursor = new Date(windowStart);
+    while (cursor <= windowEnd) {
+      targetDates.push(new Date(cursor));
+      cursor.setDate(cursor.getDate() + 1);
     }
 
-
-
-
+    const dateLimit = pLimit(5);
+    await Promise.all(targetDates.map(dateObj => dateLimit(async () => {
+      const dateStr = dateObj.toISOString().split('T')[0];
+      const dailyStatus = await stateRepository.getDailyStatus(dateStr);
+
+      for (const entry of manifest) {
+        if (entry.pass !== 1) continue; // Only schedule Roots
+        if (!shouldRunOnDate(entry.schedule, dateObj)) continue;
+
+        stats.checked++;
+        const lastRun = dailyStatus.get(entry.name);
+        let reason = null;
+
+        if (!lastRun) {
+          reason = 'MISSING_RUN';
+          stats.missing++;
+        } else if (lastRun.hash !== entry.hash) {
+          reason = 'HASH_MISMATCH';
+          stats.mismatched++;
+        }
+
+        if (reason) {
+          tasksToSchedule.push({
+            computation: entry.originalName,
+            targetDate: dateStr,
+            runAtSeconds: getRunTimeSeconds(entry.schedule, dateObj),
+            configHash: entry.hash,
+            queuePath: getQueuePath(),
+            reason
+          });
+        }
+      }
+    })));
 
-    // 2
-
+    // --- PHASE 2: GARBAGE COLLECTION (Remove invalid tasks) ---
+    console.log('[Planner] Starting Garbage Collection...');
+    const deletedCount = await cleanupOrphanedTasks();
 
-
-
+    // --- PHASE 3: DISPATCH ---
+    let scheduledCount = 0;
+    if (tasksToSchedule.length > 0) {
+      const results = await dispatchTasks(tasksToSchedule);
+      scheduledCount = results.filter(r => r.status === 'scheduled').length;
+    }
 
-    console.log(`[Planner]
+    console.log(`[Planner] Complete. Scheduled: ${scheduledCount}, Deleted Orphans: ${deletedCount}`);
 
     return res.status(200).json({
-      status: '
-      window: `${
-
-
-
+      status: 'success',
+      window: `${PLANNING_LOOKBACK_DAYS}d back, ${PLANNING_LOOKAHEAD_HOURS}h fwd`,
+      scheduled: scheduledCount,
+      deletedOrphans: deletedCount,
+      stats
     });
 
   } catch (error) {
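The reconciler's core is a pure decision over the stored run record and the manifest entry's current hash. Restated standalone for clarity (the same branch as in the hunk above, not additional package code):

// Standalone restatement of the reconcile branch above.
function reconcileReason(lastRun, entryHash) {
  if (!lastRun) return 'MISSING_RUN';                     // never ran for that date
  if (lastRun.hash !== entryHash) return 'HASH_MISMATCH'; // computation changed since that run
  return null;                                            // last run still valid; nothing to do
}

// reconcileReason(undefined, 'abc')       -> 'MISSING_RUN'
// reconcileReason({ hash: 'old' }, 'abc') -> 'HASH_MISMATCH'
// reconcileReason({ hash: 'abc' }, 'abc') -> null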
@@ -112,57 +136,32 @@ async function planComputations(req, res) {
 /**
  * ENTRY POINT 2: The Watchdog
  * Trigger: Cloud Scheduler -> "*\/15 * * * *" (Every 15 mins)
- * Goals: Find stuck tasks and re-queue them.
  */
 async function runWatchdog(req, res) {
   try {
     await initialize();
-
-    // 1. Find Zombies
     const zombies = await storageManager.findZombies(ZOMBIE_THRESHOLD_MINUTES);
-
-    // Filter out excessive attempts
-    const actionableZombies = [];
-    for (const z of zombies) {
-      if ((z.attempts || 0) >= 3) {
-        console.warn(`[Watchdog] Ignoring zombie ${z.name} (Checkpoint: ${z.checkpointId}) - Max attempts reached (${z.attempts})`);
-        continue;
-      }
-      actionableZombies.push(z);
-    }
-
-    if (actionableZombies.length === 0) {
-      return res.status(200).send('No recoverable zombies.');
-    }
+    const actionableZombies = zombies.filter(z => (z.attempts || 0) < 3);
 
-
+    if (actionableZombies.length === 0) return res.status(200).send('No recoverable zombies.');
 
-
-    // We claim them first so the next watchdog doesn't grab them while we are dispatching
+    console.log(`[Watchdog] 🧟 Found ${actionableZombies.length} zombies. Recovering...`);
     await Promise.all(actionableZombies.map(z => storageManager.claimZombie(z.checkpointId)));
 
     const recoveryTasks = actionableZombies.map(z => {
       const entry = manifest.find(m => m.name === z.name);
-      if (!entry)
-        console.error(`[Watchdog] Computation ${z.name} no longer exists in manifest. Cannot recover.`);
-        return null;
-      }
+      if (!entry) return null;
       return {
         computation: entry.originalName,
         targetDate: z.date,
         isRecovery: true,
         recoveryId: z.checkpointId,
-        queuePath: getQueuePath(
+        queuePath: getQueuePath()
       };
     }).filter(Boolean);
 
-    const results = await
-
-    return res.status(200).json({
-      status: 'recovered',
-      count: results.length,
-      details: results
-    });
+    const results = await dispatchTasks(recoveryTasks);
+    return res.status(200).json({ status: 'recovered', count: results.length });
 
   } catch (error) {
     console.error('[Watchdog] Error:', error);
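Because the watchdog fires every 15 minutes, zombies are claimed before dispatch so an overlapping run cannot re-queue the same checkpoint, and the attempt cap keeps a permanently failing task from looping forever. The filter semantics, shown with invented sample data:

// Same predicate as the filter above, applied to invented zombies.
const zombies = [
  { name: 'RiskScoreIncrease', checkpointId: 'cp-1', attempts: 1 }, // kept
  { name: 'RiskScoreIncrease', checkpointId: 'cp-2', attempts: 3 }, // dropped: cap reached
  { name: 'OtherComputation',  checkpointId: 'cp-3' }               // kept: missing attempts counts as 0
];
const actionable = zombies.filter(z => (z.attempts || 0) < 3);      // cp-1 and cp-3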
@@ -171,131 +170,106 @@ async function runWatchdog(req, res) {
 }
 
 // =============================================================================
-//
+// ACTIVE GARBAGE COLLECTION LOGIC
 // =============================================================================
 
-
-
-
-
-
-  const times = [];
-  const [h, m] = (schedule.time || '02:00').split(':').map(Number);
-
-  // Clone start date to iterate
-  let current = new Date(start);
-  current.setUTCHours(h, m, 0, 0);
-
-  // If current is before start (e.g. window starts at 10:00, schedule is 02:00), move to tomorrow
-  if (current < start) {
-    current.setDate(current.getDate() + 1);
-  }
+async function cleanupOrphanedTasks() {
+  const parent = getQueuePath();
+  const validKebabNames = new Set(manifest.map(m => toKebab(m.originalName)));
+  const limit = pLimit(CLOUD_TASKS_CONCURRENCY);
+  let deletedCount = 0;
 
-
-
-
-
-
-
-
-
-    // Monthly Check
-    if (schedule.frequency === 'monthly' && current.getUTCDate() !== (schedule.dayOfMonth ?? 1)) {
-      match = false;
-    }
+  try {
+    // Iterate over ALL tasks in the queue
+    // Note: listTasksAsync handles pagination automatically
+    const tasksToDelete = [];
+
+    for await (const task of tasksClient.listTasksAsync({ parent, responseView: 'BASIC' })) {
+      const taskNameFull = task.name;
+      const taskNameShort = taskNameFull.split('/').pop(); // e.g., root-my-comp-2023-01-01-abcdef
 
-
-
+      // 1. Regex Match: Capture the computation name part
+      // Pattern: (root|recovery)-{kebabName}-{date}-{hash}
+      // Date is YYYY-MM-DD (10 chars)
+      // Hash is 8 chars (or more)
+      const match = taskNameShort.match(/^(?:root|recovery)-(.+)-\d{4}-\d{2}-\d{2}-/);
+
+      if (!match) continue; // Skip tasks that don't match our naming convention
+
+      const extractedKebabName = match[1];
+
+      // 2. Check Validity
+      if (!validKebabNames.has(extractedKebabName)) {
+        // ORPHAN DETECTED!
+        tasksToDelete.push(taskNameFull);
+      }
     }
 
-
-
+    if (tasksToDelete.length === 0) return 0;
+
+    console.log(`[Planner] 🗑️ Found ${tasksToDelete.length} orphaned tasks. Deleting...`);
+
+    // 3. Delete in parallel
+    await Promise.all(tasksToDelete.map(name => limit(async () => {
+      try {
+        await tasksClient.deleteTask({ name });
+        deletedCount++;
+      } catch (e) {
+        console.warn(`[Planner] Failed to delete orphan ${name}: ${e.message}`);
+      }
+    })));
+
+  } catch (e) {
+    console.error(`[Planner] GC Error: ${e.message}`);
   }
-
-  return
+
+  return deletedCount;
 }
 
-
-
-
-
-function
-
-
+// =============================================================================
+// HELPERS
+// =============================================================================
+
+function shouldRunOnDate(schedule, dateObj) {
+  if (schedule.frequency === 'weekly' && dateObj.getUTCDay() !== (schedule.dayOfWeek ?? 0)) return false;
+  if (schedule.frequency === 'monthly' && dateObj.getUTCDate() !== (schedule.dayOfMonth ?? 1)) return false;
+  return true;
+}
+
+function getRunTimeSeconds(schedule, dateObj) {
+  const [h, m] = (schedule.time || '02:00').split(':').map(Number);
+  const runTime = new Date(dateObj);
+  runTime.setUTCHours(h, m, 0, 0);
+  return runTime.getTime() / 1000;
 }
 
-function getQueuePath(
+function getQueuePath() {
   const { projectId, location, queueName } = config.cloudTasks;
   return tasksClient.queuePath(projectId, location, queueName);
 }
 
-
-
- * Uses deterministic naming for deduplication.
- */
-async function dispatchPlannedTasks(tasks) {
-  const limit = pLimit(CLOUD_TASKS_CONCURRENCY);
-  const { dispatcherUrl, serviceAccountEmail } = config.cloudTasks;
-
-  return Promise.all(tasks.map(t => limit(async () => {
-    try {
-      // Task Name: root-{name}-{date}-{configHash}
-      // If developer changes schedule -> hash changes -> new task created.
-      // If developer changes code but not schedule -> hash same -> existing task preserved.
-      const taskName = `${t.queuePath}/tasks/root-${toKebab(t.computation)}-${t.targetDate}-${t.configHash}`;
-
-      const payload = {
-        computationName: t.computation,
-        targetDate: t.targetDate,
-        source: 'scheduled',
-        configHash: t.configHash // Sent to dispatcher for potential validation
-      };
-
-      const task = {
-        httpRequest: {
-          httpMethod: 'POST',
-          url: dispatcherUrl,
-          headers: { 'Content-Type': 'application/json' },
-          body: Buffer.from(JSON.stringify(payload)).toString('base64'),
-          oidcToken: {
-            serviceAccountEmail,
-            audience: dispatcherUrl // <--- FIXED: Must exactly match function URL
-          }
-        },
-        scheduleTime: { seconds: t.runAtSeconds },
-        name: taskName
-      };
-
-      await tasksClient.createTask({ parent: t.queuePath, task });
-      return { computation: t.computation, date: t.targetDate, status: 'scheduled' };
-
-    } catch (e) {
-      if (e.code === 6 || e.code === 409) {
-        return { computation: t.computation, date: t.targetDate, status: 'exists' };
-      }
-      console.error(`[Planner] Failed to schedule ${t.computation}:`, e.message);
-      return { computation: t.computation, status: 'error', error: e.message };
-    }
-  })));
+function toKebab(str) {
+  return str.replace(/([a-z])([A-Z])/g, '$1-$2').toLowerCase().replace(/[^a-z0-9-]/g, '');
 }
 
-
- * Dispatches Recovery Tasks (Zombies)
- * Always creates unique task names to ensure retry.
- */
-async function dispatchRecoveryTasks(tasks) {
+async function dispatchTasks(tasks) {
   const limit = pLimit(CLOUD_TASKS_CONCURRENCY);
   const { dispatcherUrl, serviceAccountEmail } = config.cloudTasks;
 
   return Promise.all(tasks.map(t => limit(async () => {
     try {
-
-
-
+      const name = t.isRecovery
+        ? `recovery-${toKebab(t.computation)}-${t.targetDate}-${Date.now()}`
+        : `root-${toKebab(t.computation)}-${t.targetDate}-${t.configHash}`;
+
+      const taskName = `${t.queuePath}/tasks/${name}`;
+
       const payload = {
         computationName: t.computation,
         targetDate: t.targetDate,
-        source: 'zombie-recovery'
+        source: t.isRecovery ? 'zombie-recovery' : 'scheduled',
+        configHash: t.configHash,
+        reason: t.reason
       };
 
       const task = {
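The orphan check stands or falls on the task-name regex recovering the kebab-case computation name; the greedy `(.+)` backtracks until the trailing date-shaped segment matches. A worked example with invented task names:

// Worked example of the orphan regex (task names invented).
const re = /^(?:root|recovery)-(.+)-\d{4}-\d{2}-\d{2}-/;
const validKebabNames = new Set(['risk-score-increase']); // toKebab('RiskScoreIncrease')

'root-risk-score-increase-2024-01-02-a1b2c3d4'.match(re)[1];   // 'risk-score-increase' -> valid, kept
'root-debug-signed-in-users-2024-01-02-a1b2c3d4'.match(re)[1]; // 'debug-signed-in-users' -> orphan, deleted
'manual-test-task'.match(re);                                  // null -> skipped, never deleted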
@@ -304,26 +278,21 @@ async function dispatchRecoveryTasks(tasks) {
           url: dispatcherUrl,
           headers: { 'Content-Type': 'application/json' },
           body: Buffer.from(JSON.stringify(payload)).toString('base64'),
-          oidcToken: {
-            serviceAccountEmail,
-            audience: dispatcherUrl // <--- FIXED: Must exactly match function URL
-          }
+          oidcToken: { serviceAccountEmail, audience: dispatcherUrl }
         },
-        // Run Immediately (no scheduleTime)
         name: taskName
       };
 
-
-      return { computation: t.computation, status: 'recovered' };
+      if (t.runAtSeconds) task.scheduleTime = { seconds: t.runAtSeconds };
 
+      await tasksClient.createTask({ parent: t.queuePath, task });
+      return { status: 'scheduled' };
     } catch (e) {
-
+      if (e.code === 6 || e.code === 409) return { status: 'exists' };
+      console.error(`[Planner] Failed task ${t.computation}: ${e.message}`);
+      return { status: 'error' };
     }
   })));
 }
 
-function toKebab(str) {
-  return str.replace(/([a-z])([A-Z])/g, '$1-$2').toLowerCase().replace(/[^a-z0-9-]/g, '');
-}
-
 module.exports = { planComputations, runWatchdog };
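A note on the naming split inside `dispatchTasks`: deterministic `root-...-{configHash}` names let Cloud Tasks deduplicate re-planned work (`createTask` rejects a duplicate name with ALREADY_EXISTS, i.e. gRPC code 6 or HTTP 409, which the catch block maps to status 'exists'), while `recovery-...-${Date.now()}` names are deliberately unique so a retry is never swallowed by that dedup. Illustrative values:

// Invented values: deterministic vs. intentionally unique task names.
const rootName = 'root-risk-score-increase-2024-01-02-a1b2c3d4';              // re-planning same config -> same name -> deduped
const recoveryName = `recovery-risk-score-increase-2024-01-02-${Date.now()}`; // always fresh -> always enqueued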
package/package.json CHANGED

@@ -1,51 +0,0 @@
-const { Computation } = require('../framework');
-
-class DebugSignedInUsers extends Computation {
-  static getConfig() {
-    return {
-      name: 'DebugSignedInUsers',
-      type: 'per-entity',
-      category: 'debug',
-
-      // CRITICAL: Tells the Orchestrator which partition to use
-      userType: 'SIGNED_IN_USER',
-
-      requires: {
-        // DRIVER TABLE: This MUST be first.
-        // The DAG will only create tasks for Entity IDs found in this filtered query.
-        'portfolio_snapshots': {
-          lookback: 0, // Just need today's existence
-          mandatory: true,
-          fields: ['user_id', 'date'],
-          filter: { user_type: 'SIGNED_IN_USER' }
-        }
-      },
-
-      storage: {
-        // Just log to console or a debug table for now
-        bigquery: false,
-        firestore: { enabled: false }
-      }
-    };
-  }
-
-  async process(context) {
-    const { entityId, data } = context;
-
-    // If the Orchestrator works correctly, we should ONLY see Signed-In Users here.
-    // We verify by returning their ID.
-
-    const row = data['portfolio_snapshots'];
-
-    // Sanity Check: If row exists, we are good.
-    if (row) {
-      this.setResult(entityId, {
-        status: 'Found',
-        userType: 'SIGNED_IN_USER',
-        checkedAt: new Date().toISOString()
-      });
-    }
-  }
-}
-
-module.exports = DebugSignedInUsers;