bulltrackers-module 1.0.769 → 1.0.771

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,7 +17,7 @@ class StateRepository {
   constructor(config, logger = null) {
     this.config = config;
     this.logger = logger || console;
-
+
    this.bigquery = new BigQuery({
       projectId: config.bigquery.projectId,
       location: config.bigquery.location
@@ -156,6 +156,8 @@ class StateRepository {
       location: this.config.bigquery.location
     });
 
+    this._log('INFO', `getResult('${computationName}', '${dateStr}') table=${table} rows=${rows.length}`);
+
     if (rows.length === 0) {
       this.resultCache.set(cacheKey, null);
       return null;
@@ -213,8 +215,8 @@ class StateRepository {
 
     const [rows] = await this.bigquery.query({
       query,
-      params: {
-        targetDate: dateStr,
+      params: {
+        targetDate: dateStr,
         compName: computationName.toLowerCase(),
         entityId: String(entityId)
       },
@@ -243,11 +245,11 @@ class StateRepository {
    */
   async getBatchEntityResults(dateStr, computationName, entityIds) {
     if (!entityIds || entityIds.length === 0) return {};
-
+
     const cacheKeyPrefix = `${dateStr}:${computationName.toLowerCase()}`;
     const results = {};
     const uncachedIds = [];
-
+
     // Check cache first
     for (const entityId of entityIds) {
       const key = `${cacheKeyPrefix}:${entityId}`;
@@ -257,9 +259,9 @@ class StateRepository {
         uncachedIds.push(entityId);
       }
     }
-
+
     if (uncachedIds.length === 0) return results;
-
+
     // Fetch uncached in batch
     try {
       const table = this.config.resultStore?.table || 'computation_results';
@@ -275,8 +277,8 @@ class StateRepository {
 
       const [rows] = await this.bigquery.query({
         query,
-        params: {
-          targetDate: dateStr,
+        params: {
+          targetDate: dateStr,
           compName: computationName.toLowerCase(),
           entityIds: uncachedIds.map(String)
         },
@@ -290,11 +292,11 @@ class StateRepository {
         if (typeof data === 'string') {
           try { data = JSON.parse(data); } catch (e) { /* keep */ }
         }
-
+
         results[entityId] = data;
         this.resultCache.set(`${cacheKeyPrefix}:${entityId}`, data);
       }
-
+
       return results;
     } catch (e) {
       this._log('ERROR', `Batch fetch failed: ${e.message}`);
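
A note on the batch path shown above: getBatchEntityResults keys its in-memory cache as `${dateStr}:${compName}:${entityId}`, so only IDs that miss the cache reach BigQuery, and entities that returned rows are cached for subsequent calls. A minimal usage sketch, assuming the module layout this diff implies; the require paths, date, computation name, and entity IDs are illustrative, not taken from the package:

// Sketch only: paths and sample values are assumptions, not from the diff.
const { StateRepository } = require('./framework/storage/StateRepository');
const config = require('./config/bulltrackers.config');

async function demo() {
  const repo = new StateRepository(config, console);

  // Cold call: all three IDs miss the cache and are fetched in one parameterized query.
  const first = await repo.getBatchEntityResults('2024-01-15', 'SomeComputation', ['1', '2', '3']);

  // Warm call: IDs that returned rows above are now cached and served from
  // memory; only still-unknown IDs would hit BigQuery again.
  const second = await repo.getBatchEntityResults('2024-01-15', 'SomeComputation', ['1', '2', '3']);

  console.log(Object.keys(first), Object.keys(second));
}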
@@ -1,106 +1,130 @@
 /**
- * @fileoverview Scheduler V2: Planner & Watchdog
- * * 1. planComputations: Runs infrequently (e.g. Hourly/Daily).
- * - Loads Manifest.
- * - Forecasts all Root executions for the next 24-48h.
- * - Enqueues Cloud Tasks with `scheduleTime` and `configHash`.
- * * 2. runWatchdog: Runs frequently (e.g. every 15 mins).
- * - Detects Zombies (stuck running tasks).
- * - Re-queues them immediately.
+ * @fileoverview Scheduler V3.1: Reconciler with Active Garbage Collection
+ * * * 1. Reconcile: Iterates a time window (Past -> Future) to ensure valid tasks exist.
+ * * 2. Purge: Scans the queue for "Orphans" (tasks for deleted computations) and deletes them.
+ * * 3. Watchdog: Recovers "Zombie" tasks (running but stuck).
  */
 
 const { CloudTasksClient } = require('@google-cloud/tasks');
 const crypto = require('crypto');
 const pLimit = require('p-limit');
+
+// Framework
 const { ManifestBuilder } = require('../framework');
 const { StorageManager } = require('../framework/storage/StorageManager');
+const { StateRepository } = require('../framework/storage/StateRepository');
 const config = require('../config/bulltrackers.config');
 
-const CLOUD_TASKS_CONCURRENCY = 10;
+// Config
+const CLOUD_TASKS_CONCURRENCY = 20;
+const PLANNING_LOOKBACK_DAYS = 7; // Look back to ensure recent history is correct
+const PLANNING_LOOKAHEAD_HOURS = 24; // Schedule future tasks
 const ZOMBIE_THRESHOLD_MINUTES = 15;
-const PLANNING_WINDOW_HOURS = 24; // Look ahead window
 
 // Cache singleton instances
 let manifest = null;
 let tasksClient = null;
 let storageManager = null;
+let stateRepository = null;
 
 async function initialize() {
   if (manifest) return;
   console.log('[Scheduler] Initializing services...');
-
-  // We pass a no-op logger to prevent noise during frequent checks
-  const builder = new ManifestBuilder(config, { log: () => {} });
+
+  // Use a no-op logger for manifest builder to reduce noise
+  const builder = new ManifestBuilder(config, { log: () => { } });
   manifest = builder.build(config.computations || []);
-
+
   tasksClient = new CloudTasksClient();
   storageManager = new StorageManager(config, console);
-
+  stateRepository = new StateRepository(config, console);
+
   console.log(`[Scheduler] Loaded ${manifest.length} computations.`);
 }
 
 /**
- * ENTRY POINT 1: The Planner
+ * ENTRY POINT 1: The Reconciler & Garbage Collector
  * Trigger: Cloud Scheduler -> "0 * * * *" (Every Hour)
- * Goals: Ensure all future tasks for the next 24h are in the queue.
  */
 async function planComputations(req, res) {
-  const startTime = Date.now();
   try {
     await initialize();
 
+    // --- PHASE 1: RECONCILIATION (Ensure valid tasks exist) ---
     const now = new Date();
-    const windowEnd = new Date(now.getTime() + PLANNING_WINDOW_HOURS * 60 * 60 * 1000);
+    const windowStart = new Date(now);
+    windowStart.setDate(now.getDate() - PLANNING_LOOKBACK_DAYS);
+    windowStart.setHours(0, 0, 0, 0);
 
-    console.log(`[Planner] Planning window: ${now.toISOString()} to ${windowEnd.toISOString()}`);
+    const windowEnd = new Date(now);
+    windowEnd.setTime(now.getTime() + (PLANNING_LOOKAHEAD_HOURS * 60 * 60 * 1000));
 
-    const tasksToSchedule = [];
+    console.log(`[Planner] Reconciling window: ${windowStart.toISOString()} to ${windowEnd.toISOString()}`);
 
-    // 1. Walk the Manifest
-    for (const entry of manifest) {
-      // FILTER: Only Roots (Pass 1)
-      // Graph.js assigns roots (no dependencies) to Pass 1.
-      // Resilience: If code changes and a comp becomes Pass 2+, it won't be scheduled here.
-      if (entry.pass !== 1) continue;
-
-      // Calculate Occurrences
-      const occurrences = getOccurrencesInWindow(entry.schedule, now, windowEnd);
-
-      // Generate Tasks for each occurrence
-      for (const dateObj of occurrences) {
-        // Resilience: Generate a hash of the critical scheduling config.
-        // If schedule OR pass changes, this hash changes, creating a new Task ID.
-        const configHash = generateConfigHash(entry);
-        const targetDateStr = dateObj.toISOString().split('T')[0];
-
-        tasksToSchedule.push({
-          computation: entry.originalName,
-          targetDate: targetDateStr,
-          runAtSeconds: dateObj.getTime() / 1000,
-          configHash: configHash,
-          queuePath: getQueuePath(entry)
-        });
-      }
+    const tasksToSchedule = [];
+    const stats = { checked: 0, scheduled: 0, mismatched: 0, missing: 0 };
+
+    // Iterate dates in window
+    const targetDates = [];
+    let cursor = new Date(windowStart);
+    while (cursor <= windowEnd) {
+      targetDates.push(new Date(cursor));
+      cursor.setDate(cursor.getDate() + 1);
     }
 
-    if (tasksToSchedule.length === 0) {
-      return res.status(200).send('No root computations due in planning window.');
-    }
+    const dateLimit = pLimit(5);
+    await Promise.all(targetDates.map(dateObj => dateLimit(async () => {
+      const dateStr = dateObj.toISOString().split('T')[0];
+      const dailyStatus = await stateRepository.getDailyStatus(dateStr);
+
+      for (const entry of manifest) {
+        if (entry.pass !== 1) continue; // Only schedule Roots
+        if (!shouldRunOnDate(entry.schedule, dateObj)) continue;
+
+        stats.checked++;
+        const lastRun = dailyStatus.get(entry.name);
+        let reason = null;
+
+        if (!lastRun) {
+          reason = 'MISSING_RUN';
+          stats.missing++;
+        } else if (lastRun.hash !== entry.hash) {
+          reason = 'HASH_MISMATCH';
+          stats.mismatched++;
+        }
+
+        if (reason) {
+          tasksToSchedule.push({
+            computation: entry.originalName,
+            targetDate: dateStr,
+            runAtSeconds: getRunTimeSeconds(entry.schedule, dateObj),
+            configHash: entry.hash,
+            queuePath: getQueuePath(),
+            reason
+          });
+        }
+      }
+    })));
 
-    // 2. Dispatch to Cloud Tasks (Idempotent)
-    const results = await dispatchPlannedTasks(tasksToSchedule);
+    // --- PHASE 2: GARBAGE COLLECTION (Remove invalid tasks) ---
+    console.log('[Planner] Starting Garbage Collection...');
+    const deletedCount = await cleanupOrphanedTasks();
 
-    const created = results.filter(r => r.status === 'scheduled').length;
-    const exists = results.filter(r => r.status === 'exists').length;
+    // --- PHASE 3: DISPATCH ---
+    let scheduledCount = 0;
+    if (tasksToSchedule.length > 0) {
+      const results = await dispatchTasks(tasksToSchedule);
+      scheduledCount = results.filter(r => r.status === 'scheduled').length;
+    }
 
-    console.log(`[Planner] Window Processed. Created: ${created}, Already Existed: ${exists}, Errors: ${results.length - created - exists}`);
+    console.log(`[Planner] Complete. Scheduled: ${scheduledCount}, Deleted Orphans: ${deletedCount}`);
 
     return res.status(200).json({
-      status: 'ok',
-      window: `${PLANNING_WINDOW_HOURS}h`,
-      created,
-      exists,
-      details: results
+      status: 'success',
+      window: `${PLANNING_LOOKBACK_DAYS}d back, ${PLANNING_LOOKAHEAD_HOURS}h fwd`,
+      scheduled: scheduledCount,
+      deletedOrphans: deletedCount,
+      stats
     });
 
   } catch (error) {
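
In the reconciliation hunk above, each (date, root computation) pair is re-checked against the recorded run: a task is enqueued only when no run is recorded for that date, or when the recorded hash no longer matches the manifest's current hash. A condensed sketch of that decision; the Map-of-{ hash } shape for `dailyStatus` is inferred from the calls above, not confirmed elsewhere in this diff:

// Decision sketch; `dailyStatus` shape is an inference from usage above.
function reconcileReason(dailyStatus, entry) {
  const lastRun = dailyStatus.get(entry.name); // e.g. { hash: 'a1b2c3d4' } or undefined
  if (!lastRun) return 'MISSING_RUN';                      // never ran for this date
  if (lastRun.hash !== entry.hash) return 'HASH_MISMATCH'; // code/config changed since that run
  return null;                                             // up to date: nothing to schedule
}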
@@ -112,57 +136,32 @@ async function planComputations(req, res) {
 /**
  * ENTRY POINT 2: The Watchdog
  * Trigger: Cloud Scheduler -> "*\/15 * * * *" (Every 15 mins)
- * Goals: Find stuck tasks and re-queue them.
  */
 async function runWatchdog(req, res) {
   try {
     await initialize();
-
-    // 1. Find Zombies
     const zombies = await storageManager.findZombies(ZOMBIE_THRESHOLD_MINUTES);
-
-    // Filter out excessive attempts
-    const actionableZombies = [];
-    for (const z of zombies) {
-      if ((z.attempts || 0) >= 3) {
-        console.warn(`[Watchdog] Ignoring zombie ${z.name} (Checkpoint: ${z.checkpointId}) - Max attempts reached (${z.attempts})`);
-        continue;
-      }
-      actionableZombies.push(z);
-    }
-
-    if (actionableZombies.length === 0) {
-      return res.status(200).send('No recoverable zombies.');
-    }
+    const actionableZombies = zombies.filter(z => (z.attempts || 0) < 3);
 
-    console.log(`[Watchdog] 🧟 Found ${actionableZombies.length} zombies. Initiating recovery...`);
+    if (actionableZombies.length === 0) return res.status(200).send('No recoverable zombies.');
 
-    // 2. Claim & Recover
-    // We claim them first so the next watchdog doesn't grab them while we are dispatching
+    console.log(`[Watchdog] 🧟 Found ${actionableZombies.length} zombies. Recovering...`);
     await Promise.all(actionableZombies.map(z => storageManager.claimZombie(z.checkpointId)));
 
     const recoveryTasks = actionableZombies.map(z => {
       const entry = manifest.find(m => m.name === z.name);
-      if (!entry) {
-        console.error(`[Watchdog] Computation ${z.name} no longer exists in manifest. Cannot recover.`);
-        return null;
-      }
+      if (!entry) return null;
       return {
         computation: entry.originalName,
         targetDate: z.date,
         isRecovery: true,
         recoveryId: z.checkpointId,
-        queuePath: getQueuePath(entry)
+        queuePath: getQueuePath()
       };
     }).filter(Boolean);
 
-    const results = await dispatchRecoveryTasks(recoveryTasks);
-
-    return res.status(200).json({
-      status: 'recovered',
-      count: results.length,
-      details: results
-    });
+    const results = await dispatchTasks(recoveryTasks);
+    return res.status(200).json({ status: 'recovered', count: results.length });
 
   } catch (error) {
     console.error('[Watchdog] Error:', error);
@@ -171,131 +170,106 @@ async function runWatchdog(req, res) {
 }
 
 // =============================================================================
-// HELPER FUNCTIONS
+// ACTIVE GARBAGE COLLECTION LOGIC
 // =============================================================================
 
-/**
- * Calculates all execution times for a schedule within a start/end window.
- * Returns Array<Date>
- */
-function getOccurrencesInWindow(schedule, start, end) {
-  const times = [];
-  const [h, m] = (schedule.time || '02:00').split(':').map(Number);
-
-  // Clone start date to iterate
-  let current = new Date(start);
-  current.setUTCHours(h, m, 0, 0);
-
-  // If current is before start (e.g. window starts at 10:00, schedule is 02:00), move to tomorrow
-  if (current < start) {
-    current.setDate(current.getDate() + 1);
-  }
+async function cleanupOrphanedTasks() {
+  const parent = getQueuePath();
+  const validKebabNames = new Set(manifest.map(m => toKebab(m.originalName)));
+  const limit = pLimit(CLOUD_TASKS_CONCURRENCY);
+  let deletedCount = 0;
 
-  while (current <= end) {
-    let match = true;
-
-    // Weekly Check
-    if (schedule.frequency === 'weekly' && current.getUTCDay() !== (schedule.dayOfWeek ?? 0)) {
-      match = false;
-    }
-
-    // Monthly Check
-    if (schedule.frequency === 'monthly' && current.getUTCDate() !== (schedule.dayOfMonth ?? 1)) {
-      match = false;
-    }
+  try {
+    // Iterate over ALL tasks in the queue
+    // Note: listTasksAsync handles pagination automatically
+    const tasksToDelete = [];
+
+    for await (const task of tasksClient.listTasksAsync({ parent, responseView: 'BASIC' })) {
+      const taskNameFull = task.name;
+      const taskNameShort = taskNameFull.split('/').pop(); // e.g., root-my-comp-2023-01-01-abcdef
 
-    if (match) {
-      times.push(new Date(current));
+      // 1. Regex Match: Capture the computation name part
+      // Pattern: (root|recovery)-{kebabName}-{date}-{hash}
+      // Date is YYYY-MM-DD (10 chars)
+      // Hash is 8 chars (or more)
+      const match = taskNameShort.match(/^(?:root|recovery)-(.+)-\d{4}-\d{2}-\d{2}-/);
+
+      if (!match) continue; // Skip tasks that don't match our naming convention
+
+      const extractedKebabName = match[1];
+
+      // 2. Check Validity
+      if (!validKebabNames.has(extractedKebabName)) {
+        // ORPHAN DETECTED!
+        tasksToDelete.push(taskNameFull);
+      }
     }
 
-    // Advance 1 day
-    current.setDate(current.getDate() + 1);
+    if (tasksToDelete.length === 0) return 0;
+
+    console.log(`[Planner] 🗑️ Found ${tasksToDelete.length} orphaned tasks. Deleting...`);
+
+    // 3. Delete in parallel
+    await Promise.all(tasksToDelete.map(name => limit(async () => {
+      try {
+        await tasksClient.deleteTask({ name });
+        deletedCount++;
+      } catch (e) {
+        console.warn(`[Planner] Failed to delete orphan ${name}: ${e.message}`);
+      }
+    })));
+
+  } catch (e) {
+    console.error(`[Planner] GC Error: ${e.message}`);
   }
-
-  return times;
+
+  return deletedCount;
 }
 
-/**
- * Generates a short hash of the Scheduling Config + Pass.
- * If this changes, we want a new Task ID to enforce the new schedule.
- */
-function generateConfigHash(entry) {
-  const input = JSON.stringify(entry.schedule) + `|PASS:${entry.pass}`;
-  return crypto.createHash('md5').update(input).digest('hex').substring(0, 8);
+// =============================================================================
+// HELPERS
+// =============================================================================
+
+function shouldRunOnDate(schedule, dateObj) {
+  if (schedule.frequency === 'weekly' && dateObj.getUTCDay() !== (schedule.dayOfWeek ?? 0)) return false;
+  if (schedule.frequency === 'monthly' && dateObj.getUTCDate() !== (schedule.dayOfMonth ?? 1)) return false;
+  return true;
+}
+
+function getRunTimeSeconds(schedule, dateObj) {
+  const [h, m] = (schedule.time || '02:00').split(':').map(Number);
+  const runTime = new Date(dateObj);
+  runTime.setUTCHours(h, m, 0, 0);
+  return runTime.getTime() / 1000;
 }
 
-function getQueuePath(entry) {
+function getQueuePath() {
   const { projectId, location, queueName } = config.cloudTasks;
   return tasksClient.queuePath(projectId, location, queueName);
 }
 
-/**
- * Dispatches Planned Root Tasks
- * Uses deterministic naming for deduplication.
- */
-async function dispatchPlannedTasks(tasks) {
-  const limit = pLimit(CLOUD_TASKS_CONCURRENCY);
-  const { dispatcherUrl, serviceAccountEmail } = config.cloudTasks;
-
-  return Promise.all(tasks.map(t => limit(async () => {
-    try {
-      // Task Name: root-{name}-{date}-{configHash}
-      // If developer changes schedule -> hash changes -> new task created.
-      // If developer changes code but not schedule -> hash same -> existing task preserved.
-      const taskName = `${t.queuePath}/tasks/root-${toKebab(t.computation)}-${t.targetDate}-${t.configHash}`;
-
-      const payload = {
-        computationName: t.computation,
-        targetDate: t.targetDate,
-        source: 'scheduled',
-        configHash: t.configHash // Sent to dispatcher for potential validation
-      };
-
-      const task = {
-        httpRequest: {
-          httpMethod: 'POST',
-          url: dispatcherUrl,
-          headers: { 'Content-Type': 'application/json' },
-          body: Buffer.from(JSON.stringify(payload)).toString('base64'),
-          oidcToken: {
-            serviceAccountEmail,
-            audience: dispatcherUrl // <--- FIXED: Must exactly match function URL
-          }
-        },
-        scheduleTime: { seconds: t.runAtSeconds },
-        name: taskName
-      };
-
-      await tasksClient.createTask({ parent: t.queuePath, task });
-      return { computation: t.computation, date: t.targetDate, status: 'scheduled' };
-
-    } catch (e) {
-      if (e.code === 6 || e.code === 409) {
-        return { computation: t.computation, date: t.targetDate, status: 'exists' };
-      }
-      console.error(`[Planner] Failed to schedule ${t.computation}:`, e.message);
-      return { computation: t.computation, status: 'error', error: e.message };
-    }
-  })));
+function toKebab(str) {
+  return str.replace(/([a-z])([A-Z])/g, '$1-$2').toLowerCase().replace(/[^a-z0-9-]/g, '');
 }
 
-/**
- * Dispatches Recovery Tasks (Zombies)
- * Always creates unique task names to ensure retry.
- */
-async function dispatchRecoveryTasks(tasks) {
+async function dispatchTasks(tasks) {
   const limit = pLimit(CLOUD_TASKS_CONCURRENCY);
   const { dispatcherUrl, serviceAccountEmail } = config.cloudTasks;
 
   return Promise.all(tasks.map(t => limit(async () => {
     try {
-      // Unique ID for every recovery attempt
-      const taskName = `${t.queuePath}/tasks/recovery-${toKebab(t.computation)}-${t.recoveryId}-${Date.now()}`;
-
+      const name = t.isRecovery
+        ? `recovery-${toKebab(t.computation)}-${t.targetDate}-${Date.now()}`
+        : `root-${toKebab(t.computation)}-${t.targetDate}-${t.configHash}`;
+
+      const taskName = `${t.queuePath}/tasks/${name}`;
+
       const payload = {
         computationName: t.computation,
         targetDate: t.targetDate,
-        source: 'zombie-recovery'
+        source: t.isRecovery ? 'zombie-recovery' : 'scheduled',
+        configHash: t.configHash,
+        reason: t.reason
      };
 
       const task = {
@@ -304,26 +278,21 @@ async function dispatchRecoveryTasks(tasks) {
           url: dispatcherUrl,
           headers: { 'Content-Type': 'application/json' },
           body: Buffer.from(JSON.stringify(payload)).toString('base64'),
-          oidcToken: {
-            serviceAccountEmail,
-            audience: dispatcherUrl // <--- FIXED: Must exactly match function URL
-          }
+          oidcToken: { serviceAccountEmail, audience: dispatcherUrl }
        },
-        // Run Immediately (no scheduleTime)
         name: taskName
       };
 
-      await tasksClient.createTask({ parent: t.queuePath, task });
-      return { computation: t.computation, status: 'recovered' };
+      if (t.runAtSeconds) task.scheduleTime = { seconds: t.runAtSeconds };
 
+      await tasksClient.createTask({ parent: t.queuePath, task });
+      return { status: 'scheduled' };
     } catch (e) {
-      return { computation: t.computation, status: 'error', error: e.message };
+      if (e.code === 6 || e.code === 409) return { status: 'exists' };
+      console.error(`[Planner] Failed task ${t.computation}: ${e.message}`);
+      return { status: 'error' };
     }
   })));
 }
 
-function toKebab(str) {
-  return str.replace(/([a-z])([A-Z])/g, '$1-$2').toLowerCase().replace(/[^a-z0-9-]/g, '');
-}
-
 module.exports = { planComputations, runWatchdog };
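
The garbage collector and the dispatcher share one naming convention, which is what makes orphan detection possible: dispatchTasks writes names like root-{kebab}-{date}-{hash} or recovery-{kebab}-{date}-{timestamp}, and cleanupOrphanedTasks parses the kebab name back out with a regex and checks it against the manifest. A standalone round-trip sketch of that convention (the computation name and hash below are made-up sample values):

// Round-trip sketch of the task-naming convention; sample values are made up.
const toKebab = (str) =>
  str.replace(/([a-z])([A-Z])/g, '$1-$2').toLowerCase().replace(/[^a-z0-9-]/g, '');

const taskId = `root-${toKebab('MyDailyComp')}-2024-01-15-a1b2c3d4`;
// -> 'root-my-daily-comp-2024-01-15-a1b2c3d4'

// Same regex the GC uses: capture everything between the prefix and the date.
const m = taskId.match(/^(?:root|recovery)-(.+)-\d{4}-\d{2}-\d{2}-/);
console.log(m[1]); // 'my-daily-comp' — checked against the manifest's kebab names;
                   // a name absent from the manifest marks the task as an orphan to delete.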
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "bulltrackers-module",
-  "version": "1.0.769",
+  "version": "1.0.771",
   "description": "Helper Functions for Bulltrackers.",
   "main": "index.js",
   "files": [
@@ -1,51 +0,0 @@
-const { Computation } = require('../framework');
-
-class DebugSignedInUsers extends Computation {
-  static getConfig() {
-    return {
-      name: 'DebugSignedInUsers',
-      type: 'per-entity',
-      category: 'debug',
-
-      // CRITICAL: Tells the Orchestrator which partition to use
-      userType: 'SIGNED_IN_USER',
-
-      requires: {
-        // DRIVER TABLE: This MUST be first.
-        // The DAG will only create tasks for Entity IDs found in this filtered query.
-        'portfolio_snapshots': {
-          lookback: 0, // Just need today's existence
-          mandatory: true,
-          fields: ['user_id', 'date'],
-          filter: { user_type: 'SIGNED_IN_USER' }
-        }
-      },
-
-      storage: {
-        // Just log to console or a debug table for now
-        bigquery: false,
-        firestore: { enabled: false }
-      }
-    };
-  }
-
-  async process(context) {
-    const { entityId, data } = context;
-
-    // If the Orchestrator works correctly, we should ONLY see Signed-In Users here.
-    // We verify by returning their ID.
-
-    const row = data['portfolio_snapshots'];
-
-    // Sanity Check: If row exists, we are good.
-    if (row) {
-      this.setResult(entityId, {
-        status: 'Found',
-        userType: 'SIGNED_IN_USER',
-        checkedAt: new Date().toISOString()
-      });
-    }
-  }
-}
-
-module.exports = DebugSignedInUsers;