bulltrackers-module 1.0.324 → 1.0.326

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,8 @@
1
1
  /**
2
2
  * FILENAME: computation-system/helpers/computation_dispatcher.js
3
- * PURPOSE: Sequential Cursor-Based Dispatcher with Ledger Awareness, SimHash Stability, and Session Caching.
4
- * UPDATED: Fixed Ledger Blindness, Cursor Shifting, and Live Analysis Disconnect.
3
+ * PURPOSE: Sequential Cursor-Based Dispatcher.
4
+ * BEHAVIOR: Dispatch -> Wait ETA -> Next Date.
5
+ * UPDATED: Added "Zombie Protocol" to auto-recover stale locks.
5
6
  */
6
7
 
7
8
  const { getExpectedDateStrings, getEarliestDataDates, normalizeName, DEFINITIVE_EARLIEST_DATES } = require('../utils/utils.js');
@@ -15,10 +16,14 @@ const OOM_THRESHOLD_MB = 1500;
15
16
  const BASE_SECONDS_PER_WEIGHT_UNIT = 3;
16
17
  const SESSION_CACHE_DURATION_MS = 1000 * 60 * 30; // 30 Minutes
17
18
 
19
+ // [NEW] Zombie Timeout: Max Cloud Function run is 9m (540s).
20
+ // If no heartbeat/start within 15m, it's definitely dead.
21
+ const STALE_LOCK_THRESHOLD_MS = 1000 * 60 * 15;
22
+
18
23
  // =============================================================================
19
- // HELPER: Ledger Awareness (Prevents Race Conditions)
24
+ // HELPER: Ledger Awareness (Prevents Race Conditions & Clears Zombies)
20
25
  // =============================================================================
21
- async function filterActiveTasks(db, date, pass, tasks) {
26
+ async function filterActiveTasks(db, date, pass, tasks, logger) {
22
27
  if (!tasks || tasks.length === 0) return [];
23
28
 
24
29
  const checkPromises = tasks.map(async (t) => {
@@ -28,13 +33,37 @@ async function filterActiveTasks(db, date, pass, tasks) {
28
33
 
29
34
  if (snap.exists) {
30
35
  const data = snap.data();
31
- // Check PENDING, IN_PROGRESS, or "Ghost" (Completed < 1 min ago)
32
36
  const isActive = ['PENDING', 'IN_PROGRESS'].includes(data.status);
37
+
38
+ // 1. ZOMBIE CHECK (Recover Stale Locks)
39
+ if (isActive) {
40
+ // Prefer heartbeat, fall back to start time
41
+ const lastActivityTime = data.telemetry?.lastHeartbeat
42
+ ? new Date(data.telemetry.lastHeartbeat).getTime()
43
+ : (data.startedAt ? new Date(data.startedAt).getTime() : 0);
44
+
45
+ const timeSinceActive = Date.now() - lastActivityTime;
46
+
47
+ if (timeSinceActive > STALE_LOCK_THRESHOLD_MS) {
48
+ if (logger) {
49
+ logger.log('WARN', `[Dispatcher] 🧟 Breaking stale lock for ${taskName}. Inactive for ${(timeSinceActive/60000).toFixed(1)} mins.`);
50
+ }
51
+ // Return task (Re-dispatching it will overwrite the old lock in Firestore)
52
+ return t;
53
+ }
54
+
55
+ // If still active and recent, filter it out (let it run)
56
+ return null;
57
+ }
58
+
59
+ // 2. GHOST CHECK (Debounce immediate re-runs)
60
+ // If it finished less than 1 minute ago, don't re-dispatch immediately
61
+ // (prevents double-tap if latency is high)
33
62
  const isJustFinished = data.status === 'COMPLETED' &&
34
63
  data.completedAt &&
35
64
  (Date.now() - new Date(data.completedAt).getTime() < 60 * 1000);
36
65
 
37
- if (isActive || isJustFinished) return null;
66
+ if (isJustFinished) return null; // Filter out
38
67
  }
39
68
  return t;
40
69
  });
@@ -50,18 +79,13 @@ async function attemptSimHashResolution(dependencies, date, tasks, dailyStatus,
50
79
  const { db, logger } = dependencies;
51
80
  const resolvedTasks = [];
52
81
  const remainingTasks = [];
53
-
54
- // Cache for SimHashes to avoid redundant DB lookups in loop
55
82
  const simHashCache = new Map();
56
83
 
57
84
  for (const task of tasks) {
58
- // Only apply to Re-Runs (Hash Mismatches), not fresh runs (Missing Data)
59
85
  const currentStatus = dailyStatus ? dailyStatus[task.name] : null;
60
86
  const manifestItem = manifestMap.get(normalizeName(task.name));
61
87
 
62
88
  if (currentStatus && currentStatus.simHash && manifestItem) {
63
-
64
- // 1. Get the SimHash for the NEW code (from Registry)
65
89
  let newSimHash = simHashCache.get(manifestItem.hash);
66
90
  if (!newSimHash) {
67
91
  const simDoc = await db.collection('system_simhash_registry').doc(manifestItem.hash).get();
@@ -71,13 +95,12 @@ async function attemptSimHashResolution(dependencies, date, tasks, dailyStatus,
71
95
  }
72
96
  }
73
97
 
74
- // 2. Compare
75
98
  if (newSimHash && newSimHash === currentStatus.simHash) {
76
99
  resolvedTasks.push({
77
100
  name: task.name,
78
101
  hash: manifestItem.hash,
79
102
  simHash: newSimHash,
80
- prevStatus: currentStatus // Pass previous status to preserve other fields
103
+ prevStatus: currentStatus
81
104
  });
82
105
  continue;
83
106
  }
@@ -85,30 +108,24 @@ async function attemptSimHashResolution(dependencies, date, tasks, dailyStatus,
85
108
  remainingTasks.push(task);
86
109
  }
87
110
 
88
- // 3. Apply Updates for Stable Tasks
89
111
  if (resolvedTasks.length > 0) {
90
112
  const updatePayload = {};
91
-
92
113
  resolvedTasks.forEach(t => {
93
- // [FIXED] Construct full nested object to avoid dot-notation issues with .set()
94
- // We merge existing data (like resultHash) so we don't lose the valid calculation output
95
114
  updatePayload[t.name] = {
96
- ...(t.prevStatus || {}), // Keep existing resultHash, output, etc.
97
- hash: t.hash, // Update to new code hash
98
- simHash: t.simHash, // Confirmed stable simHash
115
+ ...(t.prevStatus || {}),
116
+ hash: t.hash,
117
+ simHash: t.simHash,
99
118
  reason: 'SimHash Stable (Auto-Resolved)',
100
119
  lastUpdated: new Date().toISOString()
101
120
  };
102
121
  });
103
-
104
- // Use set with merge: true. Now that keys are "clean" (no dots),
105
- // objects will merge correctly into the document structure.
106
122
  await db.collection('computation_status').doc(date).set(updatePayload, { merge: true });
107
123
  logger.log('INFO', `[SimHash] ⏩ Fast-forwarded ${resolvedTasks.length} tasks for ${date} (Logic Unchanged).`);
108
124
  }
109
125
 
110
126
  return remainingTasks;
111
127
  }
128
+
112
129
  // =============================================================================
113
130
  // HELPER: Stable Session Management (Solves Cursor Shifting)
114
131
  // =============================================================================
@@ -117,36 +134,21 @@ async function getStableDateSession(config, dependencies, passToRun, dateLimitSt
117
134
  const sessionId = `pass_${passToRun}_${dateLimitStr.replace(/-/g, '')}`;
118
135
  const sessionRef = db.collection('dispatcher_sessions').doc(sessionId);
119
136
 
120
- // 1. Try to Load Session
121
137
  if (!forceRebuild) {
122
138
  const sessionSnap = await sessionRef.get();
123
139
  if (sessionSnap.exists) {
124
140
  const data = sessionSnap.data();
125
- const age = Date.now() - new Date(data.createdAt).getTime();
126
- if (age < SESSION_CACHE_DURATION_MS) {
127
- logger.log('INFO', `[Session] 📂 Loaded stable session for Pass ${passToRun} (${data.dates.length} dates).`);
141
+ if ((Date.now() - new Date(data.createdAt).getTime()) < SESSION_CACHE_DURATION_MS) {
128
142
  return data.dates;
129
143
  }
130
144
  }
131
145
  }
132
146
 
133
- // 2. Rebuild Session (Expensive Scan)
134
147
  logger.log('INFO', `[Session] 🔄 Rebuilding dispatch session for Pass ${passToRun}...`);
135
148
  const earliestDates = await getEarliestDataDates(config, dependencies);
136
149
  const allDates = getExpectedDateStrings(earliestDates.absoluteEarliest, new Date(dateLimitStr + 'T00:00:00Z'));
137
150
 
138
- // We only want dates that *might* be dirty.
139
- // Optimization: We add ALL dates to the list. The dispatcher checks them individually.
140
- // Why? Because if we pre-filter here, we repeat the work of the dispatcher.
141
- // Better: Store the plain list of dates sorted descending (newest first usually better for backfills, ascending for standard).
142
- // Let's stick to Ascending (oldest first) as standard.
143
-
144
- await sessionRef.set({
145
- dates: allDates,
146
- createdAt: new Date().toISOString(),
147
- configHash: dateLimitStr // Simple versioning
148
- });
149
-
151
+ await sessionRef.set({ dates: allDates, createdAt: new Date().toISOString(), configHash: dateLimitStr });
150
152
  return allDates;
151
153
  }
152
154
 
@@ -157,9 +159,9 @@ async function dispatchComputationPass(config, dependencies, computationManifest
157
159
  const { logger, db } = dependencies;
158
160
  const pubsubUtils = new PubSubUtils(dependencies);
159
161
 
160
- const passToRun = String(reqBody.pass || config.COMPUTATION_PASS_TO_RUN || "1");
162
+ const passToRun = String(reqBody.pass || "1");
161
163
  const targetCursorN = parseInt(reqBody.cursorIndex || 1);
162
- const dateLimitStr = reqBody.date || config.date || "2025-01-01";
164
+ const dateLimitStr = reqBody.date || "2025-01-01";
163
165
  const forceRebuild = reqBody.forceRebuild === true;
164
166
 
165
167
  const manifestMap = new Map(computationManifest.map(c => [normalizeName(c.name), c]));
@@ -167,38 +169,26 @@ async function dispatchComputationPass(config, dependencies, computationManifest
167
169
  const calcsInThisPass = passes[passToRun] || [];
168
170
  const manifestWeightMap = new Map(computationManifest.map(c => [normalizeName(c.name), c.weight || 1.0]));
169
171
 
170
- if (!calcsInThisPass.length) {
171
- return { status: 'MOVE_TO_NEXT_PASS', dispatched: 0 };
172
- }
172
+ if (!calcsInThisPass.length) return { status: 'MOVE_TO_NEXT_PASS', dispatched: 0 };
173
173
 
174
- // 1. Get Stable Date List (Solves Shifting Cursor)
174
+ // 1. Get Stable Date List
175
175
  const sessionDates = await getStableDateSession(config, dependencies, passToRun, dateLimitStr, forceRebuild);
176
-
177
- if (!sessionDates || sessionDates.length === 0) {
178
- return { status: 'MOVE_TO_NEXT_PASS', dispatched: 0 };
179
- }
176
+ if (!sessionDates || sessionDates.length === 0) return { status: 'MOVE_TO_NEXT_PASS', dispatched: 0 };
180
177
 
181
- // 2. Select Date based on Cursor
178
+ // 2. Select Date
182
179
  let selectedDate = null;
183
180
  let selectedTasks = [];
184
- let isReroute = false;
185
- let isSweep = false;
186
181
 
187
- // Check bounds
188
182
  if (targetCursorN <= sessionDates.length) {
189
- // Normal Operation
190
183
  selectedDate = sessionDates[targetCursorN - 1];
191
184
  } else {
192
- // End of list
193
185
  return { status: 'MOVE_TO_NEXT_PASS', dispatched: 0 };
194
186
  }
195
187
 
196
- // 3. Analyze SPECIFIC Date (Live Analysis)
197
- // We only fetch status for the ONE date we are looking at + context
188
+ // 3. Analyze SPECIFIC Date
198
189
  if (selectedDate) {
199
- // A. Fetch Context
200
- const needsHistory = calcsInThisPass.some(c => c.isHistorical);
201
190
  const earliestDates = await getEarliestDataDates(config, dependencies);
191
+ const needsHistory = calcsInThisPass.some(c => c.isHistorical);
202
192
 
203
193
  let prevDailyStatusPromise = Promise.resolve(null);
204
194
  if (needsHistory) {
@@ -219,51 +209,41 @@ async function dispatchComputationPass(config, dependencies, computationManifest
219
209
  const report = analyzeDateExecution(selectedDate, calcsInThisPass, availability.status, dailyStatus, manifestMap, prevDailyStatus);
220
210
  let rawTasks = [...report.runnable, ...report.reRuns];
221
211
 
222
- // B. Apply SimHash Resolution (Problem #1)
223
212
  if (rawTasks.length > 0) {
224
213
  rawTasks = await attemptSimHashResolution(dependencies, selectedDate, rawTasks, dailyStatus, manifestMap);
225
- }
226
-
227
- // C. Apply Ledger Filter (Problem #2)
228
- if (rawTasks.length > 0) {
229
- selectedTasks = await filterActiveTasks(db, selectedDate, passToRun, rawTasks);
214
+
215
+ // [UPDATED] Pass logger to filterActiveTasks for zombie warnings
216
+ selectedTasks = await filterActiveTasks(db, selectedDate, passToRun, rawTasks, logger);
230
217
  }
231
218
 
232
- // D. Check for High-Mem Reroutes (OOM handling)
219
+ // OOM / High-Mem Reroute Check
233
220
  if (selectedTasks.length > 0) {
234
221
  const reroutes = await getHighMemReroutes(db, selectedDate, passToRun, selectedTasks);
235
222
  if (reroutes.length > 0) {
236
223
  selectedTasks = reroutes;
237
- isReroute = true;
238
224
  }
239
225
  }
240
- } else {
241
- logger.log('WARN', `[Dispatcher] Date ${selectedDate} skipped (Data Unavailable).`);
242
226
  }
243
227
  }
244
228
 
245
229
  // 4. Dispatch Logic
246
230
  if (selectedTasks.length === 0) {
247
- // Nothing to do for this date.
248
- // CRITICAL: We return dispatched: 0, but n_cursor_ignored: FALSE.
249
- // This tells workflow to increment cursor and check the next date in the Stable Session.
250
231
  return {
251
232
  status: 'CONTINUE_PASS',
252
233
  dateProcessed: selectedDate,
253
234
  dispatched: 0,
254
- n_cursor_ignored: false, // Proceed to next date
235
+ n_cursor_ignored: false,
255
236
  etaSeconds: 0,
256
237
  remainingDates: sessionDates.length - targetCursorN
257
238
  };
258
239
  }
259
240
 
260
- // 5. Publish Tasks
241
+ // 5. Send Tasks
261
242
  const totalweight = selectedTasks.reduce((sum, t) => sum + (manifestWeightMap.get(normalizeName(t.name)) || 1.0), 0);
262
243
  const currentDispatchId = crypto.randomUUID();
263
244
  const etaSeconds = Math.max(20, Math.ceil(totalweight * BASE_SECONDS_PER_WEIGHT_UNIT));
264
245
 
265
246
  const taskDetails = selectedTasks.map(t => `${t.name} (${t.reason})`);
266
-
267
247
  logger.log('INFO', `[Dispatcher] ✅ Dispatching ${selectedTasks.length} tasks for ${selectedDate}.`, {
268
248
  date: selectedDate,
269
249
  pass: passToRun,
@@ -305,13 +285,11 @@ async function dispatchComputationPass(config, dependencies, computationManifest
305
285
  }
306
286
  await Promise.all(pubPromises);
307
287
 
308
- // CRITICAL: We dispatched work. We want to check THIS date again next time
309
- // to ensure tasks completed. So we IGNORE cursor increment.
310
288
  return {
311
289
  status: 'CONTINUE_PASS',
312
290
  dateProcessed: selectedDate,
313
291
  dispatched: selectedTasks.length,
314
- n_cursor_ignored: true, // Hold cursor until this date is clean
292
+ n_cursor_ignored: false,
315
293
  etaSeconds: etaSeconds,
316
294
  remainingDates: sessionDates.length - targetCursorN
317
295
  };
@@ -2,6 +2,7 @@
2
2
  * FILENAME: bulltrackers-module/functions/computation-system/tools/BuildReporter.js
3
3
  * UPGRADED: Offloads heavy logic to a dedicated Cloud Function via Pub/Sub.
4
4
  * FEATURES: Patch versioning, data-drift detection (window changes), and checkpointed writes.
5
+ * FIX: Ensures ALL dates in the window are reported, even if analysis fails.
5
6
  */
6
7
 
7
8
  const { analyzeDateExecution } = require('../WorkflowOrchestrator');
@@ -62,7 +63,7 @@ async function handleBuildReportTrigger(message, context, config, dependencies,
62
63
  function getSystemFingerprint(manifest) {
63
64
  const sortedManifestHashes = manifest.map(c => c.hash).sort().join('|');
64
65
  return crypto.createHash('sha256')
65
- .update(sortedManifestHashes + SYSTEM_EPOCH + REPORTER_EPOCH) // [UPDATED]
66
+ .update(sortedManifestHashes + SYSTEM_EPOCH + REPORTER_EPOCH)
66
67
  .digest('hex');
67
68
  }
68
69
 
@@ -187,20 +188,20 @@ async function generateBuildReport(config, dependencies, manifest) {
187
188
  const lastEarliestStr = latest?.windowEarliest || 'NONE';
188
189
  const windowChanged = currentEarliestStr !== lastEarliestStr;
189
190
 
190
- const epochChanged = latest?.reporterEpoch !== REPORTER_EPOCH; // [NEW]
191
+ const epochChanged = latest?.reporterEpoch !== REPORTER_EPOCH;
191
192
 
192
193
  // If fingerprints match AND the window is the same, we can truly skip.
193
194
  if (latest &&
194
195
  latest.systemFingerprint === currentFingerprint &&
195
196
  !windowChanged &&
196
- !epochChanged) { // [NEW]
197
+ !epochChanged) {
197
198
  logger.log('INFO', `[BuildReporter] ⚡ System fingerprint, window, and reporter epoch stable. Skipping report.`);
198
199
  return { success: true, status: 'SKIPPED_IDENTICAL' };
199
200
  }
200
201
 
201
202
  // Determine primary reason for logging
202
203
  let reason = 'Code Change';
203
- if (epochChanged) reason = 'Master Epoch Override'; // [NEW]
204
+ if (epochChanged) reason = 'Master Epoch Override';
204
205
  else if (windowChanged) reason = 'Data Window Drift';
205
206
 
206
207
  // Increment patch version
@@ -251,7 +252,7 @@ async function generateBuildReport(config, dependencies, manifest) {
251
252
  // Initialize the build record
252
253
  await db.collection(BUILD_RECORDS_COLLECTION).doc(buildId).set(reportHeader);
253
254
 
254
- let totalRun = 0, totalReRun = 0, totalStable = 0;
255
+ let totalRun = 0, totalReRun = 0, totalStable = 0, totalErrors = 0;
255
256
  const limit = pLimit(10); // Concurrency for fetching statuses
256
257
 
257
258
  // Process dates in chunks of 5 for checkpointed writing
@@ -336,17 +337,33 @@ async function generateBuildReport(config, dependencies, manifest) {
336
337
  // Write detailed date record
337
338
  await db.collection(BUILD_RECORDS_COLLECTION).doc(buildId).collection('details').doc(dateStr).set(dateSummary);
338
339
 
339
- return { run: dateSummary.run.length, rerun: dateSummary.rerun.length, stable: dateSummary.stable.length };
340
+ return { run: dateSummary.run.length, rerun: dateSummary.rerun.length, stable: dateSummary.stable.length, error: false };
340
341
  } catch (err) {
341
342
  logger.log('ERROR', `[BuildReporter] Analysis failed for ${dateStr}: ${err.message}`);
342
- return { run: 0, rerun: 0, stable: 0 };
343
+
344
+ // [FIX] Write error record so the date appears in the report
345
+ await db.collection(BUILD_RECORDS_COLLECTION).doc(buildId).collection('details').doc(dateStr).set({
346
+ error: err.message,
347
+ status: 'ANALYSIS_FAILED',
348
+ meta: { totalIncluded: 0, totalExpected: 0, match: false }
349
+ }).catch(e => logger.log('ERROR', `Failed to write error record for ${dateStr}: ${e.message}`));
350
+
351
+ return { run: 0, rerun: 0, stable: 0, error: true };
343
352
  }
344
353
  })));
345
354
 
346
355
  // Accumulate stats and write a progress checkpoint
347
- results.forEach(res => { totalRun += res.run; totalReRun += res.rerun; totalStable += res.stable; });
356
+ results.forEach(res => {
357
+ if (res.error) totalErrors++;
358
+ else {
359
+ totalRun += res.run;
360
+ totalReRun += res.rerun;
361
+ totalStable += res.stable;
362
+ }
363
+ });
364
+
348
365
  await db.collection(BUILD_RECORDS_COLLECTION).doc(buildId).update({
349
- checkpoint: `Processed ${i + dateBatch.length}/${datesToCheck.length} dates`
366
+ checkpoint: `Processed ${Math.min(i + dateBatch.length, datesToCheck.length)}/${datesToCheck.length} dates`
350
367
  });
351
368
  }
352
369
 
@@ -355,6 +372,7 @@ async function generateBuildReport(config, dependencies, manifest) {
355
372
  totalReRuns: totalReRun,
356
373
  totalNew: totalRun,
357
374
  totalStable: totalStable,
375
+ totalErrors: totalErrors,
358
376
  scanRange: `${datesToCheck[0]} to ${datesToCheck[datesToCheck.length-1]}`
359
377
  };
360
378
 
@@ -362,7 +380,7 @@ async function generateBuildReport(config, dependencies, manifest) {
362
380
  await db.collection(BUILD_RECORDS_COLLECTION).doc(buildId).set(reportHeader);
363
381
  await db.collection(BUILD_RECORDS_COLLECTION).doc('latest').set({ ...reportHeader, note: "Latest completed build report." });
364
382
 
365
- logger.log('SUCCESS', `[BuildReporter] Build ${buildId} completed. Re-runs: ${totalReRun}, Stable: ${totalStable}, New: ${totalRun}.`);
383
+ logger.log('SUCCESS', `[BuildReporter] Build ${buildId} completed. Re-runs: ${totalReRun}, Stable: ${totalStable}, New: ${totalRun}, Errors: ${totalErrors}.`);
366
384
 
367
385
  return { success: true, buildId, summary: reportHeader.summary };
368
386
  }
@@ -1,5 +1,5 @@
1
1
  # Cloud Workflows: Precision Cursor-Based Orchestrator
2
- # UPDATED: Added satiation detection to break early on 0 remaining dates.
2
+ # SIMPLE MODE: Dispatch -> Wait ETA -> Next Date
3
3
 
4
4
  main:
5
5
  params: [input]
@@ -20,7 +20,6 @@ main:
20
20
  assign:
21
21
  - n_cursor: 1
22
22
  - pass_complete: false
23
- - consecutive_empty_dispatches: 0
24
23
 
25
24
  - sequential_date_loop:
26
25
  switch:
@@ -39,26 +38,25 @@ main:
39
38
 
40
39
  - evaluate_dispatch:
41
40
  switch:
41
+ # 1. End of Session (Dispatcher reached end of date list)
42
42
  - condition: '${dispatch_res.body.status == "MOVE_TO_NEXT_PASS"}'
43
43
  assign:
44
44
  - pass_complete: true
45
45
 
46
- # NEW: Explicit Satiation Check
47
- - condition: '${dispatch_res.body.status == "CONTINUE_PASS" and dispatch_res.body.remainingDates == 0}'
46
+ # 2. Satiation Check (Specific to date/logic)
47
+ - condition: '${dispatch_res.body.status == "CONTINUE_PASS" and dispatch_res.body.remainingDates == 0 and dispatch_res.body.dispatched == 0}'
48
48
  steps:
49
49
  - log_satiation:
50
50
  call: sys.log
51
51
  args:
52
- text: '${"Pass " + pass_id + " - ✅ Pass satiated (0 remaining dates). Moving to next pass."}'
52
+ text: '${"Pass " + pass_id + " - ✅ Pass satiated (0 remaining). Next pass."}'
53
53
  - mark_complete:
54
54
  assign:
55
55
  - pass_complete: true
56
56
 
57
+ # 3. Work Dispatched: Wait ETA -> Move Next (Ignored flag is FALSE)
57
58
  - condition: '${dispatch_res.body.dispatched > 0}'
58
59
  steps:
59
- - reset_retry_counter:
60
- assign:
61
- - consecutive_empty_dispatches: 0
62
60
  - wait_for_completion:
63
61
  call: sys.sleep
64
62
  args:
@@ -69,26 +67,18 @@ main:
69
67
  - next_loop_work:
70
68
  next: sequential_date_loop
71
69
 
70
+ # 4. No Work (Clean or Busy): Move Next Immediately
72
71
  - condition: '${dispatch_res.body.dispatched == 0}'
73
72
  steps:
74
- - increment_retry:
73
+ - wait_short:
74
+ call: sys.sleep
75
+ args:
76
+ seconds: 2 # Tiny debounce
77
+ - update_cursor_retry:
75
78
  assign:
76
- - consecutive_empty_dispatches: '${consecutive_empty_dispatches + 1}'
77
- - check_break_condition:
78
- switch:
79
- - condition: '${consecutive_empty_dispatches >= 3}'
80
- assign:
81
- - pass_complete: true
82
- - condition: '${true}'
83
- steps:
84
- - wait_short:
85
- call: sys.sleep
86
- args:
87
- seconds: 5
88
- - update_cursor_retry:
89
- assign:
90
- - n_cursor: '${if(dispatch_res.body.n_cursor_ignored, n_cursor, n_cursor + 1)}'
91
- - next_loop_retry:
92
- next: sequential_date_loop
79
+ # Dispatcher sends n_cursor_ignored=false, so we increment.
80
+ - n_cursor: '${if(dispatch_res.body.n_cursor_ignored, n_cursor, n_cursor + 1)}'
81
+ - next_loop_retry:
82
+ next: sequential_date_loop
93
83
  - finish:
94
84
  return: "Pipeline Execution Satiated and Complete"
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bulltrackers-module",
3
- "version": "1.0.324",
3
+ "version": "1.0.326",
4
4
  "description": "Helper Functions for Bulltrackers.",
5
5
  "main": "index.js",
6
6
  "files": [