@nicnocquee/dataqueue 1.24.0 → 1.26.0-beta.20260223195940
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -0
- package/ai/build-docs-content.ts +96 -0
- package/ai/build-llms-full.ts +42 -0
- package/ai/docs-content.json +278 -0
- package/ai/rules/advanced.md +132 -0
- package/ai/rules/basic.md +159 -0
- package/ai/rules/react-dashboard.md +83 -0
- package/ai/skills/dataqueue-advanced/SKILL.md +320 -0
- package/ai/skills/dataqueue-core/SKILL.md +234 -0
- package/ai/skills/dataqueue-react/SKILL.md +189 -0
- package/dist/cli.cjs +1149 -14
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.d.cts +66 -1
- package/dist/cli.d.ts +66 -1
- package/dist/cli.js +1146 -13
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +4630 -928
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1033 -15
- package/dist/index.d.ts +1033 -15
- package/dist/index.js +4626 -929
- package/dist/index.js.map +1 -1
- package/dist/mcp-server.cjs +186 -0
- package/dist/mcp-server.cjs.map +1 -0
- package/dist/mcp-server.d.cts +32 -0
- package/dist/mcp-server.d.ts +32 -0
- package/dist/mcp-server.js +175 -0
- package/dist/mcp-server.js.map +1 -0
- package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +2 -2
- package/migrations/1751186053000_add_job_events_table.sql +12 -8
- package/migrations/1751984773000_add_tags_to_job_queue.sql +1 -1
- package/migrations/1765809419000_add_force_kill_on_timeout_to_job_queue.sql +1 -1
- package/migrations/1771100000000_add_idempotency_key_to_job_queue.sql +7 -0
- package/migrations/1781200000000_add_wait_support.sql +12 -0
- package/migrations/1781200000001_create_waitpoints_table.sql +18 -0
- package/migrations/1781200000002_add_performance_indexes.sql +34 -0
- package/migrations/1781200000003_add_progress_to_job_queue.sql +7 -0
- package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
- package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
- package/package.json +40 -23
- package/src/backend.ts +328 -0
- package/src/backends/postgres.ts +2040 -0
- package/src/backends/redis-scripts.ts +865 -0
- package/src/backends/redis.test.ts +1906 -0
- package/src/backends/redis.ts +1792 -0
- package/src/cli.test.ts +82 -6
- package/src/cli.ts +73 -10
- package/src/cron.test.ts +126 -0
- package/src/cron.ts +40 -0
- package/src/db-util.ts +4 -2
- package/src/index.test.ts +688 -1
- package/src/index.ts +277 -39
- package/src/init-command.test.ts +449 -0
- package/src/init-command.ts +709 -0
- package/src/install-mcp-command.test.ts +216 -0
- package/src/install-mcp-command.ts +185 -0
- package/src/install-rules-command.test.ts +218 -0
- package/src/install-rules-command.ts +233 -0
- package/src/install-skills-command.test.ts +176 -0
- package/src/install-skills-command.ts +124 -0
- package/src/mcp-server.test.ts +162 -0
- package/src/mcp-server.ts +231 -0
- package/src/processor.test.ts +559 -18
- package/src/processor.ts +456 -49
- package/src/queue.test.ts +682 -6
- package/src/queue.ts +135 -944
- package/src/supervisor.test.ts +340 -0
- package/src/supervisor.ts +162 -0
- package/src/test-util.ts +32 -0
- package/src/types.ts +726 -17
- package/src/wait.test.ts +698 -0
- package/LICENSE +0 -21
package/src/processor.ts
CHANGED
@@ -1,4 +1,3 @@
-import { Pool } from 'pg';
 import { Worker } from 'worker_threads';
 import {
   JobRecord,
@@ -8,13 +7,13 @@ import {
   JobType,
   FailureReason,
   JobHandlers,
+  JobContext,
+  OnTimeoutCallback,
+  WaitSignal,
+  WaitDuration,
+  WaitTokenResult,
 } from './types.js';
-import {
-  getNextBatch,
-  completeJob,
-  failJob,
-  setPendingReasonForUnpickedJobs,
-} from './queue.js';
+import { QueueBackend } from './backend.js';
 import { log, setLogContext } from './log-context.js';

 /**
@@ -253,6 +252,254 @@ async function runHandlerInWorker<
   });
 }

+/**
+ * Convert a WaitDuration to a target Date.
+ */
+function calculateWaitUntil(duration: WaitDuration): Date {
+  const now = Date.now();
+  let ms = 0;
+  if (duration.seconds) ms += duration.seconds * 1000;
+  if (duration.minutes) ms += duration.minutes * 60 * 1000;
+  if (duration.hours) ms += duration.hours * 60 * 60 * 1000;
+  if (duration.days) ms += duration.days * 24 * 60 * 60 * 1000;
+  if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1000;
+  if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1000;
+  if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1000;
+  if (ms <= 0) {
+    throw new Error(
+      'waitFor duration must be positive. Provide at least one positive duration field.',
+    );
+  }
+  return new Date(now + ms);
+}
+
+/**
+ * Create a no-op JobContext for cases where prolong/onTimeout are not supported
+ * (e.g. forceKillOnTimeout mode or no timeout set).
+ */
+function createNoOpContext(
+  backend: QueueBackend,
+  jobId: number,
+  reason: string,
+): JobContext {
+  return {
+    prolong: () => {
+      log(`prolong() called but ignored: ${reason}`);
+    },
+    onTimeout: () => {
+      log(`onTimeout() called but ignored: ${reason}`);
+    },
+    run: async <T>(_stepName: string, fn: () => Promise<T>): Promise<T> => {
+      // In no-op context (forceKillOnTimeout), just execute the function directly
+      return fn();
+    },
+    waitFor: async () => {
+      throw new Error(
+        `waitFor() is not supported when forceKillOnTimeout is enabled. ${reason}`,
+      );
+    },
+    waitUntil: async () => {
+      throw new Error(
+        `waitUntil() is not supported when forceKillOnTimeout is enabled. ${reason}`,
+      );
+    },
+    createToken: async () => {
+      throw new Error(
+        `createToken() is not supported when forceKillOnTimeout is enabled. ${reason}`,
+      );
+    },
+    waitForToken: async () => {
+      throw new Error(
+        `waitForToken() is not supported when forceKillOnTimeout is enabled. ${reason}`,
+      );
+    },
+    setProgress: async (percent: number) => {
+      if (percent < 0 || percent > 100)
+        throw new Error('Progress must be between 0 and 100');
+      await backend.updateProgress(jobId, Math.round(percent));
+    },
+  };
+}
+
+/**
+ * Pre-process stepData before handler re-invocation.
+ * Marks pending waits as completed and fetches token outputs.
+ */
+async function resolveCompletedWaits(
+  backend: QueueBackend,
+  stepData: Record<string, any>,
+): Promise<void> {
+  for (const key of Object.keys(stepData)) {
+    if (!key.startsWith('__wait_')) continue;
+    const entry = stepData[key];
+    if (!entry || typeof entry !== 'object' || entry.completed) continue;
+
+    if (entry.type === 'duration' || entry.type === 'date') {
+      // Time-based wait has elapsed (we got picked up, so it must have)
+      stepData[key] = { ...entry, completed: true };
+    } else if (entry.type === 'token' && entry.tokenId) {
+      // Token-based wait -- fetch the waitpoint result
+      const wp = await backend.getWaitpoint(entry.tokenId);
+      if (wp && wp.status === 'completed') {
+        stepData[key] = {
+          ...entry,
+          completed: true,
+          result: { ok: true, output: wp.output },
+        };
+      } else if (wp && wp.status === 'timed_out') {
+        stepData[key] = {
+          ...entry,
+          completed: true,
+          result: { ok: false, error: 'Token timed out' },
+        };
+      }
+      // If still waiting (shouldn't happen), leave as pending
+    }
+  }
+}
+
+/**
+ * Build the extended JobContext with step tracking and wait support.
+ * Works with any QueueBackend (Postgres or Redis).
+ */
+function buildWaitContext(
+  backend: QueueBackend,
+  jobId: number,
+  stepData: Record<string, any>,
+  baseCtx: {
+    prolong: JobContext['prolong'];
+    onTimeout: JobContext['onTimeout'];
+  },
+): JobContext {
+  // Wait counter always starts at 0 per invocation.
+  // The handler replays from the top each time, so the counter position
+  // must match the order of waitFor/waitUntil/waitForToken calls in code.
+  let waitCounter = 0;
+
+  const ctx: JobContext = {
+    prolong: baseCtx.prolong,
+    onTimeout: baseCtx.onTimeout,
+
+    run: async <T>(stepName: string, fn: () => Promise<T>): Promise<T> => {
+      // Check if step was already completed in a previous invocation
+      const cached = stepData[stepName];
+      if (cached && typeof cached === 'object' && cached.__completed) {
+        log(`Step "${stepName}" replayed from cache for job ${jobId}`);
+        return cached.result as T;
+      }
+
+      // Execute the step
+      const result = await fn();
+
+      // Persist step result
+      stepData[stepName] = { __completed: true, result };
+      await backend.updateStepData(jobId, stepData);
+
+      return result;
+    },
+
+    waitFor: async (duration: WaitDuration): Promise<void> => {
+      const waitKey = `__wait_${waitCounter++}`;
+
+      // Check if this wait was already completed (from a previous invocation)
+      const cached = stepData[waitKey];
+      if (cached && typeof cached === 'object' && cached.completed) {
+        log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
+        return;
+      }
+
+      // Calculate when to resume
+      const waitUntilDate = calculateWaitUntil(duration);
+
+      // Record this wait as pending in step data
+      stepData[waitKey] = { type: 'duration', completed: false };
+
+      // Throw WaitSignal to pause the handler
+      throw new WaitSignal('duration', waitUntilDate, undefined, stepData);
+    },
+
+    waitUntil: async (date: Date): Promise<void> => {
+      const waitKey = `__wait_${waitCounter++}`;
+
+      // Check if this wait was already completed
+      const cached = stepData[waitKey];
+      if (cached && typeof cached === 'object' && cached.completed) {
+        log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
+        return;
+      }
+
+      // Record this wait as pending
+      stepData[waitKey] = { type: 'date', completed: false };
+
+      // Throw WaitSignal to pause the handler
+      throw new WaitSignal('date', date, undefined, stepData);
+    },
+
+    createToken: async (options?) => {
+      const token = await backend.createWaitpoint(jobId, options);
+      return token;
+    },
+
+    waitForToken: async <T = any>(
+      tokenId: string,
+    ): Promise<WaitTokenResult<T>> => {
+      const waitKey = `__wait_${waitCounter++}`;
+
+      // Check if this wait was already completed
+      const cached = stepData[waitKey];
+      if (cached && typeof cached === 'object' && cached.completed) {
+        log(
+          `Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`,
+        );
+        return cached.result as WaitTokenResult<T>;
+      }
+
+      // Check if the token is already completed (e.g., completed while job was still processing)
+      const wp = await backend.getWaitpoint(tokenId);
+      if (wp && wp.status === 'completed') {
+        const result: WaitTokenResult<T> = {
+          ok: true,
+          output: wp.output as T,
+        };
+        stepData[waitKey] = {
+          type: 'token',
+          tokenId,
+          completed: true,
+          result,
+        };
+        await backend.updateStepData(jobId, stepData);
+        return result;
+      }
+      if (wp && wp.status === 'timed_out') {
+        const result: WaitTokenResult<T> = {
+          ok: false,
+          error: 'Token timed out',
+        };
+        stepData[waitKey] = {
+          type: 'token',
+          tokenId,
+          completed: true,
+          result,
+        };
+        await backend.updateStepData(jobId, stepData);
+        return result;
+      }
+
+      // Token not yet completed -- save pending state and throw WaitSignal
+      stepData[waitKey] = { type: 'token', tokenId, completed: false };
+      throw new WaitSignal('token', undefined, tokenId, stepData);
+    },
+
+    setProgress: async (percent: number) => {
+      if (percent < 0 || percent > 100)
+        throw new Error('Progress must be between 0 and 100');
+      await backend.updateProgress(jobId, Math.round(percent));
+    },
+  };
+
+  return ctx;
+}
+
 /**
  * Process a single job using the provided handler map
  */
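The replay contract implemented above is worth seeing from the handler's side: on every (re)invocation the handler runs from the top, `ctx.run` returns cached results for steps that already completed, and each `waitFor`/`waitUntil`/`waitForToken` call consumes the next `__wait_N` slot in order. A minimal sketch against that contract (the `trial_followup` job type, the stubbed helpers, and the import path are illustrative assumptions, not part of this diff):

```ts
import type { JobContext } from '@nicnocquee/dataqueue'; // assumes JobContext is re-exported from the package root

// Hypothetical app-level helpers, stubbed for illustration.
declare function sendWelcomeEmail(userId: string): Promise<string>;
declare function chargeCustomer(userId: string, emailId: string): Promise<void>;

const handlers = {
  trial_followup: async (
    payload: { userId: string },
    _signal: AbortSignal,
    ctx: JobContext,
  ) => {
    // Runs once; on replay the persisted result is read back from stepData.
    const emailId = await ctx.run('send-welcome', () =>
      sendWelcomeEmail(payload.userId),
    );

    // First invocation: records __wait_0 as pending and throws WaitSignal,
    // parking the job. Next invocation: __wait_0 is completed, so this
    // returns immediately and the email above is not re-sent.
    await ctx.waitFor({ days: 7 });

    await ctx.run('charge', () => chargeCustomer(payload.userId, emailId));
    await ctx.setProgress(100);
  },
};
```

Because the wait counter restarts at 0 on each invocation, waits must execute in a deterministic order; a `waitFor` placed inside a data-dependent branch could misalign the `__wait_N` keys between replays.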
@@ -260,20 +507,18 @@ export async function processJobWithHandlers<
   PayloadMap,
   T extends keyof PayloadMap & string,
 >(
-  pool: Pool,
+  backend: QueueBackend,
   job: JobRecord<PayloadMap, T>,
   jobHandlers: JobHandlers<PayloadMap>,
 ): Promise<void> {
   const handler = jobHandlers[job.jobType];

   if (!handler) {
-    await setPendingReasonForUnpickedJobs(
-      pool,
+    await backend.setPendingReasonForUnpickedJobs(
       `No handler registered for job type: ${job.jobType}`,
       job.jobType,
     );
-    await failJob(
-      pool,
+    await backend.failJob(
       job.id,
       new Error(`No handler registered for job type: ${job.jobType}`),
       FailureReason.NoHandler,
@@ -281,6 +526,19 @@ export async function processJobWithHandlers<
     return;
   }

+  // Load step data (may contain completed steps from previous invocations)
+  const stepData: Record<string, any> = { ...(job.stepData || {}) };
+
+  // If resuming from a wait, resolve any pending wait entries
+  const hasStepHistory = Object.keys(stepData).some((k) =>
+    k.startsWith('__wait_'),
+  );
+  if (hasStepHistory) {
+    await resolveCompletedWaits(backend, stepData);
+    // Persist the resolved step data
+    await backend.updateStepData(job.id, stepData);
+  }
+
   // Per-job timeout logic
   const timeoutMs = job.timeoutMs ?? undefined;
   const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
@@ -288,24 +546,98 @@
   const controller = new AbortController();
   try {
     // If forceKillOnTimeout is true, run handler in a worker thread
+    // Note: wait features are not available in forceKillOnTimeout mode
    if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
       await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
     } else {
-      //
-
-
+      // Build the JobContext for prolong/onTimeout support
+      let onTimeoutCallback: OnTimeoutCallback | undefined;
+
+      // Reference to the reject function of the timeout promise so we can re-arm it
+      let timeoutReject: ((error: Error) => void) | undefined;
+
+      /**
+       * Arms (or re-arms) the timeout. When it fires:
+       * 1. If an onTimeout callback is registered, call it first.
+       *    - If it returns a positive number, re-arm with that duration and update DB.
+       *    - Otherwise, proceed with abort.
+       * 2. If no onTimeout callback, proceed with abort.
+       */
+      const armTimeout = (ms: number) => {
+        if (timeoutId) clearTimeout(timeoutId);
+        timeoutId = setTimeout(() => {
+          // Check if an onTimeout callback wants to extend
+          if (onTimeoutCallback) {
+            try {
+              const extension = onTimeoutCallback();
+              if (typeof extension === 'number' && extension > 0) {
+                // Extend: re-arm timeout and update DB
+                backend.prolongJob(job.id).catch(() => {});
+                armTimeout(extension);
+                return;
+              }
+            } catch (callbackError) {
+              log(
+                `onTimeout callback threw for job ${job.id}: ${callbackError}`,
+              );
+              // Treat as "no extension" and proceed with abort
+            }
+          }
+          // No extension -- proceed with abort
+          controller.abort();
+          const timeoutError = new Error(`Job timed out after ${ms} ms`);
+          // @ts-ignore
+          timeoutError.failureReason = FailureReason.Timeout;
+          if (timeoutReject) {
+            timeoutReject(timeoutError);
+          }
+        }, ms);
+      };
+
+      const hasTimeout = timeoutMs != null && timeoutMs > 0;
+
+      // Build base prolong/onTimeout context
+      const baseCtx = hasTimeout
+        ? {
+            prolong: (ms?: number) => {
+              const duration = ms ?? timeoutMs;
+              if (duration != null && duration > 0) {
+                armTimeout(duration);
+                // Update DB locked_at to prevent reclaimStuckJobs
+                backend.prolongJob(job.id).catch(() => {});
+              }
+            },
+            onTimeout: (callback: OnTimeoutCallback) => {
+              onTimeoutCallback = callback;
+            },
+          }
+        : {
+            prolong: () => {
+              log('prolong() called but ignored: job has no timeout set');
+            },
+            onTimeout: () => {
+              log('onTimeout() called but ignored: job has no timeout set');
+            },
+          };
+
+      // Build context: full wait support for all backends
+      const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
+
+      // If forceKillOnTimeout was set but timeoutMs was missing, warn
+      if (forceKillOnTimeout && !hasTimeout) {
+        log(
+          `forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`,
+        );
+      }
+
+      const jobPromise = handler(job.payload, controller.signal, ctx);
+
+      if (hasTimeout) {
        await Promise.race([
          jobPromise,
-          new Promise((_, reject) => {
-            timeoutId = setTimeout(() => {
-              controller.abort();
-              const timeoutError = new Error(
-                `Job timed out after ${timeoutMs} ms`,
-              );
-              // @ts-ignore
-              timeoutError.failureReason = FailureReason.Timeout;
-              reject(timeoutError);
-            }, timeoutMs);
+          new Promise<never>((_, reject) => {
+            timeoutReject = reject;
+            armTimeout(timeoutMs!);
           }),
         ]);
       } else {
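With the re-armable timer above, a handler can extend its own deadline in two ways: imperatively via `ctx.prolong(ms?)`, which defaults to the original `timeoutMs`, or reactively by registering an `onTimeout` callback that returns a positive number of milliseconds when it wants an extension. A hedged sketch (the job type and `processChunk` helper are hypothetical, and returning `0` to decline assumes `OnTimeoutCallback` permits a non-positive return):

```ts
import type { JobContext } from '@nicnocquee/dataqueue'; // assumed re-export

declare function processChunk(chunk: string): Promise<void>; // hypothetical helper

const handlers = {
  video_encode: async (
    payload: { chunks: string[] },
    signal: AbortSignal,
    ctx: JobContext,
  ) => {
    // Reactive: when the timeout fires the callback runs first; a positive
    // return value re-arms the timer for that many milliseconds.
    let extensionsLeft = 1;
    ctx.onTimeout(() => (extensionsLeft-- > 0 ? 30_000 : 0));

    for (const chunk of payload.chunks) {
      if (signal.aborted) return; // abort fires once no extension is granted
      await processChunk(chunk);
      // Imperative: re-arm with the original timeoutMs after each unit of
      // real progress; prolongJob also bumps the DB lock so the job is not
      // reclaimed as stuck.
      ctx.prolong();
    }
  },
};
```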
@@ -313,21 +645,38 @@ export async function processJobWithHandlers<
      }
    }
    if (timeoutId) clearTimeout(timeoutId);
-    await completeJob(pool, job.id);
+
+    // Job completed successfully -- complete via backend
+    await backend.completeJob(job.id);
  } catch (error) {
    if (timeoutId) clearTimeout(timeoutId);
+
+    // Check if this is a WaitSignal (not a real error)
+    if (error instanceof WaitSignal) {
+      log(
+        `Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? 'none'}, tokenId=${error.tokenId ?? 'none'}`,
+      );
+      await backend.waitJob(job.id, {
+        waitUntil: error.waitUntil,
+        waitTokenId: error.tokenId,
+        stepData: error.stepData,
+      });
+      return;
+    }
+
+    // Real error -- handle as failure
    console.error(`Error processing job ${job.id}:`, error);
    let failureReason = FailureReason.HandlerError;
    if (
      error &&
      typeof error === 'object' &&
      'failureReason' in error &&
-      (error as any).failureReason === FailureReason.Timeout
+      (error as { failureReason?: FailureReason }).failureReason ===
+        FailureReason.Timeout
    ) {
      failureReason = FailureReason.Timeout;
    }
-    await failJob(
-      pool,
+    await backend.failJob(
      job.id,
      error instanceof Error ? error : new Error(String(error)),
      failureReason,
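Token waits ride the same `WaitSignal` path: the handler mints a waitpoint, hands its id to an external party, and parks until the token is completed or times out. A sketch of the handler side (the job type, payload shapes, and `notifyApprover` helper are hypothetical, and it assumes the object returned by `createToken` exposes an `id` field):

```ts
import type { JobContext } from '@nicnocquee/dataqueue'; // assumed re-export

type Approval = { approved: boolean; reviewer: string };
declare function notifyApprover(reportId: string, tokenId: string): Promise<void>;

const handlers = {
  expense_report: async (
    payload: { reportId: string },
    _signal: AbortSignal,
    ctx: JobContext,
  ) => {
    // Memoize token creation with ctx.run so the replay after the wait does
    // not mint a second waitpoint.
    const token = await ctx.run('create-token', () => ctx.createToken());

    await ctx.run('notify', () => notifyApprover(payload.reportId, token.id));

    // Throws WaitSignal('token', ...) on the first pass; on resume the cached
    // result (ok/output, or the timed-out error) is returned.
    const result = await ctx.waitForToken<Approval>(token.id);
    if (!result.ok) throw new Error(result.error ?? 'approval wait failed');
    // result.output carries whatever was supplied when the token completed
  },
};
```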
@@ -339,15 +688,15 @@ export async function processJobWithHandlers<
  * Process a batch of jobs using the provided handler map and concurrency limit
  */
 export async function processBatchWithHandlers<PayloadMap>(
-  pool: Pool,
+  backend: QueueBackend,
   workerId: string,
   batchSize: number,
   jobType: string | string[] | undefined,
   jobHandlers: JobHandlers<PayloadMap>,
   concurrency?: number,
+  onError?: (error: Error) => void,
 ): Promise<number> {
-  const jobs = await getNextBatch<PayloadMap, JobType<PayloadMap>>(
-    pool,
+  const jobs = await backend.getNextBatch<PayloadMap, JobType<PayloadMap>>(
     workerId,
     batchSize,
     jobType,
@@ -355,7 +704,7 @@ export async function processBatchWithHandlers<PayloadMap>(
   if (!concurrency || concurrency >= jobs.length) {
     // Default: all in parallel
     await Promise.all(
-      jobs.map((job) => processJobWithHandlers(pool, job, jobHandlers)),
+      jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers)),
     );
     return jobs.length;
   }
@@ -369,7 +718,7 @@ export async function processBatchWithHandlers<PayloadMap>(
     while (running < concurrency && idx < jobs.length) {
       const job = jobs[idx++];
       running++;
-      processJobWithHandlers(pool, job, jobHandlers)
+      processJobWithHandlers(backend, job, jobHandlers)
         .then(() => {
           running--;
           finished++;
@@ -378,6 +727,9 @@ export async function processBatchWithHandlers<PayloadMap>(
         .catch((err) => {
           running--;
           finished++;
+          if (onError) {
+            onError(err instanceof Error ? err : new Error(String(err)));
+          }
           next();
         });
     }
@@ -387,16 +739,18 @@ export async function processBatchWithHandlers<PayloadMap>(
 }

 /**
- * Start a job processor that continuously processes jobs
- * @param pool - The database pool
- * @param handlers - The job handlers for this processor instance
+ * Start a job processor that continuously processes jobs.
+ * @param backend - The queue backend.
+ * @param handlers - The job handlers for this processor instance.
  * @param options - The processor options. Leave pollInterval empty to run only once. Use jobType to filter jobs by type.
- * @returns {Processor} The processor instance
+ * @param onBeforeBatch - Optional callback invoked before each batch. Used internally to enqueue due cron jobs.
+ * @returns {Processor} The processor instance.
  */
 export const createProcessor = <PayloadMap = any>(
-  pool: Pool,
+  backend: QueueBackend,
   handlers: JobHandlers<PayloadMap>,
   options: ProcessorOptions = {},
+  onBeforeBatch?: () => Promise<void>,
 ): Processor => {
   const {
     workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
@@ -409,24 +763,42 @@ export const createProcessor = <PayloadMap = any>(

   let running = false;
   let intervalId: NodeJS.Timeout | null = null;
+  let currentBatchPromise: Promise<number> | null = null;

   setLogContext(options.verbose ?? false);

   const processJobs = async (): Promise<number> => {
     if (!running) return 0;

+    // Run pre-batch hook (e.g. enqueue due cron jobs) before processing
+    if (onBeforeBatch) {
+      try {
+        await onBeforeBatch();
+      } catch (hookError) {
+        log(`onBeforeBatch hook error: ${hookError}`);
+        if (onError) {
+          onError(
+            hookError instanceof Error
+              ? hookError
+              : new Error(String(hookError)),
+          );
+        }
+      }
+    }
+
     log(
       `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(',') : jobType}` : ''}`,
     );

     try {
       const processed = await processBatchWithHandlers(
-        pool,
+        backend,
         workerId,
         batchSize,
         jobType,
         handlers,
         concurrency,
+        onError,
       );
       // Only process one batch in start; do not schedule next batch here
       return processed;
@@ -447,28 +819,63 @@ export const createProcessor = <PayloadMap = any>(

       log(`Starting job processor with workerId: ${workerId}`);
       running = true;
-
-
+
+      // Single serialized loop: process a batch, then either immediately
+      // continue (if full batch was returned) or wait pollInterval.
+      const scheduleNext = (immediate: boolean) => {
         if (!running) return;
-
-
-
+        if (immediate) {
+          intervalId = setTimeout(loop, 0);
+        } else {
+          intervalId = setTimeout(loop, pollInterval);
         }
       };
-
-
+
+      const loop = async () => {
+        if (!running) return;
+        currentBatchPromise = processJobs();
+        const processed = await currentBatchPromise;
+        currentBatchPromise = null;
+        // If we got a full batch, there may be more work — process immediately
+        scheduleNext(processed === batchSize);
+      };
+
+      // Start the first iteration immediately
+      loop();
     },
     /**
-     * Stop the job processor that runs in the background
+     * Stop the job processor that runs in the background.
+     * Does not wait for in-flight jobs.
      */
     stop: () => {
       log(`Stopping job processor with workerId: ${workerId}`);
       running = false;
       if (intervalId) {
-        clearInterval(intervalId);
+        clearTimeout(intervalId);
         intervalId = null;
       }
     },
+    /**
+     * Stop the job processor and wait for all in-flight jobs to complete.
+     * Useful for graceful shutdown (e.g., SIGTERM handling).
+     */
+    stopAndDrain: async (drainTimeoutMs = 30000) => {
+      log(`Stopping and draining job processor with workerId: ${workerId}`);
+      running = false;
+      if (intervalId) {
+        clearTimeout(intervalId);
+        intervalId = null;
+      }
+      // Wait for current batch to finish, with a timeout
+      if (currentBatchPromise) {
+        await Promise.race([
+          currentBatchPromise.catch(() => {}),
+          new Promise<void>((resolve) => setTimeout(resolve, drainTimeoutMs)),
+        ]);
+        currentBatchPromise = null;
+      }
+      log(`Job processor ${workerId} drained`);
+    },
     /**
      * Start the job processor synchronously.
      * - This will process all jobs immediately and then stop.
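The new `stopAndDrain` makes graceful shutdown a two-liner: stop polling, then wait up to `drainTimeoutMs` for the in-flight batch to settle. A sketch of SIGTERM wiring (assuming `backend` and `handlers` are constructed as elsewhere in this diff; whether applications call `createProcessor` directly, rather than through the package's higher-level queue API, is an assumption):

```ts
const processor = createProcessor(backend, handlers, {
  pollInterval: 5000, // keep polling; leaving it empty runs only once
  batchSize: 10,
});
processor.start();

process.on('SIGTERM', async () => {
  // Stops the loop, then waits up to 30s for the current batch to finish.
  await processor.stopAndDrain(30_000);
  process.exit(0);
});
```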