@nicnocquee/dataqueue 1.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,478 @@
1
+ import { Pool } from 'pg';
2
+ import { describe, expect, it, beforeEach, afterEach, vi } from 'vitest';
3
+ import {
4
+ createProcessor,
5
+ processBatchWithHandlers,
6
+ processJobWithHandlers,
7
+ } from './processor.js';
8
+ import * as queue from './queue.js';
9
+ import { createTestDbAndPool, destroyTestDb } from './test-util.js';
10
+ import { FailureReason, JobHandler } from './types.js';
11
+
12
// Define the payload map for test jobs: maps each job-type name used in this
// suite to the shape of the payload its handler receives.
interface TestPayloadMap {
  test: { foo: string };
  fail: {};
  missing: {};
  batch: { i: number };
  proc: { x: number };
  typeA: { n: number };
  typeB: { n: number };
  typeC: { n: number };
}
23
+
24
// Integration tests for processor

describe('processor integration', () => {
  let pool: Pool;
  let dbName: string;

  // Each test gets a fresh throwaway database so tests cannot interfere.
  beforeEach(async () => {
    const setup = await createTestDbAndPool();
    pool = setup.pool;
    dbName = setup.dbName;
  });

  afterEach(async () => {
    await pool.end();
    await destroyTestDb(dbName);
  });

  it('should process a job with a registered handler', async () => {
    const handler = vi.fn(async () => {});
    const handlers = {
      test: handler,
      fail: vi.fn(async () => {}),
      missing: vi.fn(async () => {}),
      batch: vi.fn(async () => {}),
      proc: vi.fn(async () => {}),
      typeA: vi.fn(async () => {}),
      typeB: vi.fn(async () => {}),
      typeC: vi.fn(async () => {}),
    };
    const jobId = await queue.addJob<TestPayloadMap, 'test'>(pool, {
      job_type: 'test',
      payload: { foo: 'bar' },
    });
    const job = await queue.getJob<TestPayloadMap, 'test'>(pool, jobId);
    expect(job).not.toBeNull();
    await processJobWithHandlers(pool, job!, handlers);
    // Handlers are invoked with (payload, AbortSignal) — the signal backs
    // the per-job timeout mechanism.
    expect(handler).toHaveBeenCalledWith(
      { foo: 'bar' },
      expect.any(AbortSignal),
    );
    const completed = await queue.getJob(pool, jobId);
    expect(completed?.status).toBe('completed');
  });

  it('should mark job as failed if handler throws', async () => {
    const handler = vi.fn(async () => {
      throw new Error('fail!');
    });
    const handlers = {
      test: vi.fn(async () => {}),
      fail: handler,
      missing: vi.fn(async () => {}),
      batch: vi.fn(async () => {}),
      proc: vi.fn(async () => {}),
      typeA: vi.fn(async () => {}),
      typeB: vi.fn(async () => {}),
      typeC: vi.fn(async () => {}),
    };
    const jobId = await queue.addJob<TestPayloadMap, 'fail'>(pool, {
      job_type: 'fail',
      payload: {},
    });
    const job = await queue.getJob<TestPayloadMap, 'fail'>(pool, jobId);
    expect(job).not.toBeNull();
    await processJobWithHandlers(pool, job!, handlers);
    // A throwing handler must fail the job, record the error message in
    // error_history, and tag the failure as a handler error.
    const failed = await queue.getJob(pool, jobId);
    expect(failed?.status).toBe('failed');
    expect(failed?.error_history?.[0]?.message).toBe('fail!');
    expect(failed?.failure_reason).toBe('handler_error');
  });

  it('should mark job as failed if no handler registered', async () => {
    const handler = vi.fn(async () => {
      throw new Error('No handler registered');
    });
    // 'missing' is deliberately absent from this handler map.
    const handlers = {
      test: vi.fn(async () => {}),
      fail: handler,
      batch: vi.fn(async () => {}),
      proc: vi.fn(async () => {}),
      typeA: vi.fn(async () => {}),
      typeB: vi.fn(async () => {}),
      typeC: vi.fn(async () => {}),
    };
    const jobId = await queue.addJob<TestPayloadMap, 'missing'>(pool, {
      job_type: 'missing',
      payload: {},
    });
    const job = await queue.getJob<TestPayloadMap, 'missing'>(pool, jobId);
    expect(job).not.toBeNull();
    // @ts-expect-error - test handler is missing
    await processJobWithHandlers(pool, job!, handlers);
    const failed = await queue.getJob(pool, jobId);
    expect(failed?.status).toBe('failed');
    expect(failed?.error_history?.[0]?.message).toContain(
      'No handler registered',
    );
    expect(failed?.failure_reason).toBe('no_handler');
  });

  it('should process a batch of jobs', async () => {
    const handler = vi.fn(async () => {});
    const handlers = {
      test: vi.fn(async () => {}),
      fail: vi.fn(async () => {}),
      missing: vi.fn(async () => {}),
      batch: handler,
      proc: vi.fn(async () => {}),
      typeA: vi.fn(async () => {}),
      typeB: vi.fn(async () => {}),
      typeC: vi.fn(async () => {}),
    };
    const ids = await Promise.all([
      queue.addJob<TestPayloadMap, 'batch'>(pool, {
        job_type: 'batch',
        payload: { i: 1 },
      }),
      queue.addJob<TestPayloadMap, 'batch'>(pool, {
        job_type: 'batch',
        payload: { i: 2 },
      }),
    ]);
    const processed = await processBatchWithHandlers(
      pool,
      'worker-batch',
      2,
      undefined,
      handlers,
    );
    expect(processed).toBe(2);
    const jobs = await queue.getJobsByStatus<TestPayloadMap, 'batch'>(
      pool,
      'completed',
    );
    expect(jobs.length).toBeGreaterThanOrEqual(2);
    expect(handler).toHaveBeenCalledTimes(2);
  });

  it('should start and stop the processor', async () => {
    const handler = vi.fn(async () => {});
    const handlers = {
      test: vi.fn(async () => {}),
      fail: vi.fn(async () => {}),
      missing: vi.fn(async () => {}),
      batch: vi.fn(async () => {}),
      proc: handler,
      typeA: vi.fn(async () => {}),
      typeB: vi.fn(async () => {}),
      typeC: vi.fn(async () => {}),
    };
    await queue.addJob<TestPayloadMap, 'proc'>(pool, {
      job_type: 'proc',
      payload: { x: 1 },
    });
    const processor = createProcessor(pool, handlers, { pollInterval: 200 });
    processor.start();
    // Wait for job to be processed (timing-based; relies on the 200ms poll).
    await new Promise((r) => setTimeout(r, 500));
    processor.stop();
    expect(processor.isRunning()).toBe(false);
    const jobs = await queue.getJobsByStatus(pool, 'completed');
    expect(jobs.some((j) => j.job_type === 'proc')).toBe(true);
  });

  it('should process only jobs of a specific job type with processBatch', async () => {
    const handlerA = vi.fn(async () => {});
    const handlerB = vi.fn(async () => {});
    const handlers = {
      test: vi.fn(async () => {}),
      fail: vi.fn(async () => {}),
      missing: vi.fn(async () => {}),
      batch: vi.fn(async () => {}),
      proc: vi.fn(async () => {}),
      typeA: handlerA,
      typeB: handlerB,
      typeC: vi.fn(async () => {}),
    };
    const idA1 = await queue.addJob<TestPayloadMap, 'typeA'>(pool, {
      job_type: 'typeA',
      payload: { n: 1 },
    });
    const idA2 = await queue.addJob<TestPayloadMap, 'typeA'>(pool, {
      job_type: 'typeA',
      payload: { n: 2 },
    });
    const idB1 = await queue.addJob<TestPayloadMap, 'typeB'>(pool, {
      job_type: 'typeB',
      payload: { n: 3 },
    });
    // Only process typeA
    const processed = await processBatchWithHandlers(
      pool,
      'worker-typeA',
      10,
      'typeA',
      handlers,
    );
    expect(processed).toBe(2);
    expect(handlerA).toHaveBeenCalledTimes(2);
    expect(handlerB).not.toHaveBeenCalled();
    const jobsA = await queue.getJobsByStatus<TestPayloadMap, 'typeA'>(
      pool,
      'completed',
    );
    expect(jobsA.some((j) => j.id === idA1)).toBe(true);
    expect(jobsA.some((j) => j.id === idA2)).toBe(true);
    // The typeB job must have been left untouched by the filtered batch.
    const jobB = await queue.getJob<TestPayloadMap, 'typeB'>(pool, idB1);
    expect(jobB?.status).not.toBe('completed');
  });

  it('should process only jobs of specific job types (array) with processBatch', async () => {
    const handlerA = vi.fn(async () => {});
    const handlerB = vi.fn(async () => {});
    const handlerC = vi.fn(async () => {});
    const handlers = {
      test: vi.fn(async () => {}),
      fail: vi.fn(async () => {}),
      missing: vi.fn(async () => {}),
      batch: vi.fn(async () => {}),
      proc: vi.fn(async () => {}),
      typeA: handlerA,
      typeB: handlerB,
      typeC: handlerC,
    };
    const idA = await queue.addJob<TestPayloadMap, 'typeA'>(pool, {
      job_type: 'typeA',
      payload: { n: 1 },
    });
    const idB = await queue.addJob<TestPayloadMap, 'typeB'>(pool, {
      job_type: 'typeB',
      payload: { n: 2 },
    });
    const idC = await queue.addJob<TestPayloadMap, 'typeC'>(pool, {
      job_type: 'typeC',
      payload: { n: 3 },
    });
    // Only process typeA and typeC
    const processed = await processBatchWithHandlers(
      pool,
      'worker-multi',
      10,
      ['typeA', 'typeC'],
      handlers,
    );
    expect(processed).toBe(2);
    expect(handlerA).toHaveBeenCalledTimes(1);
    expect(handlerB).not.toHaveBeenCalled();
    expect(handlerC).toHaveBeenCalledTimes(1);
    const jobs = await queue.getJobsByStatus<TestPayloadMap, 'typeA' | 'typeC'>(
      pool,
      'completed',
    );
    expect(jobs.some((j) => j.id === idA)).toBe(true);
    expect(jobs.some((j) => j.id === idC)).toBe(true);
    const jobB = await queue.getJob<TestPayloadMap, 'typeB'>(pool, idB);
    expect(jobB?.status).not.toBe('completed');
  });

  it('should process only jobs of a specific job type with createProcessor', async () => {
    const handlerA = vi.fn(async () => {});
    const handlerB = vi.fn(async () => {});
    const handlers = {
      test: vi.fn(async () => {}),
      fail: vi.fn(async () => {}),
      missing: vi.fn(async () => {}),
      batch: vi.fn(async () => {}),
      proc: vi.fn(async () => {}),
      typeA: handlerA,
      typeB: handlerB,
      typeC: vi.fn(async () => {}),
    };
    const idA = await queue.addJob<TestPayloadMap, 'typeA'>(pool, {
      job_type: 'typeA',
      payload: { n: 1 },
    });
    const idB = await queue.addJob<TestPayloadMap, 'typeB'>(pool, {
      job_type: 'typeB',
      payload: { n: 2 },
    });
    const processor = createProcessor(pool, handlers, {
      pollInterval: 100,
      jobType: 'typeA',
    });
    processor.start();
    await new Promise((r) => setTimeout(r, 300));
    processor.stop();
    expect(processor.isRunning()).toBe(false);
    expect(handlerA).toHaveBeenCalledTimes(1);
    expect(handlerB).not.toHaveBeenCalled();
    const jobA = await queue.getJob<TestPayloadMap, 'typeA'>(pool, idA);
    const jobB = await queue.getJob<TestPayloadMap, 'typeB'>(pool, idB);
    expect(jobA?.status).toBe('completed');
    expect(jobB?.status).not.toBe('completed');
  });
});
319
+
320
describe('concurrency option', () => {
  let pool: Pool;
  let dbName: string;

  beforeEach(async () => {
    const setup = await createTestDbAndPool();
    pool = setup.pool;
    dbName = setup.dbName;
  });

  afterEach(async () => {
    await pool.end();
    await destroyTestDb(dbName);
  });

  // Enqueue n jobs of type 'test' with empty payloads, sequentially.
  async function addJobs(n: number) {
    for (let i = 0; i < n; i++) {
      await queue.addJob<{ test: {} }, 'test'>(pool, {
        job_type: 'test',
        payload: {},
      });
    }
  }

  it('should not process more than default concurrency (3) jobs in parallel', async () => {
    let running = 0;
    let maxParallel = 0;
    // The handler records the high-water mark of concurrently running
    // invocations; the 30ms sleep keeps several handlers in flight at once.
    const handler = async () => {
      running++;
      maxParallel = Math.max(maxParallel, running);
      await new Promise((r) => setTimeout(r, 30));
      running--;
    };
    const handlers = { test: handler };
    await addJobs(10);
    const processor = createProcessor(pool, handlers, { batchSize: 10 });
    await processor.start();
    expect(maxParallel).toBeLessThanOrEqual(3);
  });

  it('should not process more than custom concurrency jobs in parallel', async () => {
    let running = 0;
    let maxParallel = 0;
    const handler = async () => {
      running++;
      maxParallel = Math.max(maxParallel, running);
      await new Promise((r) => setTimeout(r, 30));
      running--;
    };
    const handlers = { test: handler };
    await addJobs(10);
    const processor = createProcessor(pool, handlers, {
      batchSize: 10,
      concurrency: 2,
    });
    await processor.start();
    expect(maxParallel).toBeLessThanOrEqual(2);
  });

  it('should not process more than batchSize jobs in parallel if concurrency > batchSize', async () => {
    let running = 0;
    let maxParallel = 0;
    const handler = async () => {
      running++;
      maxParallel = Math.max(maxParallel, running);
      await new Promise((r) => setTimeout(r, 30));
      running--;
    };
    const handlers = { test: handler };
    await addJobs(2);
    // batchSize caps the fetch, so even concurrency 5 can only see 2 jobs.
    const processor = createProcessor(pool, handlers, {
      batchSize: 2,
      concurrency: 5,
    });
    await processor.start();
    expect(maxParallel).toBeLessThanOrEqual(2);
  });

  it('should process jobs sequentially if concurrency is 1', async () => {
    let running = 0;
    let maxParallel = 0;
    const handler = async () => {
      running++;
      maxParallel = Math.max(maxParallel, running);
      await new Promise((r) => setTimeout(r, 30));
      running--;
    };
    const handlers = { test: handler };
    await addJobs(5);
    const processor = createProcessor(pool, handlers, {
      batchSize: 5,
      concurrency: 1,
    });
    await processor.start();
    expect(maxParallel).toBe(1);
  });
});
417
+
418
describe('per-job timeout', () => {
  let pool: Pool;
  let dbName: string;

  beforeEach(async () => {
    const setup = await createTestDbAndPool();
    pool = setup.pool;
    dbName = setup.dbName;
  });

  afterEach(async () => {
    await pool.end();
    await destroyTestDb(dbName);
  });

  it('should fail the job if handler exceeds timeoutMs', async () => {
    // The handler would take 200ms, but cooperates with the abort signal so
    // it rejects as soon as the 50ms timeout fires.
    const handler = vi.fn(async (_payload, signal) => {
      await new Promise((resolve, reject) => {
        const t = setTimeout(resolve, 200);
        signal.addEventListener('abort', () => {
          clearTimeout(t);
          reject(new Error('aborted'));
        });
      });
    });
    const handlers: { test: JobHandler<{ test: {} }, 'test'> } = {
      test: handler,
    };
    const jobId = await queue.addJob<{ test: {} }, 'test'>(pool, {
      job_type: 'test',
      payload: {},
      timeoutMs: 50, // 50ms
    });
    const job = await queue.getJob<{ test: {} }, 'test'>(pool, jobId);
    expect(job).not.toBeNull();
    await processJobWithHandlers(pool, job!, handlers);
    const failed = await queue.getJob(pool, jobId);
    expect(failed?.status).toBe('failed');
    expect(failed?.error_history?.[0]?.message).toContain('timed out');
    expect(failed?.failure_reason).toBe(FailureReason.Timeout);
  });

  it('should complete the job if handler finishes before timeoutMs', async () => {
    // Handler finishes in 20ms, well under the 200ms budget.
    const handler = vi.fn(async (_payload, _signal) => {
      await new Promise((r) => setTimeout(r, 20));
    });
    const handlers: { test: JobHandler<{ test: {} }, 'test'> } = {
      test: handler,
    };
    const jobId = await queue.addJob<{ test: {} }, 'test'>(pool, {
      job_type: 'test',
      payload: {},
      timeoutMs: 200, // 200ms
    });
    const job = await queue.getJob<{ test: {} }, 'test'>(pool, jobId);
    expect(job).not.toBeNull();
    await processJobWithHandlers(pool, job!, handlers);
    const completed = await queue.getJob(pool, jobId);
    expect(completed?.status).toBe('completed');
  });
});
@@ -0,0 +1,242 @@
1
+ import { Pool } from 'pg';
2
+ import {
3
+ JobRecord,
4
+ ProcessorOptions,
5
+ Processor,
6
+ JobHandler,
7
+ JobType,
8
+ FailureReason,
9
+ JobHandlers,
10
+ } from './types.js';
11
+ import {
12
+ getNextBatch,
13
+ completeJob,
14
+ failJob,
15
+ setPendingReasonForUnpickedJobs,
16
+ } from './queue.js';
17
+ import { log, setLogContext } from './log-context.js';
18
+
19
+ /**
20
+ * Process a single job using the provided handler map
21
+ */
22
+ export async function processJobWithHandlers<
23
+ PayloadMap,
24
+ T extends keyof PayloadMap & string,
25
+ >(
26
+ pool: Pool,
27
+ job: JobRecord<PayloadMap, T>,
28
+ jobHandlers: JobHandlers<PayloadMap>,
29
+ ): Promise<void> {
30
+ const handler = jobHandlers[job.job_type];
31
+
32
+ if (!handler) {
33
+ await setPendingReasonForUnpickedJobs(
34
+ pool,
35
+ `No handler registered for job type: ${job.job_type}`,
36
+ job.job_type,
37
+ );
38
+ await failJob(
39
+ pool,
40
+ job.id,
41
+ new Error(`No handler registered for job type: ${job.job_type}`),
42
+ FailureReason.NoHandler,
43
+ );
44
+ return;
45
+ }
46
+
47
+ // Per-job timeout logic
48
+ const timeoutMs = job.timeout_ms ?? undefined;
49
+ let timeoutId: NodeJS.Timeout | undefined;
50
+ const controller = new AbortController();
51
+ try {
52
+ const jobPromise = handler(job.payload, controller.signal);
53
+ if (timeoutMs && timeoutMs > 0) {
54
+ await Promise.race([
55
+ jobPromise,
56
+ new Promise((_, reject) => {
57
+ timeoutId = setTimeout(() => {
58
+ controller.abort();
59
+ const timeoutError = new Error(
60
+ `Job timed out after ${timeoutMs} ms`,
61
+ );
62
+ // @ts-ignore
63
+ timeoutError.failureReason = FailureReason.Timeout;
64
+ reject(timeoutError);
65
+ }, timeoutMs);
66
+ }),
67
+ ]);
68
+ } else {
69
+ await jobPromise;
70
+ }
71
+ if (timeoutId) clearTimeout(timeoutId);
72
+ await completeJob(pool, job.id);
73
+ } catch (error) {
74
+ if (timeoutId) clearTimeout(timeoutId);
75
+ console.error(`Error processing job ${job.id}:`, error);
76
+ let failureReason = FailureReason.HandlerError;
77
+ if (
78
+ error &&
79
+ typeof error === 'object' &&
80
+ 'failureReason' in error &&
81
+ (error as any).failureReason === FailureReason.Timeout
82
+ ) {
83
+ failureReason = FailureReason.Timeout;
84
+ }
85
+ await failJob(
86
+ pool,
87
+ job.id,
88
+ error instanceof Error ? error : new Error(String(error)),
89
+ failureReason,
90
+ );
91
+ }
92
+ }
93
+
94
+ /**
95
+ * Process a batch of jobs using the provided handler map and concurrency limit
96
+ */
97
+ export async function processBatchWithHandlers<PayloadMap>(
98
+ pool: Pool,
99
+ workerId: string,
100
+ batchSize: number,
101
+ jobType: string | string[] | undefined,
102
+ jobHandlers: JobHandlers<PayloadMap>,
103
+ concurrency?: number,
104
+ ): Promise<number> {
105
+ const jobs = await getNextBatch<PayloadMap, JobType<PayloadMap>>(
106
+ pool,
107
+ workerId,
108
+ batchSize,
109
+ jobType,
110
+ );
111
+ if (!concurrency || concurrency >= jobs.length) {
112
+ // Default: all in parallel
113
+ await Promise.all(
114
+ jobs.map((job) => processJobWithHandlers(pool, job, jobHandlers)),
115
+ );
116
+ return jobs.length;
117
+ }
118
+ // Concurrency-limited pool
119
+ let idx = 0;
120
+ let running = 0;
121
+ let finished = 0;
122
+ return new Promise((resolve, reject) => {
123
+ const next = () => {
124
+ if (finished === jobs.length) return resolve(jobs.length);
125
+ while (running < concurrency && idx < jobs.length) {
126
+ const job = jobs[idx++];
127
+ running++;
128
+ processJobWithHandlers(pool, job, jobHandlers)
129
+ .then(() => {
130
+ running--;
131
+ finished++;
132
+ next();
133
+ })
134
+ .catch((err) => {
135
+ running--;
136
+ finished++;
137
+ next();
138
+ });
139
+ }
140
+ };
141
+ next();
142
+ });
143
+ }
144
+
145
+ /**
146
+ * Start a job processor that continuously processes jobs
147
+ * @param pool - The database pool
148
+ * @param handlers - The job handlers for this processor instance
149
+ * @param options - The processor options. Leave pollInterval empty to run only once. Use jobType to filter jobs by type.
150
+ * @returns {Processor} The processor instance
151
+ */
152
+ export const createProcessor = <PayloadMap = any>(
153
+ pool: Pool,
154
+ handlers: JobHandlers<PayloadMap>,
155
+ options: ProcessorOptions = {},
156
+ ): Processor => {
157
+ const {
158
+ workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
159
+ batchSize = 10,
160
+ pollInterval = 5000,
161
+ onError = (error: Error) => console.error('Job processor error:', error),
162
+ jobType,
163
+ concurrency = 3,
164
+ } = options;
165
+
166
+ let running = false;
167
+ let intervalId: NodeJS.Timeout | null = null;
168
+
169
+ setLogContext(options.verbose ?? false);
170
+
171
+ const processJobs = async (): Promise<number> => {
172
+ if (!running) return 0;
173
+
174
+ log(
175
+ `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(',') : jobType}` : ''}`,
176
+ );
177
+
178
+ try {
179
+ const processed = await processBatchWithHandlers(
180
+ pool,
181
+ workerId,
182
+ batchSize,
183
+ jobType,
184
+ handlers,
185
+ concurrency,
186
+ );
187
+ // Only process one batch in start; do not schedule next batch here
188
+ return processed;
189
+ } catch (error) {
190
+ onError(error instanceof Error ? error : new Error(String(error)));
191
+ }
192
+ return 0;
193
+ };
194
+
195
+ return {
196
+ /**
197
+ * Start the job processor in the background.
198
+ * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
199
+ * - You have to call the stop method to stop the processor.
200
+ */
201
+ startInBackground: () => {
202
+ if (running) return;
203
+
204
+ log(`Starting job processor with workerId: ${workerId}`);
205
+ running = true;
206
+ // Background: process batches repeatedly if needed
207
+ const processBatches = async () => {
208
+ if (!running) return;
209
+ const processed = await processJobs();
210
+ if (processed === batchSize && running) {
211
+ setImmediate(processBatches);
212
+ }
213
+ };
214
+ processBatches(); // Process immediately on start
215
+ intervalId = setInterval(processJobs, pollInterval);
216
+ },
217
+ /**
218
+ * Stop the job processor that runs in the background
219
+ */
220
+ stop: () => {
221
+ log(`Stopping job processor with workerId: ${workerId}`);
222
+ running = false;
223
+ if (intervalId) {
224
+ clearInterval(intervalId);
225
+ intervalId = null;
226
+ }
227
+ },
228
+ /**
229
+ * Start the job processor synchronously.
230
+ * - This will process all jobs immediately and then stop.
231
+ * - The pollInterval is ignored.
232
+ */
233
+ start: async () => {
234
+ log(`Starting job processor with workerId: ${workerId}`);
235
+ running = true;
236
+ const processed = await processJobs();
237
+ running = false;
238
+ return processed;
239
+ },
240
+ isRunning: () => running,
241
+ };
242
+ };