@nicnocquee/dataqueue 1.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/cli.cjs +38 -0
- package/dist/index.cjs +663 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +227 -0
- package/dist/index.d.ts +227 -0
- package/dist/index.js +659 -0
- package/dist/index.js.map +1 -0
- package/migrations/1751131910823_initial.sql +32 -0
- package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +7 -0
- package/migrations/1751186053000_add_job_events_table.sql +29 -0
- package/package.json +67 -0
- package/src/db-util.ts +7 -0
- package/src/index.test.ts +282 -0
- package/src/index.ts +97 -0
- package/src/log-context.ts +20 -0
- package/src/processor.test.ts +478 -0
- package/src/processor.ts +242 -0
- package/src/queue.test.ts +502 -0
- package/src/queue.ts +547 -0
- package/src/test-util.ts +56 -0
- package/src/types.ts +247 -0
package/src/queue.ts
ADDED
|
@@ -0,0 +1,547 @@
|
|
|
1
|
+
import { Pool } from 'pg';
|
|
2
|
+
import {
|
|
3
|
+
JobOptions,
|
|
4
|
+
JobRecord,
|
|
5
|
+
FailureReason,
|
|
6
|
+
JobEvent,
|
|
7
|
+
JobEventType,
|
|
8
|
+
} from './types.js';
|
|
9
|
+
import { log } from './log-context.js';
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* Record a job event in the job_events table
|
|
13
|
+
*/
|
|
14
|
+
export const recordJobEvent = async (
|
|
15
|
+
pool: Pool,
|
|
16
|
+
jobId: number,
|
|
17
|
+
eventType: JobEventType,
|
|
18
|
+
metadata?: any,
|
|
19
|
+
): Promise<void> => {
|
|
20
|
+
const client = await pool.connect();
|
|
21
|
+
try {
|
|
22
|
+
await client.query(
|
|
23
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
24
|
+
[jobId, eventType, metadata ? JSON.stringify(metadata) : null],
|
|
25
|
+
);
|
|
26
|
+
} catch (error) {
|
|
27
|
+
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
28
|
+
// Do not throw, to avoid interfering with main job logic
|
|
29
|
+
} finally {
|
|
30
|
+
client.release();
|
|
31
|
+
}
|
|
32
|
+
};
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* Add a job to the queue
|
|
36
|
+
*/
|
|
37
|
+
export const addJob = async <PayloadMap, T extends keyof PayloadMap & string>(
|
|
38
|
+
pool: Pool,
|
|
39
|
+
{
|
|
40
|
+
job_type,
|
|
41
|
+
payload,
|
|
42
|
+
max_attempts = 3,
|
|
43
|
+
priority = 0,
|
|
44
|
+
run_at = null,
|
|
45
|
+
timeoutMs = undefined,
|
|
46
|
+
}: JobOptions<PayloadMap, T>,
|
|
47
|
+
): Promise<number> => {
|
|
48
|
+
const client = await pool.connect();
|
|
49
|
+
try {
|
|
50
|
+
let result;
|
|
51
|
+
if (run_at) {
|
|
52
|
+
result = await client.query(
|
|
53
|
+
`INSERT INTO job_queue
|
|
54
|
+
(job_type, payload, max_attempts, priority, run_at, timeout_ms)
|
|
55
|
+
VALUES ($1, $2, $3, $4, $5, $6)
|
|
56
|
+
RETURNING id`,
|
|
57
|
+
[job_type, payload, max_attempts, priority, run_at, timeoutMs ?? null],
|
|
58
|
+
);
|
|
59
|
+
log(
|
|
60
|
+
`Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, run_at ${run_at.toISOString()}, priority ${priority}, max_attempts ${max_attempts} job_type ${job_type}`,
|
|
61
|
+
);
|
|
62
|
+
} else {
|
|
63
|
+
result = await client.query(
|
|
64
|
+
`INSERT INTO job_queue
|
|
65
|
+
(job_type, payload, max_attempts, priority, timeout_ms)
|
|
66
|
+
VALUES ($1, $2, $3, $4, $5)
|
|
67
|
+
RETURNING id`,
|
|
68
|
+
[job_type, payload, max_attempts, priority, timeoutMs ?? null],
|
|
69
|
+
);
|
|
70
|
+
log(
|
|
71
|
+
`Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, priority ${priority}, max_attempts ${max_attempts} job_type ${job_type}`,
|
|
72
|
+
);
|
|
73
|
+
}
|
|
74
|
+
await recordJobEvent(pool, result.rows[0].id, JobEventType.Added, {
|
|
75
|
+
job_type,
|
|
76
|
+
payload,
|
|
77
|
+
});
|
|
78
|
+
return result.rows[0].id;
|
|
79
|
+
} catch (error) {
|
|
80
|
+
log(`Error adding job: ${error}`);
|
|
81
|
+
throw error;
|
|
82
|
+
} finally {
|
|
83
|
+
client.release();
|
|
84
|
+
}
|
|
85
|
+
};
|
|
86
|
+
|
|
87
|
+
/**
|
|
88
|
+
* Get a job by ID
|
|
89
|
+
*/
|
|
90
|
+
export const getJob = async <PayloadMap, T extends keyof PayloadMap & string>(
|
|
91
|
+
pool: Pool,
|
|
92
|
+
id: number,
|
|
93
|
+
): Promise<JobRecord<PayloadMap, T> | null> => {
|
|
94
|
+
const client = await pool.connect();
|
|
95
|
+
try {
|
|
96
|
+
const result = await client.query('SELECT * FROM job_queue WHERE id = $1', [
|
|
97
|
+
id,
|
|
98
|
+
]);
|
|
99
|
+
|
|
100
|
+
if (result.rows.length === 0) {
|
|
101
|
+
log(`Job ${id} not found`);
|
|
102
|
+
return null;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
log(`Found job ${id}`);
|
|
106
|
+
|
|
107
|
+
return {
|
|
108
|
+
...result.rows[0],
|
|
109
|
+
payload: result.rows[0].payload,
|
|
110
|
+
timeout_ms: result.rows[0].timeout_ms,
|
|
111
|
+
failure_reason: result.rows[0].failure_reason,
|
|
112
|
+
};
|
|
113
|
+
} catch (error) {
|
|
114
|
+
log(`Error getting job ${id}: ${error}`);
|
|
115
|
+
throw error;
|
|
116
|
+
} finally {
|
|
117
|
+
client.release();
|
|
118
|
+
}
|
|
119
|
+
};
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Get jobs by status
|
|
123
|
+
*/
|
|
124
|
+
export const getJobsByStatus = async <
|
|
125
|
+
PayloadMap,
|
|
126
|
+
T extends keyof PayloadMap & string,
|
|
127
|
+
>(
|
|
128
|
+
pool: Pool,
|
|
129
|
+
status: string,
|
|
130
|
+
limit = 100,
|
|
131
|
+
offset = 0,
|
|
132
|
+
): Promise<JobRecord<PayloadMap, T>[]> => {
|
|
133
|
+
const client = await pool.connect();
|
|
134
|
+
try {
|
|
135
|
+
const result = await client.query(
|
|
136
|
+
'SELECT * FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3',
|
|
137
|
+
[status, limit, offset],
|
|
138
|
+
);
|
|
139
|
+
|
|
140
|
+
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
141
|
+
|
|
142
|
+
return result.rows.map((row) => ({
|
|
143
|
+
...row,
|
|
144
|
+
payload: row.payload,
|
|
145
|
+
timeout_ms: row.timeout_ms,
|
|
146
|
+
failure_reason: row.failure_reason,
|
|
147
|
+
}));
|
|
148
|
+
} catch (error) {
|
|
149
|
+
log(`Error getting jobs by status ${status}: ${error}`);
|
|
150
|
+
throw error;
|
|
151
|
+
} finally {
|
|
152
|
+
client.release();
|
|
153
|
+
}
|
|
154
|
+
};
|
|
155
|
+
|
|
156
|
+
/**
 * Get the next batch of jobs to process.
 *
 * Atomically claims up to `batchSize` runnable jobs (pending, or failed and
 * due for retry) by flipping them to 'processing' inside a transaction,
 * using `FOR UPDATE SKIP LOCKED` so concurrent workers never grab the same
 * rows.
 *
 * @param pool - The database pool
 * @param workerId - The worker ID
 * @param batchSize - The batch size
 * @param jobType - Only fetch jobs with this job type (string or array of strings)
 */
export const getNextBatch = async <
  PayloadMap,
  T extends keyof PayloadMap & string,
>(
  pool: Pool,
  workerId: string,
  batchSize = 10,
  jobType?: string | string[],
): Promise<JobRecord<PayloadMap, T>[]> => {
  const client = await pool.connect();
  try {
    // Begin transaction
    await client.query('BEGIN');

    // Build job type filter.
    // NOTE(review): the `$3` placeholders below are coupled to `params`
    // having exactly [workerId, batchSize] first — keep them in sync.
    let jobTypeFilter = '';
    let params: any[] = [workerId, batchSize];
    if (jobType) {
      if (Array.isArray(jobType)) {
        jobTypeFilter = ` AND job_type = ANY($3)`;
        params.push(jobType);
      } else {
        jobTypeFilter = ` AND job_type = $3`;
        params.push(jobType);
      }
    }

    // Get and lock a batch of jobs. In the SET clause, `attempts` refers to
    // the pre-update value, so `attempts > 0` means this is a retry pickup
    // and last_retried_at is stamped; SKIP LOCKED makes concurrent workers
    // skip rows already claimed by another transaction.
    const result = await client.query(
      `
      UPDATE job_queue
      SET status = 'processing',
          locked_at = NOW(),
          locked_by = $1,
          attempts = attempts + 1,
          updated_at = NOW(),
          pending_reason = NULL,
          started_at = COALESCE(started_at, NOW()),
          last_retried_at = CASE WHEN attempts > 0 THEN NOW() ELSE last_retried_at END
      WHERE id IN (
        SELECT id FROM job_queue
        WHERE (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
          AND (attempts < max_attempts)
          AND run_at <= NOW()
          ${jobTypeFilter}
        ORDER BY priority DESC, created_at ASC
        LIMIT $2
        FOR UPDATE SKIP LOCKED
      )
      RETURNING *
      `,
      params,
    );

    log(`Found ${result.rows.length} jobs to process`);

    // Commit transaction
    await client.query('COMMIT');

    // Record processing event for each job.
    // NOTE(review): events are written after COMMIT, so a crash right here
    // claims the jobs without a Processing event — presumably acceptable
    // since recordJobEvent is best-effort anyway; confirm.
    for (const row of result.rows) {
      await recordJobEvent(pool, row.id, JobEventType.Processing);
    }

    return result.rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms,
    }));
  } catch (error) {
    log(`Error getting next batch: ${error}`);
    await client.query('ROLLBACK');
    throw error;
  } finally {
    client.release();
  }
};
|
|
240
|
+
|
|
241
|
+
/**
|
|
242
|
+
* Mark a job as completed
|
|
243
|
+
*/
|
|
244
|
+
export const completeJob = async (pool: Pool, jobId: number): Promise<void> => {
|
|
245
|
+
const client = await pool.connect();
|
|
246
|
+
try {
|
|
247
|
+
await client.query(
|
|
248
|
+
`
|
|
249
|
+
UPDATE job_queue
|
|
250
|
+
SET status = 'completed', updated_at = NOW(), completed_at = NOW()
|
|
251
|
+
WHERE id = $1
|
|
252
|
+
`,
|
|
253
|
+
[jobId],
|
|
254
|
+
);
|
|
255
|
+
await recordJobEvent(pool, jobId, JobEventType.Completed);
|
|
256
|
+
} catch (error) {
|
|
257
|
+
log(`Error completing job ${jobId}: ${error}`);
|
|
258
|
+
throw error;
|
|
259
|
+
} finally {
|
|
260
|
+
log(`Completed job ${jobId}`);
|
|
261
|
+
client.release();
|
|
262
|
+
}
|
|
263
|
+
};
|
|
264
|
+
|
|
265
|
+
/**
|
|
266
|
+
* Mark a job as failed
|
|
267
|
+
*/
|
|
268
|
+
export const failJob = async (
|
|
269
|
+
pool: Pool,
|
|
270
|
+
jobId: number,
|
|
271
|
+
error: Error,
|
|
272
|
+
failureReason?: FailureReason,
|
|
273
|
+
): Promise<void> => {
|
|
274
|
+
const client = await pool.connect();
|
|
275
|
+
try {
|
|
276
|
+
/**
|
|
277
|
+
* The next attempt will be scheduled after `2^attempts * 1 minute` from the last attempt.
|
|
278
|
+
*/
|
|
279
|
+
await client.query(
|
|
280
|
+
`
|
|
281
|
+
UPDATE job_queue
|
|
282
|
+
SET status = 'failed',
|
|
283
|
+
updated_at = NOW(),
|
|
284
|
+
next_attempt_at = CASE
|
|
285
|
+
WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
|
|
286
|
+
ELSE NULL
|
|
287
|
+
END,
|
|
288
|
+
error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
|
|
289
|
+
failure_reason = $3,
|
|
290
|
+
last_failed_at = NOW()
|
|
291
|
+
WHERE id = $1
|
|
292
|
+
`,
|
|
293
|
+
[
|
|
294
|
+
jobId,
|
|
295
|
+
JSON.stringify([
|
|
296
|
+
{
|
|
297
|
+
message: error.message || String(error),
|
|
298
|
+
timestamp: new Date().toISOString(),
|
|
299
|
+
},
|
|
300
|
+
]),
|
|
301
|
+
failureReason ?? null,
|
|
302
|
+
],
|
|
303
|
+
);
|
|
304
|
+
await recordJobEvent(pool, jobId, JobEventType.Failed, {
|
|
305
|
+
message: error.message || String(error),
|
|
306
|
+
failureReason,
|
|
307
|
+
});
|
|
308
|
+
} catch (error) {
|
|
309
|
+
log(`Error failing job ${jobId}: ${error}`);
|
|
310
|
+
throw error;
|
|
311
|
+
} finally {
|
|
312
|
+
log(`Failed job ${jobId}`);
|
|
313
|
+
client.release();
|
|
314
|
+
}
|
|
315
|
+
};
|
|
316
|
+
|
|
317
|
+
/**
|
|
318
|
+
* Retry a failed job immediately
|
|
319
|
+
*/
|
|
320
|
+
export const retryJob = async (pool: Pool, jobId: number): Promise<void> => {
|
|
321
|
+
const client = await pool.connect();
|
|
322
|
+
try {
|
|
323
|
+
await client.query(
|
|
324
|
+
`
|
|
325
|
+
UPDATE job_queue
|
|
326
|
+
SET status = 'pending',
|
|
327
|
+
updated_at = NOW(),
|
|
328
|
+
locked_at = NULL,
|
|
329
|
+
locked_by = NULL,
|
|
330
|
+
next_attempt_at = NOW(),
|
|
331
|
+
last_retried_at = NOW()
|
|
332
|
+
WHERE id = $1
|
|
333
|
+
`,
|
|
334
|
+
[jobId],
|
|
335
|
+
);
|
|
336
|
+
await recordJobEvent(pool, jobId, JobEventType.Retried);
|
|
337
|
+
} catch (error) {
|
|
338
|
+
log(`Error retrying job ${jobId}: ${error}`);
|
|
339
|
+
throw error;
|
|
340
|
+
} finally {
|
|
341
|
+
log(`Retried job ${jobId}`);
|
|
342
|
+
client.release();
|
|
343
|
+
}
|
|
344
|
+
};
|
|
345
|
+
|
|
346
|
+
/**
|
|
347
|
+
* Delete old completed jobs
|
|
348
|
+
*/
|
|
349
|
+
export const cleanupOldJobs = async (
|
|
350
|
+
pool: Pool,
|
|
351
|
+
daysToKeep = 30,
|
|
352
|
+
): Promise<number> => {
|
|
353
|
+
const client = await pool.connect();
|
|
354
|
+
try {
|
|
355
|
+
const result = await client.query(`
|
|
356
|
+
DELETE FROM job_queue
|
|
357
|
+
WHERE status = 'completed'
|
|
358
|
+
AND updated_at < NOW() - INTERVAL '${daysToKeep} days'
|
|
359
|
+
RETURNING id
|
|
360
|
+
`);
|
|
361
|
+
log(`Deleted ${result.rowCount} old jobs`);
|
|
362
|
+
return result.rowCount || 0;
|
|
363
|
+
} catch (error) {
|
|
364
|
+
log(`Error cleaning up old jobs: ${error}`);
|
|
365
|
+
throw error;
|
|
366
|
+
} finally {
|
|
367
|
+
client.release();
|
|
368
|
+
}
|
|
369
|
+
};
|
|
370
|
+
|
|
371
|
+
/**
|
|
372
|
+
* Cancel a scheduled job (only if still pending)
|
|
373
|
+
*/
|
|
374
|
+
export const cancelJob = async (pool: Pool, jobId: number): Promise<void> => {
|
|
375
|
+
const client = await pool.connect();
|
|
376
|
+
try {
|
|
377
|
+
await client.query(
|
|
378
|
+
`
|
|
379
|
+
UPDATE job_queue
|
|
380
|
+
SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW()
|
|
381
|
+
WHERE id = $1 AND status = 'pending'
|
|
382
|
+
`,
|
|
383
|
+
[jobId],
|
|
384
|
+
);
|
|
385
|
+
await recordJobEvent(pool, jobId, JobEventType.Cancelled);
|
|
386
|
+
} catch (error) {
|
|
387
|
+
log(`Error cancelling job ${jobId}: ${error}`);
|
|
388
|
+
throw error;
|
|
389
|
+
} finally {
|
|
390
|
+
log(`Cancelled job ${jobId}`);
|
|
391
|
+
client.release();
|
|
392
|
+
}
|
|
393
|
+
};
|
|
394
|
+
|
|
395
|
+
/**
|
|
396
|
+
* Cancel all upcoming jobs (pending and scheduled in the future) with optional filters
|
|
397
|
+
*/
|
|
398
|
+
export const cancelAllUpcomingJobs = async (
|
|
399
|
+
pool: Pool,
|
|
400
|
+
filters?: { job_type?: string; priority?: number; run_at?: Date },
|
|
401
|
+
): Promise<number> => {
|
|
402
|
+
const client = await pool.connect();
|
|
403
|
+
try {
|
|
404
|
+
let query = `
|
|
405
|
+
UPDATE job_queue
|
|
406
|
+
SET status = 'cancelled', updated_at = NOW()
|
|
407
|
+
WHERE status = 'pending'`;
|
|
408
|
+
const params: any[] = [];
|
|
409
|
+
let paramIdx = 1;
|
|
410
|
+
if (filters) {
|
|
411
|
+
if (filters.job_type) {
|
|
412
|
+
query += ` AND job_type = $${paramIdx++}`;
|
|
413
|
+
params.push(filters.job_type);
|
|
414
|
+
}
|
|
415
|
+
if (filters.priority !== undefined) {
|
|
416
|
+
query += ` AND priority = $${paramIdx++}`;
|
|
417
|
+
params.push(filters.priority);
|
|
418
|
+
}
|
|
419
|
+
if (filters.run_at) {
|
|
420
|
+
query += ` AND run_at = $${paramIdx++}`;
|
|
421
|
+
params.push(filters.run_at);
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
query += '\nRETURNING id';
|
|
425
|
+
const result = await client.query(query, params);
|
|
426
|
+
log(`Cancelled ${result.rowCount} jobs`);
|
|
427
|
+
return result.rowCount || 0;
|
|
428
|
+
} catch (error) {
|
|
429
|
+
log(`Error cancelling upcoming jobs: ${error}`);
|
|
430
|
+
throw error;
|
|
431
|
+
} finally {
|
|
432
|
+
client.release();
|
|
433
|
+
}
|
|
434
|
+
};
|
|
435
|
+
|
|
436
|
+
/**
|
|
437
|
+
* Get all jobs with optional pagination
|
|
438
|
+
*/
|
|
439
|
+
export const getAllJobs = async <
|
|
440
|
+
PayloadMap,
|
|
441
|
+
T extends keyof PayloadMap & string,
|
|
442
|
+
>(
|
|
443
|
+
pool: Pool,
|
|
444
|
+
limit = 100,
|
|
445
|
+
offset = 0,
|
|
446
|
+
): Promise<JobRecord<PayloadMap, T>[]> => {
|
|
447
|
+
const client = await pool.connect();
|
|
448
|
+
try {
|
|
449
|
+
const result = await client.query(
|
|
450
|
+
'SELECT * FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2',
|
|
451
|
+
[limit, offset],
|
|
452
|
+
);
|
|
453
|
+
log(`Found ${result.rows.length} jobs (all)`);
|
|
454
|
+
return result.rows.map((row) => ({
|
|
455
|
+
...row,
|
|
456
|
+
payload: row.payload,
|
|
457
|
+
timeout_ms: row.timeout_ms,
|
|
458
|
+
}));
|
|
459
|
+
} catch (error) {
|
|
460
|
+
log(`Error getting all jobs: ${error}`);
|
|
461
|
+
throw error;
|
|
462
|
+
} finally {
|
|
463
|
+
client.release();
|
|
464
|
+
}
|
|
465
|
+
};
|
|
466
|
+
|
|
467
|
+
/**
|
|
468
|
+
* Set a pending reason for unpicked jobs
|
|
469
|
+
*/
|
|
470
|
+
export const setPendingReasonForUnpickedJobs = async (
|
|
471
|
+
pool: Pool,
|
|
472
|
+
reason: string,
|
|
473
|
+
jobType?: string | string[],
|
|
474
|
+
) => {
|
|
475
|
+
const client = await pool.connect();
|
|
476
|
+
try {
|
|
477
|
+
let jobTypeFilter = '';
|
|
478
|
+
let params: any[] = [reason];
|
|
479
|
+
if (jobType) {
|
|
480
|
+
if (Array.isArray(jobType)) {
|
|
481
|
+
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
482
|
+
params.push(jobType);
|
|
483
|
+
} else {
|
|
484
|
+
jobTypeFilter = ` AND job_type = $2`;
|
|
485
|
+
params.push(jobType);
|
|
486
|
+
}
|
|
487
|
+
}
|
|
488
|
+
await client.query(
|
|
489
|
+
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
490
|
+
params,
|
|
491
|
+
);
|
|
492
|
+
} finally {
|
|
493
|
+
client.release();
|
|
494
|
+
}
|
|
495
|
+
};
|
|
496
|
+
|
|
497
|
+
/**
|
|
498
|
+
* Reclaim jobs stuck in 'processing' for too long.
|
|
499
|
+
*
|
|
500
|
+
* If a process (e.g., API route or worker) crashes after marking a job as 'processing' but before completing it, the job can remain stuck in the 'processing' state indefinitely. This can happen if the process is killed or encounters an unhandled error after updating the job status but before marking it as 'completed' or 'failed'.
|
|
501
|
+
* @param pool - The database pool
|
|
502
|
+
* @param maxProcessingTimeMinutes - Max allowed processing time in minutes (default: 10)
|
|
503
|
+
* @returns Number of jobs reclaimed
|
|
504
|
+
*/
|
|
505
|
+
export const reclaimStuckJobs = async (
|
|
506
|
+
pool: Pool,
|
|
507
|
+
maxProcessingTimeMinutes = 10,
|
|
508
|
+
): Promise<number> => {
|
|
509
|
+
const client = await pool.connect();
|
|
510
|
+
try {
|
|
511
|
+
const result = await client.query(
|
|
512
|
+
`
|
|
513
|
+
UPDATE job_queue
|
|
514
|
+
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
515
|
+
WHERE status = 'processing'
|
|
516
|
+
AND locked_at < NOW() - INTERVAL '${maxProcessingTimeMinutes} minutes'
|
|
517
|
+
RETURNING id
|
|
518
|
+
`,
|
|
519
|
+
);
|
|
520
|
+
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
521
|
+
return result.rowCount || 0;
|
|
522
|
+
} catch (error) {
|
|
523
|
+
log(`Error reclaiming stuck jobs: ${error}`);
|
|
524
|
+
throw error;
|
|
525
|
+
} finally {
|
|
526
|
+
client.release();
|
|
527
|
+
}
|
|
528
|
+
};
|
|
529
|
+
|
|
530
|
+
/**
|
|
531
|
+
* Get all events for a job, ordered by created_at ascending
|
|
532
|
+
*/
|
|
533
|
+
export const getJobEvents = async (
|
|
534
|
+
pool: Pool,
|
|
535
|
+
jobId: number,
|
|
536
|
+
): Promise<JobEvent[]> => {
|
|
537
|
+
const client = await pool.connect();
|
|
538
|
+
try {
|
|
539
|
+
const res = await client.query(
|
|
540
|
+
'SELECT * FROM job_events WHERE job_id = $1 ORDER BY created_at ASC',
|
|
541
|
+
[jobId],
|
|
542
|
+
);
|
|
543
|
+
return res.rows;
|
|
544
|
+
} finally {
|
|
545
|
+
client.release();
|
|
546
|
+
}
|
|
547
|
+
};
|
package/src/test-util.ts
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import { Pool } from 'pg';
|
|
2
|
+
import { randomUUID } from 'crypto';
|
|
3
|
+
import { join } from 'path';
|
|
4
|
+
import { runner } from 'node-pg-migrate';
|
|
5
|
+
|
|
6
|
+
export async function createTestDbAndPool() {
|
|
7
|
+
const baseDatabaseUrl =
|
|
8
|
+
process.env.PG_TEST_URL ||
|
|
9
|
+
'postgres://postgres:postgres@localhost:5432/postgres';
|
|
10
|
+
const dbName = `test_db_${randomUUID().replace(/-/g, '')}`;
|
|
11
|
+
|
|
12
|
+
// 1. Connect to the default database to create a new test database
|
|
13
|
+
const adminPool = new Pool({ connectionString: baseDatabaseUrl });
|
|
14
|
+
await adminPool.query(`CREATE DATABASE ${dbName}`);
|
|
15
|
+
await adminPool.end();
|
|
16
|
+
|
|
17
|
+
// 2. Connect to the new test database
|
|
18
|
+
const testDbUrl = baseDatabaseUrl.replace(/(\/)[^/]+$/, `/${dbName}`);
|
|
19
|
+
const pool = new Pool({ connectionString: testDbUrl });
|
|
20
|
+
|
|
21
|
+
// Wait a bit to ensure DB visibility
|
|
22
|
+
await new Promise((r) => setTimeout(r, 50));
|
|
23
|
+
|
|
24
|
+
// 3. Run migrations
|
|
25
|
+
try {
|
|
26
|
+
await runner({
|
|
27
|
+
databaseUrl: testDbUrl,
|
|
28
|
+
dir: join(__dirname, '../migrations'),
|
|
29
|
+
direction: 'up',
|
|
30
|
+
count: Infinity,
|
|
31
|
+
migrationsTable: 'pgmigrations',
|
|
32
|
+
});
|
|
33
|
+
} catch (error) {
|
|
34
|
+
console.error(error);
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
return { pool, dbName, testDbUrl };
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
export async function destroyTestDb(dbName: string) {
|
|
41
|
+
const baseDatabaseUrl =
|
|
42
|
+
process.env.PG_TEST_URL ||
|
|
43
|
+
'postgres://postgres:postgres@localhost:5432/postgres';
|
|
44
|
+
const adminPool = new Pool({ connectionString: baseDatabaseUrl });
|
|
45
|
+
// Terminate all connections to the test database before dropping
|
|
46
|
+
await adminPool.query(
|
|
47
|
+
`
|
|
48
|
+
SELECT pg_terminate_backend(pid)
|
|
49
|
+
FROM pg_stat_activity
|
|
50
|
+
WHERE datname = $1 AND pid <> pg_backend_pid()
|
|
51
|
+
`,
|
|
52
|
+
[dbName],
|
|
53
|
+
);
|
|
54
|
+
await adminPool.query(`DROP DATABASE IF EXISTS ${dbName}`);
|
|
55
|
+
await adminPool.end();
|
|
56
|
+
}
|