@nicnocquee/dataqueue 1.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/cli.cjs +38 -0
- package/dist/index.cjs +663 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +227 -0
- package/dist/index.d.ts +227 -0
- package/dist/index.js +659 -0
- package/dist/index.js.map +1 -0
- package/migrations/1751131910823_initial.sql +32 -0
- package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +7 -0
- package/migrations/1751186053000_add_job_events_table.sql +29 -0
- package/package.json +67 -0
- package/src/db-util.ts +7 -0
- package/src/index.test.ts +282 -0
- package/src/index.ts +97 -0
- package/src/log-context.ts +20 -0
- package/src/processor.test.ts +478 -0
- package/src/processor.ts +242 -0
- package/src/queue.test.ts +502 -0
- package/src/queue.ts +547 -0
- package/src/test-util.ts +56 -0
- package/src/types.ts +247 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) Nico Prananta 2024
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/cli.cjs
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
#!/usr/bin/env node

// CLI entry point for dataqueue. Supports a single command:
//   dataqueue-cli migrate  -- runs the bundled node-pg-migrate migrations.
// The Postgres connection string is read from PG_DATAQUEUE_DATABASE.

const { spawnSync } = require('child_process');
const path = require('path');

function printUsage() {
  console.log('Usage: dataqueue-cli migrate');
  process.exit(1);
}

const [, , command] = process.argv;

if (command === 'migrate') {
  const migrationsDir = path.join(__dirname, 'migrations');
  const dbUrl = process.env.PG_DATAQUEUE_DATABASE;
  // NOTE: deliberately never printed -- it is a credential-bearing
  // connection string and must not leak into logs or CI output.
  if (!dbUrl) {
    console.error(
      'Error: PG_DATAQUEUE_DATABASE environment variable must be set to your Postgres connection string.',
    );
    process.exit(1);
  }
  const result = spawnSync(
    'npx',
    [
      'node-pg-migrate',
      'up',
      '-d',
      'PG_DATAQUEUE_DATABASE', // node-pg-migrate resolves the env var by name
      '-m',
      migrationsDir,
    ],
    { stdio: 'inherit' },
  );
  if (result.error) {
    // spawnSync failed to launch the child at all (e.g. ENOENT);
    // result.status is null in this case, so report and fail explicitly.
    console.error(`Error: failed to run node-pg-migrate: ${result.error.message}`);
    process.exit(1);
  }
  // status is null when the child was killed by a signal; treat that as failure
  // instead of accidentally exiting 0.
  process.exit(result.status ?? 1);
} else {
  printUsage();
}
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,663 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
var async_hooks = require('async_hooks');
|
|
4
|
+
var pg = require('pg');
|
|
5
|
+
|
|
6
|
+
// src/types.ts
|
|
7
|
+
// Job lifecycle event names persisted to the job_events table.
var JobEventType = {
  Added: "added",
  Processing: "processing",
  Completed: "completed",
  Failed: "failed",
  Cancelled: "cancelled",
  Retried: "retried"
};
// Why a processed job ended up in the failed state.
var FailureReason = {
  Timeout: "timeout",
  HandlerError: "handler_error",
  NoHandler: "no_handler"
};
|
|
22
|
+
// Per-async-context logging flag so verbose mode follows each call chain.
var logStorage = new async_hooks.AsyncLocalStorage();
// Bind { verbose } to the current async execution scope.
var setLogContext = (verbose) => {
  logStorage.enterWith({ verbose });
};
// Read the context for the current async scope (undefined if never set).
var getLogContext = () => logStorage.getStore();
// Emit message only when the current context has verbose enabled.
var log = (message) => {
  if (getLogContext()?.verbose) {
    console.log(message);
  }
};
|
|
35
|
+
|
|
36
|
+
// src/queue.ts
|
|
37
|
+
// Best-effort audit trail: inserts one row into job_events. Failures are
// logged and swallowed so event recording can never break job processing.
var recordJobEvent = async (pool, jobId, eventType, metadata) => {
  const client = await pool.connect();
  const serialized = metadata ? JSON.stringify(metadata) : null;
  try {
    await client.query(
      `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
      [jobId, eventType, serialized]
    );
  } catch (error) {
    log(`Error recording job event for job ${jobId}: ${error}`);
  } finally {
    client.release();
  }
};
|
|
50
|
+
// Inserts a new job row and records an "added" event; returns the new id.
// When run_at is provided the job is scheduled for that time; otherwise the
// run_at column is left to its table default.
var addJob = async (pool, {
  job_type,
  payload,
  max_attempts = 3,
  priority = 0,
  run_at = null,
  timeoutMs = void 0
}) => {
  const client = await pool.connect();
  try {
    const timeout = timeoutMs ?? null;
    let result;
    if (run_at) {
      result = await client.query(
        `INSERT INTO job_queue
       (job_type, payload, max_attempts, priority, run_at, timeout_ms)
       VALUES ($1, $2, $3, $4, $5, $6)
       RETURNING id`,
        [job_type, payload, max_attempts, priority, run_at, timeout]
      );
      log(
        `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, run_at ${run_at.toISOString()}, priority ${priority}, max_attempts ${max_attempts} job_type ${job_type}`
      );
    } else {
      result = await client.query(
        `INSERT INTO job_queue
       (job_type, payload, max_attempts, priority, timeout_ms)
       VALUES ($1, $2, $3, $4, $5)
       RETURNING id`,
        [job_type, payload, max_attempts, priority, timeout]
      );
      log(
        `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, priority ${priority}, max_attempts ${max_attempts} job_type ${job_type}`
      );
    }
    const jobId = result.rows[0].id;
    await recordJobEvent(pool, jobId, "added" /* Added */, {
      job_type,
      payload
    });
    return jobId;
  } catch (error) {
    log(`Error adding job: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
96
|
+
// Fetches a single job row by id; returns null when the id does not exist.
var getJob = async (pool, id) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query("SELECT * FROM job_queue WHERE id = $1", [
      id
    ]);
    const row = rows[0];
    if (!row) {
      log(`Job ${id} not found`);
      return null;
    }
    log(`Found job ${id}`);
    return {
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms,
      failure_reason: row.failure_reason
    };
  } catch (error) {
    log(`Error getting job ${id}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
120
|
+
// Lists jobs with a given status, newest first, with limit/offset paging.
var getJobsByStatus = async (pool, status, limit = 100, offset = 0) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(
      "SELECT * FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3",
      [status, limit, offset]
    );
    log(`Found ${rows.length} jobs by status ${status}`);
    return rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms,
      failure_reason: row.failure_reason
    }));
  } catch (error) {
    log(`Error getting jobs by status ${status}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
141
|
+
// Atomically claims up to batchSize runnable jobs for workerId.
// Uses FOR UPDATE SKIP LOCKED so concurrent workers never grab the same row,
// marks the claimed rows 'processing', and records a "processing" event each.
// jobType may be a single type or an array of types to restrict the claim.
var getNextBatch = async (pool, workerId, batchSize = 10, jobType) => {
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    const params = [workerId, batchSize];
    let jobTypeFilter = "";
    if (jobType) {
      jobTypeFilter = Array.isArray(jobType) ? ` AND job_type = ANY($3)` : ` AND job_type = $3`;
      params.push(jobType);
    }
    const result = await client.query(
      `
    UPDATE job_queue
    SET status = 'processing',
        locked_at = NOW(),
        locked_by = $1,
        attempts = attempts + 1,
        updated_at = NOW(),
        pending_reason = NULL,
        started_at = COALESCE(started_at, NOW()),
        last_retried_at = CASE WHEN attempts > 0 THEN NOW() ELSE last_retried_at END
    WHERE id IN (
      SELECT id FROM job_queue
      WHERE (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
      AND (attempts < max_attempts)
      AND run_at <= NOW()
      ${jobTypeFilter}
      ORDER BY priority DESC, created_at ASC
      LIMIT $2
      FOR UPDATE SKIP LOCKED
    )
    RETURNING *
    `,
      params
    );
    log(`Found ${result.rows.length} jobs to process`);
    await client.query("COMMIT");
    // Events are recorded after COMMIT so the claim itself is never rolled
    // back by a failure in the (best-effort) event insert.
    for (const row of result.rows) {
      await recordJobEvent(pool, row.id, "processing" /* Processing */);
    }
    return result.rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms
    }));
  } catch (error) {
    log(`Error getting next batch: ${error}`);
    await client.query("ROLLBACK");
    throw error;
  } finally {
    client.release();
  }
};
|
|
199
|
+
// Marks a job completed and records a "completed" event.
var completeJob = async (pool, jobId) => {
  const sql = `
  UPDATE job_queue
  SET status = 'completed', updated_at = NOW(), completed_at = NOW()
  WHERE id = $1
  `;
  const client = await pool.connect();
  try {
    await client.query(sql, [jobId]);
    await recordJobEvent(pool, jobId, "completed" /* Completed */);
  } catch (error) {
    log(`Error completing job ${jobId}: ${error}`);
    throw error;
  } finally {
    // NOTE: logged from finally, so this line appears even when the UPDATE failed.
    log(`Completed job ${jobId}`);
    client.release();
  }
};
|
|
219
|
+
// Marks a job failed: schedules an exponential-backoff retry while attempts
// remain (2^attempts minutes), appends the error to error_history (jsonb
// array), stamps failure_reason, and records a "failed" event.
var failJob = async (pool, jobId, error, failureReason) => {
  const client = await pool.connect();
  try {
    const message = error.message || String(error);
    const historyEntry = JSON.stringify([
      {
        message,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      }
    ]);
    await client.query(
      `
    UPDATE job_queue
    SET status = 'failed',
        updated_at = NOW(),
        next_attempt_at = CASE
          WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
          ELSE NULL
        END,
        error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
        failure_reason = $3,
        last_failed_at = NOW()
    WHERE id = $1
    `,
      [jobId, historyEntry, failureReason ?? null]
    );
    await recordJobEvent(pool, jobId, "failed" /* Failed */, {
      message,
      failureReason
    });
  } catch (error2) {
    log(`Error failing job ${jobId}: ${error2}`);
    throw error2;
  } finally {
    // NOTE: runs even when the UPDATE itself failed.
    log(`Failed job ${jobId}`);
    client.release();
  }
};
|
|
259
|
+
// Resets a job to 'pending' (clearing its lock) so it is picked up again,
// and records a "retried" event.
var retryJob = async (pool, jobId) => {
  const sql = `
  UPDATE job_queue
  SET status = 'pending',
      updated_at = NOW(),
      locked_at = NULL,
      locked_by = NULL,
      next_attempt_at = NOW(),
      last_retried_at = NOW()
  WHERE id = $1
  `;
  const client = await pool.connect();
  try {
    await client.query(sql, [jobId]);
    await recordJobEvent(pool, jobId, "retried" /* Retried */);
  } catch (error) {
    log(`Error retrying job ${jobId}: ${error}`);
    throw error;
  } finally {
    log(`Retried job ${jobId}`);
    client.release();
  }
};
|
|
284
|
+
// Deletes completed jobs older than daysToKeep days; returns the delete count.
// daysToKeep is interpolated into an INTERVAL literal (Postgres cannot bind a
// parameter inside one directly), so it is strictly validated as a
// non-negative integer first to rule out SQL injection through this argument.
var cleanupOldJobs = async (pool, daysToKeep = 30) => {
  const days = Number(daysToKeep);
  if (!Number.isInteger(days) || days < 0) {
    throw new Error(
      `cleanupOldJobs: daysToKeep must be a non-negative integer, got ${daysToKeep}`
    );
  }
  const client = await pool.connect();
  try {
    const result = await client.query(`
    DELETE FROM job_queue
    WHERE status = 'completed'
    AND updated_at < NOW() - INTERVAL '${days} days'
    RETURNING id
  `);
    log(`Deleted ${result.rowCount} old jobs`);
    return result.rowCount || 0;
  } catch (error) {
    log(`Error cleaning up old jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
302
|
+
// Cancels a job, but only while it is still 'pending'; records a
// "cancelled" event (the event is recorded regardless of whether the
// UPDATE matched a row, mirroring the original behavior).
var cancelJob = async (pool, jobId) => {
  const sql = `
  UPDATE job_queue
  SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW()
  WHERE id = $1 AND status = 'pending'
  `;
  const client = await pool.connect();
  try {
    await client.query(sql, [jobId]);
    await recordJobEvent(pool, jobId, "cancelled" /* Cancelled */);
  } catch (error) {
    log(`Error cancelling job ${jobId}: ${error}`);
    throw error;
  } finally {
    log(`Cancelled job ${jobId}`);
    client.release();
  }
};
|
|
322
|
+
// Cancels all pending jobs, optionally narrowed by job_type, priority and/or
// run_at filters. Returns the number of jobs cancelled.
var cancelAllUpcomingJobs = async (pool, filters) => {
  const client = await pool.connect();
  try {
    const clauses = [];
    const params = [];
    if (filters?.job_type) {
      params.push(filters.job_type);
      clauses.push(` AND job_type = $${params.length}`);
    }
    if (filters && filters.priority !== void 0) {
      params.push(filters.priority);
      clauses.push(` AND priority = $${params.length}`);
    }
    if (filters?.run_at) {
      params.push(filters.run_at);
      clauses.push(` AND run_at = $${params.length}`);
    }
    const query = `
  UPDATE job_queue
  SET status = 'cancelled', updated_at = NOW()
  WHERE status = 'pending'` + clauses.join("") + "\nRETURNING id";
    const result = await client.query(query, params);
    log(`Cancelled ${result.rowCount} jobs`);
    return result.rowCount || 0;
  } catch (error) {
    log(`Error cancelling upcoming jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
356
|
+
// Lists all jobs regardless of status, newest first, with limit/offset paging.
var getAllJobs = async (pool, limit = 100, offset = 0) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(
      "SELECT * FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2",
      [limit, offset]
    );
    log(`Found ${rows.length} jobs (all)`);
    return rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms
    }));
  } catch (error) {
    log(`Error getting all jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
376
|
+
// Stamps a pending_reason on all pending jobs (optionally restricted to one
// job type or an array of types), used to surface why jobs are not being
// picked up (e.g. no handler is registered for their type).
var setPendingReasonForUnpickedJobs = async (pool, reason, jobType) => {
  const client = await pool.connect();
  try {
    const params = [reason];
    let jobTypeFilter = "";
    if (jobType) {
      jobTypeFilter = Array.isArray(jobType) ? ` AND job_type = ANY($2)` : ` AND job_type = $2`;
      params.push(jobType);
    }
    await client.query(
      `UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
      params
    );
  } finally {
    client.release();
  }
};
|
|
398
|
+
// Returns 'processing' jobs whose lock is older than maxProcessingTimeMinutes
// back to 'pending' (e.g. after a worker crash); returns the reclaimed count.
// The minutes value is interpolated into an INTERVAL literal, so it is
// strictly validated as a non-negative integer first (no SQL injection).
var reclaimStuckJobs = async (pool, maxProcessingTimeMinutes = 10) => {
  const minutes = Number(maxProcessingTimeMinutes);
  if (!Number.isInteger(minutes) || minutes < 0) {
    throw new Error(
      `reclaimStuckJobs: maxProcessingTimeMinutes must be a non-negative integer, got ${maxProcessingTimeMinutes}`
    );
  }
  const client = await pool.connect();
  try {
    const result = await client.query(
      `
    UPDATE job_queue
    SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
    WHERE status = 'processing'
    AND locked_at < NOW() - INTERVAL '${minutes} minutes'
    RETURNING id
    `
    );
    log(`Reclaimed ${result.rowCount} stuck jobs`);
    return result.rowCount || 0;
  } catch (error) {
    log(`Error reclaiming stuck jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
|
|
419
|
+
// Returns the audit-trail rows from job_events for one job, oldest first.
var getJobEvents = async (pool, jobId) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(
      "SELECT * FROM job_events WHERE job_id = $1 ORDER BY created_at ASC",
      [jobId]
    );
    return rows;
  } finally {
    client.release();
  }
};
|
|
431
|
+
|
|
432
|
+
// src/processor.ts
|
|
433
|
+
// Runs a single claimed job through its registered handler.
// - No handler: stamps pending_reason for that job type and fails the job
//   with reason "no_handler".
// - timeout_ms set: races the handler against a timer; on timeout the
//   handler's AbortSignal is aborted and the job fails with reason "timeout".
// - Handler errors fail the job with reason "handler_error".
async function processJobWithHandlers(pool, job, jobHandlers) {
  const handler = jobHandlers[job.job_type];
  if (!handler) {
    await setPendingReasonForUnpickedJobs(
      pool,
      `No handler registered for job type: ${job.job_type}`,
      job.job_type
    );
    await failJob(
      pool,
      job.id,
      new Error(`No handler registered for job type: ${job.job_type}`),
      "no_handler" /* NoHandler */
    );
    return;
  }
  const timeoutMs = job.timeout_ms ?? void 0;
  let timeoutId;
  const controller = new AbortController();
  try {
    const jobPromise = handler(job.payload, controller.signal);
    if (timeoutMs && timeoutMs > 0) {
      // FIX: if the timer wins the race, jobPromise is abandoned but may
      // still reject later; attach a no-op catch so that late rejection
      // cannot surface as an unhandled rejection and crash the process.
      if (jobPromise && typeof jobPromise.catch === "function") {
        jobPromise.catch(() => {});
      }
      await Promise.race([
        jobPromise,
        new Promise((_, reject) => {
          timeoutId = setTimeout(() => {
            controller.abort();
            const timeoutError = new Error(
              `Job timed out after ${timeoutMs} ms`
            );
            timeoutError.failureReason = "timeout" /* Timeout */;
            reject(timeoutError);
          }, timeoutMs);
        })
      ]);
    } else {
      await jobPromise;
    }
    if (timeoutId) clearTimeout(timeoutId);
    await completeJob(pool, job.id);
  } catch (error) {
    if (timeoutId) clearTimeout(timeoutId);
    console.error(`Error processing job ${job.id}:`, error);
    let failureReason = "handler_error" /* HandlerError */;
    if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
      failureReason = "timeout" /* Timeout */;
    }
    await failJob(
      pool,
      job.id,
      error instanceof Error ? error : new Error(String(error)),
      failureReason
    );
  }
}
|
|
488
|
+
// Claims one batch of jobs and processes them, optionally capping how many
// handlers run at once. Resolves with the number of jobs claimed.
async function processBatchWithHandlers(pool, workerId, batchSize, jobType, jobHandlers, concurrency) {
  const jobs = await getNextBatch(
    pool,
    workerId,
    batchSize,
    jobType
  );
  // No cap (or cap covers the whole batch): run everything in parallel.
  if (!concurrency || concurrency >= jobs.length) {
    await Promise.all(
      jobs.map((job) => processJobWithHandlers(pool, job, jobHandlers))
    );
    return jobs.length;
  }
  // Bounded pool: `concurrency` workers each pull from a shared cursor.
  let cursor = 0;
  const worker = async () => {
    while (cursor < jobs.length) {
      const job = jobs[cursor++];
      try {
        await processJobWithHandlers(pool, job, jobHandlers);
      } catch {
        // Per-job errors never abort the batch (matches original behavior;
        // processJobWithHandlers handles its own failures anyway).
      }
    }
  };
  await Promise.all(Array.from({ length: concurrency }, worker));
  return jobs.length;
}
|
|
524
|
+
// Builds a job processor bound to a pool and a handler map. The processor
// can either poll continuously in the background or drain one batch
// synchronously via start().
var createProcessor = (pool, handlers, options = {}) => {
  const {
    workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
    batchSize = 10,
    pollInterval = 5e3,
    onError = (error) => console.error("Job processor error:", error),
    jobType,
    concurrency = 3
  } = options;
  let running = false;
  let intervalId = null;
  setLogContext(options.verbose ?? false);
  // Claims and processes one batch. Returns the number of jobs claimed;
  // errors are funneled into onError and reported as 0 processed.
  const processJobs = async () => {
    if (!running) return 0;
    const typeSuffix = jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : "";
    log(`Processing jobs with workerId: ${workerId}${typeSuffix}`);
    try {
      return await processBatchWithHandlers(
        pool,
        workerId,
        batchSize,
        jobType,
        handlers,
        concurrency
      );
    } catch (error) {
      onError(error instanceof Error ? error : new Error(String(error)));
      return 0;
    }
  };
  return {
    /**
     * Start the job processor in the background.
     * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
     * - You have to call the stop method to stop the processor.
     */
    startInBackground: () => {
      if (running) return;
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      // Drain eagerly: keep pulling batches back-to-back while each batch
      // comes back full, then rely on the interval for steady-state polling.
      const drain = async () => {
        if (!running) return;
        const processed = await processJobs();
        if (processed === batchSize && running) {
          setImmediate(drain);
        }
      };
      drain();
      intervalId = setInterval(processJobs, pollInterval);
    },
    /**
     * Stop the job processor that runs in the background
     */
    stop: () => {
      log(`Stopping job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearInterval(intervalId);
        intervalId = null;
      }
    },
    /**
     * Start the job processor synchronously.
     * - This will process all jobs immediately and then stop.
     * - The pollInterval is ignored.
     */
    start: async () => {
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const processed = await processJobs();
      running = false;
      return processed;
    },
    isRunning: () => running
  };
};
|
|
602
|
+
// Thin wrapper around pg.Pool so callers do not import pg directly.
var createPool = (config) => new pg.Pool(config);
|
|
605
|
+
|
|
606
|
+
// src/index.ts
|
|
607
|
+
// Public entry point: creates a connection pool and returns the job-queue API.
// Verbose-aware operations are wrapped with withLogContext so every call
// re-establishes the log context on its own async chain.
var initJobQueue = async (config) => {
  const { databaseConfig } = config;
  const pool = createPool(databaseConfig);
  const verbose = config.verbose ?? false;
  setLogContext(verbose);
  const wrap = (fn) => withLogContext(fn, verbose);
  return {
    // Job queue operations
    addJob: wrap((job) => addJob(pool, job)),
    getJob: wrap((id) => getJob(pool, id)),
    getJobsByStatus: wrap(
      (status, limit, offset) => getJobsByStatus(pool, status, limit, offset)
    ),
    getAllJobs: wrap((limit, offset) => getAllJobs(pool, limit, offset)),
    retryJob: (jobId) => retryJob(pool, jobId),
    cleanupOldJobs: (daysToKeep) => cleanupOldJobs(pool, daysToKeep),
    cancelJob: wrap((jobId) => cancelJob(pool, jobId)),
    cancelAllUpcomingJobs: wrap((filters) => cancelAllUpcomingJobs(pool, filters)),
    reclaimStuckJobs: wrap(
      (maxProcessingTimeMinutes) => reclaimStuckJobs(pool, maxProcessingTimeMinutes)
    ),
    // Job processing
    createProcessor: (handlers, options) => createProcessor(pool, handlers, options),
    // Advanced access (for custom operations)
    getPool: () => pool,
    // Job events
    getJobEvents: wrap((jobId) => getJobEvents(pool, jobId))
  };
};
|
|
654
|
+
// Wraps fn so each invocation first pins the verbose flag into the
// async-local log context, then delegates to fn unchanged.
var withLogContext = (fn, verbose) => {
  return (...args) => {
    setLogContext(verbose);
    return fn(...args);
  };
};
|
|
658
|
+
|
|
659
|
+
// Public CommonJS surface of the bundle.
exports.FailureReason = FailureReason;
exports.JobEventType = JobEventType;
exports.initJobQueue = initJobQueue;
//# sourceMappingURL=index.cjs.map
|