@nicnocquee/dataqueue 1.24.0 → 1.26.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -0
- package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +2 -2
- package/migrations/1751186053000_add_job_events_table.sql +12 -8
- package/migrations/1751984773000_add_tags_to_job_queue.sql +1 -1
- package/migrations/1765809419000_add_force_kill_on_timeout_to_job_queue.sql +1 -1
- package/migrations/1771100000000_add_idempotency_key_to_job_queue.sql +7 -0
- package/migrations/1781200000000_add_wait_support.sql +12 -0
- package/migrations/1781200000001_create_waitpoints_table.sql +18 -0
- package/migrations/1781200000002_add_performance_indexes.sql +34 -0
- package/migrations/1781200000003_add_progress_to_job_queue.sql +7 -0
- package/package.json +20 -6
- package/src/backend.ts +163 -0
- package/src/backends/postgres.ts +1111 -0
- package/src/backends/redis-scripts.ts +533 -0
- package/src/backends/redis.test.ts +543 -0
- package/src/backends/redis.ts +834 -0
- package/src/db-util.ts +4 -2
- package/src/index.test.ts +6 -1
- package/src/index.ts +99 -36
- package/src/processor.test.ts +559 -18
- package/src/processor.ts +512 -44
- package/src/queue.test.ts +217 -6
- package/src/queue.ts +311 -902
- package/src/test-util.ts +32 -0
- package/src/types.ts +349 -16
- package/src/wait.test.ts +698 -0
- package/dist/cli.cjs +0 -88
- package/dist/cli.cjs.map +0 -1
- package/dist/cli.d.cts +0 -12
- package/dist/cli.d.ts +0 -12
- package/dist/cli.js +0 -81
- package/dist/cli.js.map +0 -1
- package/dist/index.cjs +0 -1420
- package/dist/index.cjs.map +0 -1
- package/dist/index.d.cts +0 -445
- package/dist/index.d.ts +0 -445
- package/dist/index.js +0 -1410
- package/dist/index.js.map +0 -1
package/dist/index.cjs
DELETED
|
@@ -1,1420 +0,0 @@
|
|
|
1
|
-
'use strict';
|
|
2
|
-
|
|
3
|
-
var async_hooks = require('async_hooks');
|
|
4
|
-
var worker_threads = require('worker_threads');
|
|
5
|
-
var pg = require('pg');
|
|
6
|
-
var pgConnectionString = require('pg-connection-string');
|
|
7
|
-
var fs = require('fs');
|
|
8
|
-
|
|
9
|
-
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
10
|
-
|
|
11
|
-
var fs__default = /*#__PURE__*/_interopDefault(fs);
|
|
12
|
-
|
|
13
|
-
// src/types.ts
|
|
14
|
-
var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
15
|
-
JobEventType2["Added"] = "added";
|
|
16
|
-
JobEventType2["Processing"] = "processing";
|
|
17
|
-
JobEventType2["Completed"] = "completed";
|
|
18
|
-
JobEventType2["Failed"] = "failed";
|
|
19
|
-
JobEventType2["Cancelled"] = "cancelled";
|
|
20
|
-
JobEventType2["Retried"] = "retried";
|
|
21
|
-
JobEventType2["Edited"] = "edited";
|
|
22
|
-
return JobEventType2;
|
|
23
|
-
})(JobEventType || {});
|
|
24
|
-
var FailureReason = /* @__PURE__ */ ((FailureReason3) => {
|
|
25
|
-
FailureReason3["Timeout"] = "timeout";
|
|
26
|
-
FailureReason3["HandlerError"] = "handler_error";
|
|
27
|
-
FailureReason3["NoHandler"] = "no_handler";
|
|
28
|
-
return FailureReason3;
|
|
29
|
-
})(FailureReason || {});
|
|
30
|
-
var logStorage = new async_hooks.AsyncLocalStorage();
|
|
31
|
-
var setLogContext = (verbose) => {
|
|
32
|
-
logStorage.enterWith({ verbose });
|
|
33
|
-
};
|
|
34
|
-
var getLogContext = () => {
|
|
35
|
-
return logStorage.getStore();
|
|
36
|
-
};
|
|
37
|
-
var log = (message) => {
|
|
38
|
-
const context = getLogContext();
|
|
39
|
-
if (context?.verbose) {
|
|
40
|
-
console.log(message);
|
|
41
|
-
}
|
|
42
|
-
};
|
|
43
|
-
|
|
44
|
-
// src/queue.ts
|
|
45
|
-
var recordJobEvent = async (pool, jobId, eventType, metadata) => {
|
|
46
|
-
const client = await pool.connect();
|
|
47
|
-
try {
|
|
48
|
-
await client.query(
|
|
49
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
50
|
-
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
51
|
-
);
|
|
52
|
-
} catch (error) {
|
|
53
|
-
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
54
|
-
} finally {
|
|
55
|
-
client.release();
|
|
56
|
-
}
|
|
57
|
-
};
|
|
58
|
-
var addJob = async (pool, {
|
|
59
|
-
jobType,
|
|
60
|
-
payload,
|
|
61
|
-
maxAttempts = 3,
|
|
62
|
-
priority = 0,
|
|
63
|
-
runAt = null,
|
|
64
|
-
timeoutMs = void 0,
|
|
65
|
-
forceKillOnTimeout = false,
|
|
66
|
-
tags = void 0
|
|
67
|
-
}) => {
|
|
68
|
-
const client = await pool.connect();
|
|
69
|
-
try {
|
|
70
|
-
let result;
|
|
71
|
-
if (runAt) {
|
|
72
|
-
result = await client.query(
|
|
73
|
-
`INSERT INTO job_queue
|
|
74
|
-
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags)
|
|
75
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
76
|
-
RETURNING id`,
|
|
77
|
-
[
|
|
78
|
-
jobType,
|
|
79
|
-
payload,
|
|
80
|
-
maxAttempts,
|
|
81
|
-
priority,
|
|
82
|
-
runAt,
|
|
83
|
-
timeoutMs ?? null,
|
|
84
|
-
forceKillOnTimeout ?? false,
|
|
85
|
-
tags ?? null
|
|
86
|
-
]
|
|
87
|
-
);
|
|
88
|
-
log(
|
|
89
|
-
`Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, runAt ${runAt.toISOString()}, priority ${priority}, maxAttempts ${maxAttempts} jobType ${jobType}, tags ${JSON.stringify(tags)}`
|
|
90
|
-
);
|
|
91
|
-
} else {
|
|
92
|
-
result = await client.query(
|
|
93
|
-
`INSERT INTO job_queue
|
|
94
|
-
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags)
|
|
95
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
|
96
|
-
RETURNING id`,
|
|
97
|
-
[
|
|
98
|
-
jobType,
|
|
99
|
-
payload,
|
|
100
|
-
maxAttempts,
|
|
101
|
-
priority,
|
|
102
|
-
timeoutMs ?? null,
|
|
103
|
-
forceKillOnTimeout ?? false,
|
|
104
|
-
tags ?? null
|
|
105
|
-
]
|
|
106
|
-
);
|
|
107
|
-
log(
|
|
108
|
-
`Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, priority ${priority}, maxAttempts ${maxAttempts} jobType ${jobType}, tags ${JSON.stringify(tags)}`
|
|
109
|
-
);
|
|
110
|
-
}
|
|
111
|
-
await recordJobEvent(pool, result.rows[0].id, "added" /* Added */, {
|
|
112
|
-
jobType,
|
|
113
|
-
payload,
|
|
114
|
-
tags
|
|
115
|
-
});
|
|
116
|
-
return result.rows[0].id;
|
|
117
|
-
} catch (error) {
|
|
118
|
-
log(`Error adding job: ${error}`);
|
|
119
|
-
throw error;
|
|
120
|
-
} finally {
|
|
121
|
-
client.release();
|
|
122
|
-
}
|
|
123
|
-
};
|
|
124
|
-
var getJob = async (pool, id) => {
|
|
125
|
-
const client = await pool.connect();
|
|
126
|
-
try {
|
|
127
|
-
const result = await client.query(
|
|
128
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue WHERE id = $1`,
|
|
129
|
-
[id]
|
|
130
|
-
);
|
|
131
|
-
if (result.rows.length === 0) {
|
|
132
|
-
log(`Job ${id} not found`);
|
|
133
|
-
return null;
|
|
134
|
-
}
|
|
135
|
-
log(`Found job ${id}`);
|
|
136
|
-
const job = result.rows[0];
|
|
137
|
-
return {
|
|
138
|
-
...job,
|
|
139
|
-
payload: job.payload,
|
|
140
|
-
timeoutMs: job.timeoutMs,
|
|
141
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
142
|
-
failureReason: job.failureReason
|
|
143
|
-
};
|
|
144
|
-
} catch (error) {
|
|
145
|
-
log(`Error getting job ${id}: ${error}`);
|
|
146
|
-
throw error;
|
|
147
|
-
} finally {
|
|
148
|
-
client.release();
|
|
149
|
-
}
|
|
150
|
-
};
|
|
151
|
-
var getJobsByStatus = async (pool, status, limit = 100, offset = 0) => {
|
|
152
|
-
const client = await pool.connect();
|
|
153
|
-
try {
|
|
154
|
-
const result = await client.query(
|
|
155
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
|
|
156
|
-
[status, limit, offset]
|
|
157
|
-
);
|
|
158
|
-
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
159
|
-
return result.rows.map((job) => ({
|
|
160
|
-
...job,
|
|
161
|
-
payload: job.payload,
|
|
162
|
-
timeoutMs: job.timeoutMs,
|
|
163
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
164
|
-
failureReason: job.failureReason
|
|
165
|
-
}));
|
|
166
|
-
} catch (error) {
|
|
167
|
-
log(`Error getting jobs by status ${status}: ${error}`);
|
|
168
|
-
throw error;
|
|
169
|
-
} finally {
|
|
170
|
-
client.release();
|
|
171
|
-
}
|
|
172
|
-
};
|
|
173
|
-
var getNextBatch = async (pool, workerId, batchSize = 10, jobType) => {
|
|
174
|
-
const client = await pool.connect();
|
|
175
|
-
try {
|
|
176
|
-
await client.query("BEGIN");
|
|
177
|
-
let jobTypeFilter = "";
|
|
178
|
-
let params = [workerId, batchSize];
|
|
179
|
-
if (jobType) {
|
|
180
|
-
if (Array.isArray(jobType)) {
|
|
181
|
-
jobTypeFilter = ` AND job_type = ANY($3)`;
|
|
182
|
-
params.push(jobType);
|
|
183
|
-
} else {
|
|
184
|
-
jobTypeFilter = ` AND job_type = $3`;
|
|
185
|
-
params.push(jobType);
|
|
186
|
-
}
|
|
187
|
-
}
|
|
188
|
-
const result = await client.query(
|
|
189
|
-
`
|
|
190
|
-
UPDATE job_queue
|
|
191
|
-
SET status = 'processing',
|
|
192
|
-
locked_at = NOW(),
|
|
193
|
-
locked_by = $1,
|
|
194
|
-
attempts = attempts + 1,
|
|
195
|
-
updated_at = NOW(),
|
|
196
|
-
pending_reason = NULL,
|
|
197
|
-
started_at = COALESCE(started_at, NOW()),
|
|
198
|
-
last_retried_at = CASE WHEN attempts > 0 THEN NOW() ELSE last_retried_at END
|
|
199
|
-
WHERE id IN (
|
|
200
|
-
SELECT id FROM job_queue
|
|
201
|
-
WHERE (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
|
|
202
|
-
AND (attempts < max_attempts)
|
|
203
|
-
AND run_at <= NOW()
|
|
204
|
-
${jobTypeFilter}
|
|
205
|
-
ORDER BY priority DESC, created_at ASC
|
|
206
|
-
LIMIT $2
|
|
207
|
-
FOR UPDATE SKIP LOCKED
|
|
208
|
-
)
|
|
209
|
-
RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason"
|
|
210
|
-
`,
|
|
211
|
-
params
|
|
212
|
-
);
|
|
213
|
-
log(`Found ${result.rows.length} jobs to process`);
|
|
214
|
-
await client.query("COMMIT");
|
|
215
|
-
for (const row of result.rows) {
|
|
216
|
-
await recordJobEvent(pool, row.id, "processing" /* Processing */);
|
|
217
|
-
}
|
|
218
|
-
return result.rows.map((job) => ({
|
|
219
|
-
...job,
|
|
220
|
-
payload: job.payload,
|
|
221
|
-
timeoutMs: job.timeoutMs,
|
|
222
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
223
|
-
}));
|
|
224
|
-
} catch (error) {
|
|
225
|
-
log(`Error getting next batch: ${error}`);
|
|
226
|
-
await client.query("ROLLBACK");
|
|
227
|
-
throw error;
|
|
228
|
-
} finally {
|
|
229
|
-
client.release();
|
|
230
|
-
}
|
|
231
|
-
};
|
|
232
|
-
var completeJob = async (pool, jobId) => {
|
|
233
|
-
const client = await pool.connect();
|
|
234
|
-
try {
|
|
235
|
-
await client.query(
|
|
236
|
-
`
|
|
237
|
-
UPDATE job_queue
|
|
238
|
-
SET status = 'completed', updated_at = NOW(), completed_at = NOW()
|
|
239
|
-
WHERE id = $1
|
|
240
|
-
`,
|
|
241
|
-
[jobId]
|
|
242
|
-
);
|
|
243
|
-
await recordJobEvent(pool, jobId, "completed" /* Completed */);
|
|
244
|
-
} catch (error) {
|
|
245
|
-
log(`Error completing job ${jobId}: ${error}`);
|
|
246
|
-
throw error;
|
|
247
|
-
} finally {
|
|
248
|
-
log(`Completed job ${jobId}`);
|
|
249
|
-
client.release();
|
|
250
|
-
}
|
|
251
|
-
};
|
|
252
|
-
var failJob = async (pool, jobId, error, failureReason) => {
|
|
253
|
-
const client = await pool.connect();
|
|
254
|
-
try {
|
|
255
|
-
await client.query(
|
|
256
|
-
`
|
|
257
|
-
UPDATE job_queue
|
|
258
|
-
SET status = 'failed',
|
|
259
|
-
updated_at = NOW(),
|
|
260
|
-
next_attempt_at = CASE
|
|
261
|
-
WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
|
|
262
|
-
ELSE NULL
|
|
263
|
-
END,
|
|
264
|
-
error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
|
|
265
|
-
failure_reason = $3,
|
|
266
|
-
last_failed_at = NOW()
|
|
267
|
-
WHERE id = $1
|
|
268
|
-
`,
|
|
269
|
-
[
|
|
270
|
-
jobId,
|
|
271
|
-
JSON.stringify([
|
|
272
|
-
{
|
|
273
|
-
message: error.message || String(error),
|
|
274
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
275
|
-
}
|
|
276
|
-
]),
|
|
277
|
-
failureReason ?? null
|
|
278
|
-
]
|
|
279
|
-
);
|
|
280
|
-
await recordJobEvent(pool, jobId, "failed" /* Failed */, {
|
|
281
|
-
message: error.message || String(error),
|
|
282
|
-
failureReason
|
|
283
|
-
});
|
|
284
|
-
} catch (error2) {
|
|
285
|
-
log(`Error failing job ${jobId}: ${error2}`);
|
|
286
|
-
throw error2;
|
|
287
|
-
} finally {
|
|
288
|
-
log(`Failed job ${jobId}`);
|
|
289
|
-
client.release();
|
|
290
|
-
}
|
|
291
|
-
};
|
|
292
|
-
var retryJob = async (pool, jobId) => {
|
|
293
|
-
const client = await pool.connect();
|
|
294
|
-
try {
|
|
295
|
-
await client.query(
|
|
296
|
-
`
|
|
297
|
-
UPDATE job_queue
|
|
298
|
-
SET status = 'pending',
|
|
299
|
-
updated_at = NOW(),
|
|
300
|
-
locked_at = NULL,
|
|
301
|
-
locked_by = NULL,
|
|
302
|
-
next_attempt_at = NOW(),
|
|
303
|
-
last_retried_at = NOW()
|
|
304
|
-
WHERE id = $1
|
|
305
|
-
`,
|
|
306
|
-
[jobId]
|
|
307
|
-
);
|
|
308
|
-
await recordJobEvent(pool, jobId, "retried" /* Retried */);
|
|
309
|
-
} catch (error) {
|
|
310
|
-
log(`Error retrying job ${jobId}: ${error}`);
|
|
311
|
-
throw error;
|
|
312
|
-
} finally {
|
|
313
|
-
log(`Retried job ${jobId}`);
|
|
314
|
-
client.release();
|
|
315
|
-
}
|
|
316
|
-
};
|
|
317
|
-
var cleanupOldJobs = async (pool, daysToKeep = 30) => {
|
|
318
|
-
const client = await pool.connect();
|
|
319
|
-
try {
|
|
320
|
-
const result = await client.query(`
|
|
321
|
-
DELETE FROM job_queue
|
|
322
|
-
WHERE status = 'completed'
|
|
323
|
-
AND updated_at < NOW() - INTERVAL '${daysToKeep} days'
|
|
324
|
-
RETURNING id
|
|
325
|
-
`);
|
|
326
|
-
log(`Deleted ${result.rowCount} old jobs`);
|
|
327
|
-
return result.rowCount || 0;
|
|
328
|
-
} catch (error) {
|
|
329
|
-
log(`Error cleaning up old jobs: ${error}`);
|
|
330
|
-
throw error;
|
|
331
|
-
} finally {
|
|
332
|
-
client.release();
|
|
333
|
-
}
|
|
334
|
-
};
|
|
335
|
-
var cancelJob = async (pool, jobId) => {
|
|
336
|
-
const client = await pool.connect();
|
|
337
|
-
try {
|
|
338
|
-
await client.query(
|
|
339
|
-
`
|
|
340
|
-
UPDATE job_queue
|
|
341
|
-
SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW()
|
|
342
|
-
WHERE id = $1 AND status = 'pending'
|
|
343
|
-
`,
|
|
344
|
-
[jobId]
|
|
345
|
-
);
|
|
346
|
-
await recordJobEvent(pool, jobId, "cancelled" /* Cancelled */);
|
|
347
|
-
} catch (error) {
|
|
348
|
-
log(`Error cancelling job ${jobId}: ${error}`);
|
|
349
|
-
throw error;
|
|
350
|
-
} finally {
|
|
351
|
-
log(`Cancelled job ${jobId}`);
|
|
352
|
-
client.release();
|
|
353
|
-
}
|
|
354
|
-
};
|
|
355
|
-
var editJob = async (pool, jobId, updates) => {
|
|
356
|
-
const client = await pool.connect();
|
|
357
|
-
try {
|
|
358
|
-
const updateFields = [];
|
|
359
|
-
const params = [];
|
|
360
|
-
let paramIdx = 1;
|
|
361
|
-
if (updates.payload !== void 0) {
|
|
362
|
-
updateFields.push(`payload = $${paramIdx++}`);
|
|
363
|
-
params.push(updates.payload);
|
|
364
|
-
}
|
|
365
|
-
if (updates.maxAttempts !== void 0) {
|
|
366
|
-
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
367
|
-
params.push(updates.maxAttempts);
|
|
368
|
-
}
|
|
369
|
-
if (updates.priority !== void 0) {
|
|
370
|
-
updateFields.push(`priority = $${paramIdx++}`);
|
|
371
|
-
params.push(updates.priority);
|
|
372
|
-
}
|
|
373
|
-
if (updates.runAt !== void 0) {
|
|
374
|
-
if (updates.runAt === null) {
|
|
375
|
-
updateFields.push(`run_at = NOW()`);
|
|
376
|
-
} else {
|
|
377
|
-
updateFields.push(`run_at = $${paramIdx++}`);
|
|
378
|
-
params.push(updates.runAt);
|
|
379
|
-
}
|
|
380
|
-
}
|
|
381
|
-
if (updates.timeoutMs !== void 0) {
|
|
382
|
-
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
383
|
-
params.push(updates.timeoutMs ?? null);
|
|
384
|
-
}
|
|
385
|
-
if (updates.tags !== void 0) {
|
|
386
|
-
updateFields.push(`tags = $${paramIdx++}`);
|
|
387
|
-
params.push(updates.tags ?? null);
|
|
388
|
-
}
|
|
389
|
-
if (updateFields.length === 0) {
|
|
390
|
-
log(`No fields to update for job ${jobId}`);
|
|
391
|
-
return;
|
|
392
|
-
}
|
|
393
|
-
updateFields.push(`updated_at = NOW()`);
|
|
394
|
-
params.push(jobId);
|
|
395
|
-
const query = `
|
|
396
|
-
UPDATE job_queue
|
|
397
|
-
SET ${updateFields.join(", ")}
|
|
398
|
-
WHERE id = $${paramIdx} AND status = 'pending'
|
|
399
|
-
`;
|
|
400
|
-
await client.query(query, params);
|
|
401
|
-
const metadata = {};
|
|
402
|
-
if (updates.payload !== void 0) metadata.payload = updates.payload;
|
|
403
|
-
if (updates.maxAttempts !== void 0)
|
|
404
|
-
metadata.maxAttempts = updates.maxAttempts;
|
|
405
|
-
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
406
|
-
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
407
|
-
if (updates.timeoutMs !== void 0) metadata.timeoutMs = updates.timeoutMs;
|
|
408
|
-
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
409
|
-
await recordJobEvent(pool, jobId, "edited" /* Edited */, metadata);
|
|
410
|
-
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
411
|
-
} catch (error) {
|
|
412
|
-
log(`Error editing job ${jobId}: ${error}`);
|
|
413
|
-
throw error;
|
|
414
|
-
} finally {
|
|
415
|
-
client.release();
|
|
416
|
-
}
|
|
417
|
-
};
|
|
418
|
-
var editAllPendingJobs = async (pool, filters = void 0, updates) => {
|
|
419
|
-
const client = await pool.connect();
|
|
420
|
-
try {
|
|
421
|
-
const updateFields = [];
|
|
422
|
-
const params = [];
|
|
423
|
-
let paramIdx = 1;
|
|
424
|
-
if (updates.payload !== void 0) {
|
|
425
|
-
updateFields.push(`payload = $${paramIdx++}`);
|
|
426
|
-
params.push(updates.payload);
|
|
427
|
-
}
|
|
428
|
-
if (updates.maxAttempts !== void 0) {
|
|
429
|
-
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
430
|
-
params.push(updates.maxAttempts);
|
|
431
|
-
}
|
|
432
|
-
if (updates.priority !== void 0) {
|
|
433
|
-
updateFields.push(`priority = $${paramIdx++}`);
|
|
434
|
-
params.push(updates.priority);
|
|
435
|
-
}
|
|
436
|
-
if (updates.runAt !== void 0) {
|
|
437
|
-
if (updates.runAt === null) {
|
|
438
|
-
updateFields.push(`run_at = NOW()`);
|
|
439
|
-
} else {
|
|
440
|
-
updateFields.push(`run_at = $${paramIdx++}`);
|
|
441
|
-
params.push(updates.runAt);
|
|
442
|
-
}
|
|
443
|
-
}
|
|
444
|
-
if (updates.timeoutMs !== void 0) {
|
|
445
|
-
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
446
|
-
params.push(updates.timeoutMs ?? null);
|
|
447
|
-
}
|
|
448
|
-
if (updates.tags !== void 0) {
|
|
449
|
-
updateFields.push(`tags = $${paramIdx++}`);
|
|
450
|
-
params.push(updates.tags ?? null);
|
|
451
|
-
}
|
|
452
|
-
if (updateFields.length === 0) {
|
|
453
|
-
log(`No fields to update for batch edit`);
|
|
454
|
-
return 0;
|
|
455
|
-
}
|
|
456
|
-
updateFields.push(`updated_at = NOW()`);
|
|
457
|
-
let query = `
|
|
458
|
-
UPDATE job_queue
|
|
459
|
-
SET ${updateFields.join(", ")}
|
|
460
|
-
WHERE status = 'pending'`;
|
|
461
|
-
if (filters) {
|
|
462
|
-
if (filters.jobType) {
|
|
463
|
-
query += ` AND job_type = $${paramIdx++}`;
|
|
464
|
-
params.push(filters.jobType);
|
|
465
|
-
}
|
|
466
|
-
if (filters.priority !== void 0) {
|
|
467
|
-
query += ` AND priority = $${paramIdx++}`;
|
|
468
|
-
params.push(filters.priority);
|
|
469
|
-
}
|
|
470
|
-
if (filters.runAt) {
|
|
471
|
-
if (filters.runAt instanceof Date) {
|
|
472
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
473
|
-
params.push(filters.runAt);
|
|
474
|
-
} else if (typeof filters.runAt === "object") {
|
|
475
|
-
const ops = filters.runAt;
|
|
476
|
-
if (ops.gt) {
|
|
477
|
-
query += ` AND run_at > $${paramIdx++}`;
|
|
478
|
-
params.push(ops.gt);
|
|
479
|
-
}
|
|
480
|
-
if (ops.gte) {
|
|
481
|
-
query += ` AND run_at >= $${paramIdx++}`;
|
|
482
|
-
params.push(ops.gte);
|
|
483
|
-
}
|
|
484
|
-
if (ops.lt) {
|
|
485
|
-
query += ` AND run_at < $${paramIdx++}`;
|
|
486
|
-
params.push(ops.lt);
|
|
487
|
-
}
|
|
488
|
-
if (ops.lte) {
|
|
489
|
-
query += ` AND run_at <= $${paramIdx++}`;
|
|
490
|
-
params.push(ops.lte);
|
|
491
|
-
}
|
|
492
|
-
if (ops.eq) {
|
|
493
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
494
|
-
params.push(ops.eq);
|
|
495
|
-
}
|
|
496
|
-
}
|
|
497
|
-
}
|
|
498
|
-
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
499
|
-
const mode = filters.tags.mode || "all";
|
|
500
|
-
const tagValues = filters.tags.values;
|
|
501
|
-
switch (mode) {
|
|
502
|
-
case "exact":
|
|
503
|
-
query += ` AND tags = $${paramIdx++}`;
|
|
504
|
-
params.push(tagValues);
|
|
505
|
-
break;
|
|
506
|
-
case "all":
|
|
507
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
508
|
-
params.push(tagValues);
|
|
509
|
-
break;
|
|
510
|
-
case "any":
|
|
511
|
-
query += ` AND tags && $${paramIdx++}`;
|
|
512
|
-
params.push(tagValues);
|
|
513
|
-
break;
|
|
514
|
-
case "none":
|
|
515
|
-
query += ` AND NOT (tags && $${paramIdx++})`;
|
|
516
|
-
params.push(tagValues);
|
|
517
|
-
break;
|
|
518
|
-
default:
|
|
519
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
520
|
-
params.push(tagValues);
|
|
521
|
-
}
|
|
522
|
-
}
|
|
523
|
-
}
|
|
524
|
-
query += "\nRETURNING id";
|
|
525
|
-
const result = await client.query(query, params);
|
|
526
|
-
const editedCount = result.rowCount || 0;
|
|
527
|
-
const metadata = {};
|
|
528
|
-
if (updates.payload !== void 0) metadata.payload = updates.payload;
|
|
529
|
-
if (updates.maxAttempts !== void 0)
|
|
530
|
-
metadata.maxAttempts = updates.maxAttempts;
|
|
531
|
-
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
532
|
-
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
533
|
-
if (updates.timeoutMs !== void 0) metadata.timeoutMs = updates.timeoutMs;
|
|
534
|
-
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
535
|
-
for (const row of result.rows) {
|
|
536
|
-
await recordJobEvent(pool, row.id, "edited" /* Edited */, metadata);
|
|
537
|
-
}
|
|
538
|
-
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
539
|
-
return editedCount;
|
|
540
|
-
} catch (error) {
|
|
541
|
-
log(`Error editing pending jobs: ${error}`);
|
|
542
|
-
throw error;
|
|
543
|
-
} finally {
|
|
544
|
-
client.release();
|
|
545
|
-
}
|
|
546
|
-
};
|
|
547
|
-
var cancelAllUpcomingJobs = async (pool, filters) => {
|
|
548
|
-
const client = await pool.connect();
|
|
549
|
-
try {
|
|
550
|
-
let query = `
|
|
551
|
-
UPDATE job_queue
|
|
552
|
-
SET status = 'cancelled', updated_at = NOW()
|
|
553
|
-
WHERE status = 'pending'`;
|
|
554
|
-
const params = [];
|
|
555
|
-
let paramIdx = 1;
|
|
556
|
-
if (filters) {
|
|
557
|
-
if (filters.jobType) {
|
|
558
|
-
query += ` AND job_type = $${paramIdx++}`;
|
|
559
|
-
params.push(filters.jobType);
|
|
560
|
-
}
|
|
561
|
-
if (filters.priority !== void 0) {
|
|
562
|
-
query += ` AND priority = $${paramIdx++}`;
|
|
563
|
-
params.push(filters.priority);
|
|
564
|
-
}
|
|
565
|
-
if (filters.runAt) {
|
|
566
|
-
if (filters.runAt instanceof Date) {
|
|
567
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
568
|
-
params.push(filters.runAt);
|
|
569
|
-
} else if (typeof filters.runAt === "object") {
|
|
570
|
-
const ops = filters.runAt;
|
|
571
|
-
if (ops.gt) {
|
|
572
|
-
query += ` AND run_at > $${paramIdx++}`;
|
|
573
|
-
params.push(ops.gt);
|
|
574
|
-
}
|
|
575
|
-
if (ops.gte) {
|
|
576
|
-
query += ` AND run_at >= $${paramIdx++}`;
|
|
577
|
-
params.push(ops.gte);
|
|
578
|
-
}
|
|
579
|
-
if (ops.lt) {
|
|
580
|
-
query += ` AND run_at < $${paramIdx++}`;
|
|
581
|
-
params.push(ops.lt);
|
|
582
|
-
}
|
|
583
|
-
if (ops.lte) {
|
|
584
|
-
query += ` AND run_at <= $${paramIdx++}`;
|
|
585
|
-
params.push(ops.lte);
|
|
586
|
-
}
|
|
587
|
-
if (ops.eq) {
|
|
588
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
589
|
-
params.push(ops.eq);
|
|
590
|
-
}
|
|
591
|
-
}
|
|
592
|
-
}
|
|
593
|
-
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
594
|
-
const mode = filters.tags.mode || "all";
|
|
595
|
-
const tagValues = filters.tags.values;
|
|
596
|
-
switch (mode) {
|
|
597
|
-
case "exact":
|
|
598
|
-
query += ` AND tags = $${paramIdx++}`;
|
|
599
|
-
params.push(tagValues);
|
|
600
|
-
break;
|
|
601
|
-
case "all":
|
|
602
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
603
|
-
params.push(tagValues);
|
|
604
|
-
break;
|
|
605
|
-
case "any":
|
|
606
|
-
query += ` AND tags && $${paramIdx++}`;
|
|
607
|
-
params.push(tagValues);
|
|
608
|
-
break;
|
|
609
|
-
case "none":
|
|
610
|
-
query += ` AND NOT (tags && $${paramIdx++})`;
|
|
611
|
-
params.push(tagValues);
|
|
612
|
-
break;
|
|
613
|
-
default:
|
|
614
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
615
|
-
params.push(tagValues);
|
|
616
|
-
}
|
|
617
|
-
}
|
|
618
|
-
}
|
|
619
|
-
query += "\nRETURNING id";
|
|
620
|
-
const result = await client.query(query, params);
|
|
621
|
-
log(`Cancelled ${result.rowCount} jobs`);
|
|
622
|
-
return result.rowCount || 0;
|
|
623
|
-
} catch (error) {
|
|
624
|
-
log(`Error cancelling upcoming jobs: ${error}`);
|
|
625
|
-
throw error;
|
|
626
|
-
} finally {
|
|
627
|
-
client.release();
|
|
628
|
-
}
|
|
629
|
-
};
|
|
630
|
-
var getAllJobs = async (pool, limit = 100, offset = 0) => {
|
|
631
|
-
const client = await pool.connect();
|
|
632
|
-
try {
|
|
633
|
-
const result = await client.query(
|
|
634
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
635
|
-
[limit, offset]
|
|
636
|
-
);
|
|
637
|
-
log(`Found ${result.rows.length} jobs (all)`);
|
|
638
|
-
return result.rows.map((job) => ({
|
|
639
|
-
...job,
|
|
640
|
-
payload: job.payload,
|
|
641
|
-
timeoutMs: job.timeoutMs,
|
|
642
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
643
|
-
}));
|
|
644
|
-
} catch (error) {
|
|
645
|
-
log(`Error getting all jobs: ${error}`);
|
|
646
|
-
throw error;
|
|
647
|
-
} finally {
|
|
648
|
-
client.release();
|
|
649
|
-
}
|
|
650
|
-
};
|
|
651
|
-
var setPendingReasonForUnpickedJobs = async (pool, reason, jobType) => {
|
|
652
|
-
const client = await pool.connect();
|
|
653
|
-
try {
|
|
654
|
-
let jobTypeFilter = "";
|
|
655
|
-
let params = [reason];
|
|
656
|
-
if (jobType) {
|
|
657
|
-
if (Array.isArray(jobType)) {
|
|
658
|
-
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
659
|
-
params.push(jobType);
|
|
660
|
-
} else {
|
|
661
|
-
jobTypeFilter = ` AND job_type = $2`;
|
|
662
|
-
params.push(jobType);
|
|
663
|
-
}
|
|
664
|
-
}
|
|
665
|
-
await client.query(
|
|
666
|
-
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
667
|
-
params
|
|
668
|
-
);
|
|
669
|
-
} finally {
|
|
670
|
-
client.release();
|
|
671
|
-
}
|
|
672
|
-
};
|
|
673
|
-
var reclaimStuckJobs = async (pool, maxProcessingTimeMinutes = 10) => {
|
|
674
|
-
const client = await pool.connect();
|
|
675
|
-
try {
|
|
676
|
-
const result = await client.query(
|
|
677
|
-
`
|
|
678
|
-
UPDATE job_queue
|
|
679
|
-
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
680
|
-
WHERE status = 'processing'
|
|
681
|
-
AND locked_at < NOW() - INTERVAL '${maxProcessingTimeMinutes} minutes'
|
|
682
|
-
RETURNING id
|
|
683
|
-
`
|
|
684
|
-
);
|
|
685
|
-
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
686
|
-
return result.rowCount || 0;
|
|
687
|
-
} catch (error) {
|
|
688
|
-
log(`Error reclaiming stuck jobs: ${error}`);
|
|
689
|
-
throw error;
|
|
690
|
-
} finally {
|
|
691
|
-
client.release();
|
|
692
|
-
}
|
|
693
|
-
};
|
|
694
|
-
var getJobEvents = async (pool, jobId) => {
|
|
695
|
-
const client = await pool.connect();
|
|
696
|
-
try {
|
|
697
|
-
const res = await client.query(
|
|
698
|
-
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
699
|
-
[jobId]
|
|
700
|
-
);
|
|
701
|
-
return res.rows;
|
|
702
|
-
} finally {
|
|
703
|
-
client.release();
|
|
704
|
-
}
|
|
705
|
-
};
|
|
706
|
-
// Fetch jobs filtered by tags, newest-first, paginated via limit/offset.
// mode:
//   - "exact": tags array equals the given tags exactly
//   - "all":   job carries every given tag (array containment, default)
//   - "any":   job carries at least one given tag (array overlap)
//   - "none":  job carries none of the given tags
var getJobsByTags = async (pool, tags, mode = "all", limit = 100, offset = 0) => {
  const client = await pool.connect();
  try {
    // FIX: select force_kill_on_timeout — the mapper below reads
    // job.forceKillOnTimeout, which was previously always undefined because
    // the column was missing from the SELECT. Also dropped the duplicated
    // `last_failed_at AS "lastFailedAt"` alias (same expression twice).
    let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags
      FROM job_queue`;
    let params = [];
    switch (mode) {
      case "exact":
        query += " WHERE tags = $1";
        params = [tags];
        break;
      case "any":
        query += " WHERE tags && $1";
        params = [tags];
        break;
      case "none":
        query += " WHERE NOT (tags && $1)";
        params = [tags];
        break;
      case "all":
      default:
        // "all" and any unrecognized mode use array containment.
        query += " WHERE tags @> $1";
        params = [tags];
    }
    query += " ORDER BY created_at DESC LIMIT $2 OFFSET $3";
    params.push(limit, offset);
    const result = await client.query(query, params);
    log(
      `Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`
    );
    return result.rows.map((job) => ({
      ...job,
      payload: job.payload,
      timeoutMs: job.timeoutMs,
      forceKillOnTimeout: job.forceKillOnTimeout,
      failureReason: job.failureReason
    }));
  } catch (error) {
    log(
      `Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`
    );
    throw error;
  } finally {
    client.release();
  }
};
// Query jobs with optional filters (jobType, priority, runAt comparisons,
// tags with a match mode), newest-first, paginated via limit/offset.
// Returns rows with camelCase keys matching the Job shape used elsewhere.
var getJobs = async (pool, filters, limit = 100, offset = 0) => {
  const client = await pool.connect();
  try {
    // FIX: removed the duplicated `last_failed_at AS "lastFailedAt"` alias
    // (the same expression was selected twice; result rows are unchanged).
    let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue`;
    const params = [];
    let where = [];
    let paramIdx = 1;
    if (filters) {
      if (filters.jobType) {
        where.push(`job_type = $${paramIdx++}`);
        params.push(filters.jobType);
      }
      if (filters.priority !== void 0) {
        where.push(`priority = $${paramIdx++}`);
        params.push(filters.priority);
      }
      if (filters.runAt) {
        if (filters.runAt instanceof Date) {
          // A bare Date means exact equality on run_at.
          where.push(`run_at = $${paramIdx++}`);
          params.push(filters.runAt);
        } else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
          // Comparison-operator object: each present operator adds a clause.
          // NOTE(review): these per-operator checks are truthy (`if (ops.gt)`),
          // not `!== undefined` like the guard above — falsy operator values
          // are silently skipped. Dates are always truthy so this only matters
          // for unexpected inputs; preserved as-is to avoid behavior change.
          const ops = filters.runAt;
          if (ops.gt) {
            where.push(`run_at > $${paramIdx++}`);
            params.push(ops.gt);
          }
          if (ops.gte) {
            where.push(`run_at >= $${paramIdx++}`);
            params.push(ops.gte);
          }
          if (ops.lt) {
            where.push(`run_at < $${paramIdx++}`);
            params.push(ops.lt);
          }
          if (ops.lte) {
            where.push(`run_at <= $${paramIdx++}`);
            params.push(ops.lte);
          }
          if (ops.eq) {
            where.push(`run_at = $${paramIdx++}`);
            params.push(ops.eq);
          }
        }
      }
      if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
        const mode = filters.tags.mode || "all";
        const tagValues = filters.tags.values;
        switch (mode) {
          case "exact":
            where.push(`tags = $${paramIdx++}`);
            params.push(tagValues);
            break;
          case "all":
            where.push(`tags @> $${paramIdx++}`);
            params.push(tagValues);
            break;
          case "any":
            where.push(`tags && $${paramIdx++}`);
            params.push(tagValues);
            break;
          case "none":
            where.push(`NOT (tags && $${paramIdx++})`);
            params.push(tagValues);
            break;
          default:
            // Unknown mode falls back to "all" (containment).
            where.push(`tags @> $${paramIdx++}`);
            params.push(tagValues);
        }
      }
    }
    if (where.length > 0) {
      query += ` WHERE ${where.join(" AND ")}`;
    }
    // Re-sync the placeholder index with params in case branches diverged.
    paramIdx = params.length + 1;
    query += ` ORDER BY created_at DESC LIMIT $${paramIdx++} OFFSET $${paramIdx}`;
    params.push(limit, offset);
    const result = await client.query(query, params);
    log(`Found ${result.rows.length} jobs`);
    return result.rows.map((job) => ({
      ...job,
      payload: job.payload,
      timeoutMs: job.timeoutMs,
      forceKillOnTimeout: job.forceKillOnTimeout,
      failureReason: job.failureReason
    }));
  } catch (error) {
    log(`Error getting jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
// Best-effort static check that `handler` can survive a toString()/
// new Function round-trip (how it is rebuilt in a worker thread when
// forceKillOnTimeout is enabled). Throws a descriptive Error when the
// handler is clearly not serializable; otherwise returns undefined.
function validateHandlerSerializable(handler, jobType) {
  try {
    const source = handler.toString();
    // `this.` outside the parameter list cannot be carried into a worker.
    const usesThisContext = source.includes("this.") && !source.match(/\([^)]*this[^)]*\)/);
    if (usesThisContext) {
      throw new Error(
        `Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      );
    }
    if (source.includes("[native code]")) {
      throw new Error(
        `Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      );
    }
    try {
      // Round-trip parse check: the worker rebuilds the handler the same way.
      new Function("return " + source);
    } catch (parseError) {
      throw new Error(
        `Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      );
    }
  } catch (error) {
    if (error instanceof Error) {
      throw error;
    }
    throw new Error(
      `Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
    );
  }
}
// Run `handler(payload, signal)` inside a worker thread so it can be hard-
// killed on timeout. The handler is serialized via toString() and rebuilt in
// the worker (it must be a standalone function — validated first). Resolves
// when the handler resolves; rejects with a "timeout"-tagged Error when the
// in-worker timer fires or the outer watchdog terminates the worker.
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
  validateHandlerSerializable(handler, jobType);
  return new Promise((resolve, reject) => {
    const workerCode = `
      (function() {
        const { parentPort, workerData } = require('worker_threads');
        const { handlerCode, payload, timeoutMs } = workerData;

        // Create an AbortController for the handler
        const controller = new AbortController();
        const signal = controller.signal;

        // Set up timeout
        const timeoutId = setTimeout(() => {
          controller.abort();
          parentPort.postMessage({ type: 'timeout' });
        }, timeoutMs);

        try {
          // Execute the handler
          // Note: This uses Function constructor which requires the handler to be serializable.
          // The handler should be validated before reaching this point.
          let handlerFn;
          try {
            // Wrap handlerCode in parentheses to ensure it's treated as an expression
            // This handles both arrow functions and regular functions
            const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
              ? handlerCode
              : '(' + handlerCode + ')';
            handlerFn = new Function('return ' + wrappedCode)();
          } catch (parseError) {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: 'Handler cannot be deserialized in worker thread. ' +
                  'Ensure your handler is a standalone function without closures over external variables. ' +
                  'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
                stack: parseError instanceof Error ? parseError.stack : undefined,
                name: 'SerializationError',
              },
            });
            return;
          }

          // Ensure handlerFn is actually a function
          if (typeof handlerFn !== 'function') {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: 'Handler deserialization did not produce a function. ' +
                  'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
                name: 'SerializationError',
              },
            });
            return;
          }

          handlerFn(payload, signal)
            .then(() => {
              clearTimeout(timeoutId);
              parentPort.postMessage({ type: 'success' });
            })
            .catch((error) => {
              clearTimeout(timeoutId);
              parentPort.postMessage({
                type: 'error',
                error: {
                  message: error.message,
                  stack: error.stack,
                  name: error.name,
                },
              });
            });
        } catch (error) {
          clearTimeout(timeoutId);
          parentPort.postMessage({
            type: 'error',
            error: {
              message: error.message,
              stack: error.stack,
              name: error.name,
            },
          });
        }
      })();
    `;
    const worker = new worker_threads.Worker(workerCode, {
      eval: true,
      workerData: {
        handlerCode: handler.toString(),
        payload,
        timeoutMs
      }
    });
    let resolved = false;
    // FIX: keep a handle on the watchdog timer so it can be cancelled as soon
    // as the worker settles. Previously it always ran for timeoutMs + 100ms
    // after every job, keeping the event loop alive and attempting to
    // terminate workers that had already finished.
    let killTimer;
    worker.on("message", (message) => {
      if (resolved) return;
      resolved = true;
      clearTimeout(killTimer);
      if (message.type === "success") {
        resolve();
      } else if (message.type === "timeout") {
        const timeoutError = new Error(
          `Job timed out after ${timeoutMs} ms and was forcefully terminated`
        );
        timeoutError.failureReason = "timeout" /* Timeout */;
        reject(timeoutError);
      } else if (message.type === "error") {
        const error = new Error(message.error.message);
        error.stack = message.error.stack;
        error.name = message.error.name;
        reject(error);
      }
    });
    worker.on("error", (error) => {
      if (resolved) return;
      resolved = true;
      clearTimeout(killTimer);
      reject(error);
    });
    worker.on("exit", (code) => {
      if (resolved) return;
      if (code !== 0) {
        resolved = true;
        clearTimeout(killTimer);
        reject(new Error(`Worker stopped with exit code ${code}`));
      }
      // A clean exit (code 0) without any message leaves the watchdog armed
      // so the promise still settles instead of hanging forever.
    });
    killTimer = setTimeout(() => {
      if (!resolved) {
        resolved = true;
        worker.terminate().then(() => {
          const timeoutError = new Error(
            `Job timed out after ${timeoutMs} ms and was forcefully terminated`
          );
          timeoutError.failureReason = "timeout" /* Timeout */;
          reject(timeoutError);
        }).catch((err) => {
          reject(err);
        });
      }
    }, timeoutMs + 100);
  });
}
// Execute a single claimed job via its registered handler and persist the
// outcome: completeJob on success, failJob with a FailureReason otherwise.
// Jobs whose type has no handler are failed with "no_handler" and pending
// jobs of that type are annotated with a pending reason.
async function processJobWithHandlers(pool, job, jobHandlers) {
  const handler = jobHandlers[job.jobType];
  if (!handler) {
    await setPendingReasonForUnpickedJobs(
      pool,
      `No handler registered for job type: ${job.jobType}`,
      job.jobType
    );
    await failJob(
      pool,
      job.id,
      new Error(`No handler registered for job type: ${job.jobType}`),
      "no_handler" /* NoHandler */
    );
    return;
  }
  const timeoutMs = job.timeoutMs ?? void 0;
  const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
  let timeoutId;
  const controller = new AbortController();
  try {
    if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
      // Hard timeout: run in a worker thread that can be terminated.
      await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
    } else {
      const jobPromise = handler(job.payload, controller.signal);
      if (timeoutMs && timeoutMs > 0) {
        // Soft timeout: the abort signal fires and the race rejects, but the
        // handler keeps running in-process. FIX: attach a no-op rejection
        // handler so a late failure of the abandoned handler promise cannot
        // surface as an unhandled rejection (fatal by default in modern Node).
        Promise.resolve(jobPromise).catch(() => {});
        await Promise.race([
          jobPromise,
          new Promise((_, reject) => {
            timeoutId = setTimeout(() => {
              controller.abort();
              const timeoutError = new Error(
                `Job timed out after ${timeoutMs} ms`
              );
              timeoutError.failureReason = "timeout" /* Timeout */;
              reject(timeoutError);
            }, timeoutMs);
          })
        ]);
      } else {
        await jobPromise;
      }
    }
    if (timeoutId) clearTimeout(timeoutId);
    await completeJob(pool, job.id);
  } catch (error) {
    if (timeoutId) clearTimeout(timeoutId);
    console.error(`Error processing job ${job.id}:`, error);
    // Preserve a timeout tag from either timeout path; everything else is a
    // generic handler error.
    let failureReason = "handler_error" /* HandlerError */;
    if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
      failureReason = "timeout" /* Timeout */;
    }
    await failJob(
      pool,
      job.id,
      error instanceof Error ? error : new Error(String(error)),
      failureReason
    );
  }
}
// Claim up to `batchSize` jobs for this worker and process them, honoring an
// optional concurrency limit. Resolves with the number of jobs claimed.
async function processBatchWithHandlers(pool, workerId, batchSize, jobType, jobHandlers, concurrency) {
  const jobs = await getNextBatch(
    pool,
    workerId,
    batchSize,
    jobType
  );
  if (!concurrency || concurrency >= jobs.length) {
    // No effective limit: run the whole batch at once.
    await Promise.all(
      jobs.map((job) => processJobWithHandlers(pool, job, jobHandlers))
    );
    return jobs.length;
  }
  // Limited concurrency: spin up `concurrency` sequential lanes that pull
  // jobs off a shared cursor until the batch is drained.
  let cursor = 0;
  const lanes = Array.from({ length: concurrency }, async () => {
    while (cursor < jobs.length) {
      const job = jobs[cursor++];
      try {
        await processJobWithHandlers(pool, job, jobHandlers);
      } catch {
        // Per-job failures are already recorded inside processJobWithHandlers;
        // a stray rejection must not stall the remaining jobs in this lane.
      }
    }
  });
  await Promise.all(lanes);
  return jobs.length;
}
// Build a polling job processor bound to `pool` and the given handler map.
// options:
//   - workerId: identifier recorded on claimed jobs (random default)
//   - batchSize: max jobs claimed per poll (default 10)
//   - pollInterval: ms between background polls (default 5000)
//   - onError: callback for batch-level failures (default: console.error)
//   - jobType: optional type (or array of types) to restrict processing to
//   - concurrency: max jobs processed simultaneously per batch (default 3)
//   - verbose: enables internal logging
var createProcessor = (pool, handlers, options = {}) => {
  const {
    workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
    batchSize = 10,
    pollInterval = 5e3,
    onError = (error) => console.error("Job processor error:", error),
    jobType,
    concurrency = 3
  } = options;
  // Shared mutable state captured by the returned control object.
  let running = false;
  let intervalId = null;
  setLogContext(options.verbose ?? false);
  // Claim and process one batch; returns the number of jobs processed.
  // Batch-level errors go to onError and are reported as 0 processed.
  const processJobs = async () => {
    if (!running) return 0;
    log(
      `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
    );
    try {
      const processed = await processBatchWithHandlers(
        pool,
        workerId,
        batchSize,
        jobType,
        handlers,
        concurrency
      );
      return processed;
    } catch (error) {
      onError(error instanceof Error ? error : new Error(String(error)));
    }
    return 0;
  };
  return {
    /**
     * Start the job processor in the background.
     * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
     * - You have to call the stop method to stop the processor.
     */
    startInBackground: () => {
      if (running) return;
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      // Eager drain: a full batch suggests more jobs are waiting, so
      // schedule another pass immediately instead of waiting for the
      // next poll tick.
      const processBatches = async () => {
        if (!running) return;
        const processed = await processJobs();
        if (processed === batchSize && running) {
          setImmediate(processBatches);
        }
      };
      processBatches();
      // The interval performs a single-batch poll; the eager drain above
      // only runs on startup. NOTE(review): an interval tick that claims a
      // full batch does not re-drain until the next tick — confirm intended.
      intervalId = setInterval(processJobs, pollInterval);
    },
    /**
     * Stop the job processor that runs in the background
     */
    stop: () => {
      log(`Stopping job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearInterval(intervalId);
        intervalId = null;
      }
    },
    /**
     * Start the job processor synchronously.
     * - This processes a single batch of jobs immediately and then stops.
     * - The pollInterval is ignored.
     */
    start: async () => {
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const processed = await processJobs();
      running = false;
      return processed;
    },
    // Reports whether the processor currently considers itself active.
    isRunning: () => running
  };
};
// Resolve a PEM-ish config value: a "file://" prefix means "read the PEM
// from this path" (synchronously, utf8); anything else is returned verbatim.
// Empty or undefined input yields undefined.
function loadPemOrFile(value) {
  if (!value) return void 0;
  const FILE_PREFIX = "file://";
  if (!value.startsWith(FILE_PREFIX)) {
    return value;
  }
  return fs__default.default.readFileSync(value.slice(FILE_PREFIX.length), "utf8");
}
// Create a pg.Pool from `config`, layering behavior the stock Pool lacks:
//   - honors `search_path` and `sslmode` query params in the connection string
//   - accepts PEM material inline or via "file://" paths, from config.ssl.*
//     or the PGSSLROOTCERT / PGSSLCERT / PGSSLKEY environment variables
//   - warns when both sslmode and a custom CA are configured at once
var createPool = (config) => {
  let searchPath;
  let ssl = void 0;
  let customCA;
  let sslmode;
  if (config.connectionString) {
    try {
      // Preferred path: WHATWG URL parsing of the connection string.
      const url = new URL(config.connectionString);
      searchPath = url.searchParams.get("search_path") || void 0;
      sslmode = url.searchParams.get("sslmode") || void 0;
      if (sslmode === "no-verify") {
        // sslmode=no-verify means encrypt but skip certificate validation.
        ssl = { rejectUnauthorized: false };
      }
    } catch (e) {
      // Fallback for connection strings the URL parser rejects: use the
      // pg-connection-string parser and extract search_path from `options`.
      const parsed = pgConnectionString.parse(config.connectionString);
      if (parsed.options) {
        const match = parsed.options.match(/search_path=([^\s]+)/);
        if (match) {
          searchPath = match[1];
        }
      }
      sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
      if (sslmode === "no-verify") {
        ssl = { rejectUnauthorized: false };
      }
    }
  }
  if (config.ssl) {
    // Resolve the CA: explicit config wins, then the PGSSLROOTCERT env var.
    if (typeof config.ssl.ca === "string") {
      customCA = config.ssl.ca;
    } else if (typeof process.env.PGSSLROOTCERT === "string") {
      customCA = process.env.PGSSLROOTCERT;
    } else {
      customCA = void 0;
    }
    const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
    // Merge on top of any sslmode-derived settings; certificate verification
    // defaults to enabled unless explicitly turned off.
    ssl = {
      ...ssl,
      ...caValue ? { ca: caValue } : {},
      cert: loadPemOrFile(
        typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
      ),
      key: loadPemOrFile(
        typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
      ),
      rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
    };
  }
  if (sslmode && customCA) {
    // Conflicting SSL configuration: surface a loud (ANSI yellow) warning.
    const warning = `

\x1B[33m**************************************************
\u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
**************************************************
Both sslmode ('${sslmode}') is set in the connection string
and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
This combination may cause connection failures or unexpected behavior.

Recommended: Remove sslmode from the connection string when using a custom CA.
**************************************************\x1B[0m
`;
    console.warn(warning);
  }
  const pool = new pg.Pool({
    ...config,
    ...ssl ? { ssl } : {}
  });
  if (searchPath) {
    // Apply the schema search path on every new connection.
    // NOTE(review): searchPath is interpolated unescaped into SET and the
    // query promise is not awaited; it comes from the caller's own connection
    // string, but confirm it is always trusted input.
    pool.on("connect", (client) => {
      client.query(`SET search_path TO ${searchPath}`);
    });
  }
  return pool;
};
// src/handler-validation.ts
|
|
1277
|
-
// Non-throwing variant of the handler serializability check. Returns
// { isSerializable, error? } where `error` is either the failure reason or,
// for a heuristic closure match, a warning while isSerializable stays true.
function validateHandlerSerializable2(handler, jobType) {
  try {
    const source = handler.toString();
    const typeLabel = jobType ? `job type "${jobType}"` : "handler";
    // `this.` outside the parameter list cannot travel into a worker thread.
    const usesThisContext = source.includes("this.") && !source.match(/\([^)]*this[^)]*\)/);
    if (usesThisContext) {
      return {
        isSerializable: false,
        error: `Handler for ${typeLabel} uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      };
    }
    if (source.includes("[native code]")) {
      return {
        isSerializable: false,
        error: `Handler for ${typeLabel} contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      };
    }
    try {
      // Round-trip parse check: the worker rebuilds the handler the same way.
      new Function("return " + source);
    } catch (parseError) {
      const reason = parseError instanceof Error ? parseError.message : String(parseError);
      return {
        isSerializable: false,
        error: `Handler for ${typeLabel} cannot be serialized: ${reason}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      };
    }
    // Heuristic: a declaration immediately followed by an async arrow often
    // indicates a closure over outer variables — warn but do not reject.
    const looksLikeClosure = /const\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(source) || /let\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(source);
    if (looksLikeClosure) {
      return {
        isSerializable: true,
        error: `Warning: Handler for ${typeLabel} may have closures over external variables. Test thoroughly with forceKillOnTimeout enabled. If the handler fails to execute in a worker thread, ensure all dependencies are imported within the handler function.`
      };
    }
    return { isSerializable: true };
  } catch (error) {
    return {
      isSerializable: false,
      error: `Failed to validate handler serialization${jobType ? ` for job type "${jobType}"` : ""}: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
// Deeper serializability check: after the static validation, rebuild the
// handler via new Function and briefly invoke it with an empty payload and a
// fresh abort signal. Note the probe actually executes the handler for up to
// 100ms; runtime errors from the probe do not fail validation — only a
// synchronous failure to rebuild/start the handler does.
async function testHandlerSerialization(handler, jobType) {
  const staticCheck = validateHandlerSerializable2(handler, jobType);
  if (!staticCheck.isSerializable) {
    return staticCheck;
  }
  try {
    const rebuilt = new Function("return " + handler.toString())();
    const probe = rebuilt({}, new AbortController().signal);
    const deadline = new Promise(
      (_, reject) => setTimeout(() => reject(new Error("Handler test timeout")), 100)
    );
    try {
      await Promise.race([probe, deadline]);
    } catch (execError) {
      // Timing out just means the handler started fine and kept running.
      if (execError instanceof Error && execError.message === "Handler test timeout") {
        return { isSerializable: true };
      }
    }
    return { isSerializable: true };
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    return {
      isSerializable: false,
      error: `Handler failed serialization test: ${reason}`
    };
  }
}
// src/index.ts
|
|
1346
|
-
// Public entry point: build a connection pool from config.databaseConfig and
// return the job-queue API surface. Each wrapped operation re-applies the
// configured logging verbosity before running.
var initJobQueue = (config) => {
  const { databaseConfig } = config;
  const pool = createPool(databaseConfig);
  const verbose = config.verbose ?? false;
  setLogContext(verbose);
  return {
    // Job queue operations
    addJob: withLogContext((job) => addJob(pool, job), verbose),
    getJob: withLogContext((id) => getJob(pool, id), verbose),
    getJobsByStatus: withLogContext(
      (status, limit, offset) => getJobsByStatus(pool, status, limit, offset),
      verbose
    ),
    getAllJobs: withLogContext(
      (limit, offset) => getAllJobs(pool, limit, offset),
      verbose
    ),
    getJobs: withLogContext(
      (filters, limit, offset) => getJobs(pool, filters, limit, offset),
      verbose
    ),
    retryJob: (jobId) => retryJob(pool, jobId),
    cleanupOldJobs: (daysToKeep) => cleanupOldJobs(pool, daysToKeep),
    cancelJob: withLogContext((jobId) => cancelJob(pool, jobId), verbose),
    editJob: withLogContext(
      (jobId, updates) => editJob(pool, jobId, updates),
      verbose
    ),
    editAllPendingJobs: withLogContext(
      (filters, updates) => editAllPendingJobs(pool, filters, updates),
      verbose
    ),
    cancelAllUpcomingJobs: withLogContext(
      (filters) => cancelAllUpcomingJobs(pool, filters),
      verbose
    ),
    reclaimStuckJobs: withLogContext(
      (maxProcessingTimeMinutes) => reclaimStuckJobs(pool, maxProcessingTimeMinutes),
      verbose
    ),
    getJobsByTags: withLogContext(
      (tags, mode = "all", limit, offset) => getJobsByTags(pool, tags, mode, limit, offset),
      verbose
    ),
    // Job processing
    createProcessor: (handlers, options) => createProcessor(pool, handlers, options),
    // Advanced access (for custom operations)
    getPool: () => pool,
    // Job events
    getJobEvents: withLogContext((jobId) => getJobEvents(pool, jobId), verbose)
  };
};
// Wrap `fn` so the logging verbosity is (re)applied on every invocation;
// arguments and return value pass through untouched.
var withLogContext = (fn, verbose) => {
  return (...args) => {
    setLogContext(verbose);
    return fn(...args);
  };
};
// Public CommonJS exports. Note the throwing validator is exported under the
// public name `validateHandlerSerializable` from the result-object variant.
// FIX: removed the duplicated sourceMappingURL directive (it appeared twice;
// tooling only honors one).
exports.FailureReason = FailureReason;
exports.JobEventType = JobEventType;
exports.initJobQueue = initJobQueue;
exports.testHandlerSerialization = testHandlerSerialization;
exports.validateHandlerSerializable = validateHandlerSerializable2;
//# sourceMappingURL=index.cjs.map
|