@nicnocquee/dataqueue 1.25.0 → 1.26.0-beta.20260223202259
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai/build-docs-content.ts +96 -0
- package/ai/build-llms-full.ts +42 -0
- package/ai/docs-content.json +284 -0
- package/ai/rules/advanced.md +150 -0
- package/ai/rules/basic.md +159 -0
- package/ai/rules/react-dashboard.md +83 -0
- package/ai/skills/dataqueue-advanced/SKILL.md +370 -0
- package/ai/skills/dataqueue-core/SKILL.md +234 -0
- package/ai/skills/dataqueue-react/SKILL.md +189 -0
- package/dist/cli.cjs +1149 -14
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.d.cts +66 -1
- package/dist/cli.d.ts +66 -1
- package/dist/cli.js +1146 -13
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +3236 -1237
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +697 -23
- package/dist/index.d.ts +697 -23
- package/dist/index.js +3235 -1238
- package/dist/index.js.map +1 -1
- package/dist/mcp-server.cjs +186 -0
- package/dist/mcp-server.cjs.map +1 -0
- package/dist/mcp-server.d.cts +32 -0
- package/dist/mcp-server.d.ts +32 -0
- package/dist/mcp-server.js +175 -0
- package/dist/mcp-server.js.map +1 -0
- package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
- package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
- package/package.json +24 -21
- package/src/backend.ts +170 -5
- package/src/backends/postgres.ts +992 -63
- package/src/backends/redis-scripts.ts +358 -26
- package/src/backends/redis.test.ts +1532 -0
- package/src/backends/redis.ts +993 -35
- package/src/cli.test.ts +82 -6
- package/src/cli.ts +73 -10
- package/src/cron.test.ts +126 -0
- package/src/cron.ts +40 -0
- package/src/db-util.ts +1 -1
- package/src/index.test.ts +1034 -11
- package/src/index.ts +267 -39
- package/src/init-command.test.ts +449 -0
- package/src/init-command.ts +709 -0
- package/src/install-mcp-command.test.ts +216 -0
- package/src/install-mcp-command.ts +185 -0
- package/src/install-rules-command.test.ts +218 -0
- package/src/install-rules-command.ts +233 -0
- package/src/install-skills-command.test.ts +176 -0
- package/src/install-skills-command.ts +124 -0
- package/src/mcp-server.test.ts +162 -0
- package/src/mcp-server.ts +231 -0
- package/src/processor.ts +104 -113
- package/src/queue.test.ts +465 -0
- package/src/queue.ts +34 -252
- package/src/supervisor.test.ts +340 -0
- package/src/supervisor.ts +177 -0
- package/src/types.ts +476 -12
- package/LICENSE +0 -21
package/dist/index.cjs
CHANGED
|
@@ -1,18 +1,22 @@
|
|
|
1
1
|
'use strict';
|
|
2
2
|
|
|
3
|
-
var
|
|
4
|
-
var crypto = require('crypto');
|
|
3
|
+
var events = require('events');
|
|
5
4
|
var worker_threads = require('worker_threads');
|
|
5
|
+
var async_hooks = require('async_hooks');
|
|
6
6
|
var pg = require('pg');
|
|
7
7
|
var pgConnectionString = require('pg-connection-string');
|
|
8
8
|
var fs = require('fs');
|
|
9
|
+
var crypto = require('crypto');
|
|
9
10
|
var module$1 = require('module');
|
|
11
|
+
var croner = require('croner');
|
|
10
12
|
|
|
11
13
|
var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null;
|
|
12
14
|
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
13
15
|
|
|
14
16
|
var fs__default = /*#__PURE__*/_interopDefault(fs);
|
|
15
17
|
|
|
18
|
+
// src/index.ts
|
|
19
|
+
|
|
16
20
|
// src/types.ts
|
|
17
21
|
var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
18
22
|
JobEventType2["Added"] = "added";
|
|
@@ -26,11 +30,11 @@ var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
|
26
30
|
JobEventType2["Waiting"] = "waiting";
|
|
27
31
|
return JobEventType2;
|
|
28
32
|
})(JobEventType || {});
|
|
29
|
-
var FailureReason = /* @__PURE__ */ ((
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
return
|
|
33
|
+
var FailureReason = /* @__PURE__ */ ((FailureReason4) => {
|
|
34
|
+
FailureReason4["Timeout"] = "timeout";
|
|
35
|
+
FailureReason4["HandlerError"] = "handler_error";
|
|
36
|
+
FailureReason4["NoHandler"] = "no_handler";
|
|
37
|
+
return FailureReason4;
|
|
34
38
|
})(FailureReason || {});
|
|
35
39
|
var WaitSignal = class extends Error {
|
|
36
40
|
constructor(type, waitUntil, tokenId, stepData) {
|
|
@@ -57,264 +61,1311 @@ var log = (message) => {
|
|
|
57
61
|
}
|
|
58
62
|
};
|
|
59
63
|
|
|
60
|
-
// src/
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
return this.pool;
|
|
68
|
-
}
|
|
69
|
-
// ── Events ──────────────────────────────────────────────────────────
|
|
70
|
-
async recordJobEvent(jobId, eventType, metadata) {
|
|
71
|
-
const client = await this.pool.connect();
|
|
72
|
-
try {
|
|
73
|
-
await client.query(
|
|
74
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
75
|
-
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
76
|
-
);
|
|
77
|
-
} catch (error) {
|
|
78
|
-
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
79
|
-
} finally {
|
|
80
|
-
client.release();
|
|
81
|
-
}
|
|
82
|
-
}
|
|
83
|
-
async getJobEvents(jobId) {
|
|
84
|
-
const client = await this.pool.connect();
|
|
85
|
-
try {
|
|
86
|
-
const res = await client.query(
|
|
87
|
-
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
88
|
-
[jobId]
|
|
89
|
-
);
|
|
90
|
-
return res.rows;
|
|
91
|
-
} finally {
|
|
92
|
-
client.release();
|
|
93
|
-
}
|
|
94
|
-
}
|
|
95
|
-
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
96
|
-
async addJob({
|
|
97
|
-
jobType,
|
|
98
|
-
payload,
|
|
99
|
-
maxAttempts = 3,
|
|
100
|
-
priority = 0,
|
|
101
|
-
runAt = null,
|
|
102
|
-
timeoutMs = void 0,
|
|
103
|
-
forceKillOnTimeout = false,
|
|
104
|
-
tags = void 0,
|
|
105
|
-
idempotencyKey = void 0
|
|
106
|
-
}) {
|
|
107
|
-
const client = await this.pool.connect();
|
|
108
|
-
try {
|
|
109
|
-
let result;
|
|
110
|
-
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
111
|
-
if (runAt) {
|
|
112
|
-
result = await client.query(
|
|
113
|
-
`INSERT INTO job_queue
|
|
114
|
-
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
115
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
116
|
-
${onConflict}
|
|
117
|
-
RETURNING id`,
|
|
118
|
-
[
|
|
119
|
-
jobType,
|
|
120
|
-
payload,
|
|
121
|
-
maxAttempts,
|
|
122
|
-
priority,
|
|
123
|
-
runAt,
|
|
124
|
-
timeoutMs ?? null,
|
|
125
|
-
forceKillOnTimeout ?? false,
|
|
126
|
-
tags ?? null,
|
|
127
|
-
idempotencyKey ?? null
|
|
128
|
-
]
|
|
129
|
-
);
|
|
130
|
-
} else {
|
|
131
|
-
result = await client.query(
|
|
132
|
-
`INSERT INTO job_queue
|
|
133
|
-
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
134
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
135
|
-
${onConflict}
|
|
136
|
-
RETURNING id`,
|
|
137
|
-
[
|
|
138
|
-
jobType,
|
|
139
|
-
payload,
|
|
140
|
-
maxAttempts,
|
|
141
|
-
priority,
|
|
142
|
-
timeoutMs ?? null,
|
|
143
|
-
forceKillOnTimeout ?? false,
|
|
144
|
-
tags ?? null,
|
|
145
|
-
idempotencyKey ?? null
|
|
146
|
-
]
|
|
147
|
-
);
|
|
148
|
-
}
|
|
149
|
-
if (result.rows.length === 0 && idempotencyKey) {
|
|
150
|
-
const existing = await client.query(
|
|
151
|
-
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
152
|
-
[idempotencyKey]
|
|
153
|
-
);
|
|
154
|
-
if (existing.rows.length > 0) {
|
|
155
|
-
log(
|
|
156
|
-
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
157
|
-
);
|
|
158
|
-
return existing.rows[0].id;
|
|
159
|
-
}
|
|
160
|
-
throw new Error(
|
|
161
|
-
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
162
|
-
);
|
|
163
|
-
}
|
|
164
|
-
const jobId = result.rows[0].id;
|
|
165
|
-
log(
|
|
166
|
-
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
64
|
+
// src/processor.ts
|
|
65
|
+
function validateHandlerSerializable(handler, jobType) {
|
|
66
|
+
try {
|
|
67
|
+
const handlerString = handler.toString();
|
|
68
|
+
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
69
|
+
throw new Error(
|
|
70
|
+
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
167
71
|
);
|
|
168
|
-
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
169
|
-
jobType,
|
|
170
|
-
payload,
|
|
171
|
-
tags,
|
|
172
|
-
idempotencyKey
|
|
173
|
-
});
|
|
174
|
-
return jobId;
|
|
175
|
-
} catch (error) {
|
|
176
|
-
log(`Error adding job: ${error}`);
|
|
177
|
-
throw error;
|
|
178
|
-
} finally {
|
|
179
|
-
client.release();
|
|
180
72
|
}
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
try {
|
|
185
|
-
const result = await client.query(
|
|
186
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
187
|
-
[id]
|
|
73
|
+
if (handlerString.includes("[native code]")) {
|
|
74
|
+
throw new Error(
|
|
75
|
+
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
188
76
|
);
|
|
189
|
-
if (result.rows.length === 0) {
|
|
190
|
-
log(`Job ${id} not found`);
|
|
191
|
-
return null;
|
|
192
|
-
}
|
|
193
|
-
log(`Found job ${id}`);
|
|
194
|
-
const job = result.rows[0];
|
|
195
|
-
return {
|
|
196
|
-
...job,
|
|
197
|
-
payload: job.payload,
|
|
198
|
-
timeoutMs: job.timeoutMs,
|
|
199
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
200
|
-
failureReason: job.failureReason
|
|
201
|
-
};
|
|
202
|
-
} catch (error) {
|
|
203
|
-
log(`Error getting job ${id}: ${error}`);
|
|
204
|
-
throw error;
|
|
205
|
-
} finally {
|
|
206
|
-
client.release();
|
|
207
77
|
}
|
|
208
|
-
}
|
|
209
|
-
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
210
|
-
const client = await this.pool.connect();
|
|
211
78
|
try {
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
79
|
+
new Function("return " + handlerString);
|
|
80
|
+
} catch (parseError) {
|
|
81
|
+
throw new Error(
|
|
82
|
+
`Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
|
|
215
83
|
);
|
|
216
|
-
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
217
|
-
return result.rows.map((job) => ({
|
|
218
|
-
...job,
|
|
219
|
-
payload: job.payload,
|
|
220
|
-
timeoutMs: job.timeoutMs,
|
|
221
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
222
|
-
failureReason: job.failureReason
|
|
223
|
-
}));
|
|
224
|
-
} catch (error) {
|
|
225
|
-
log(`Error getting jobs by status ${status}: ${error}`);
|
|
226
|
-
throw error;
|
|
227
|
-
} finally {
|
|
228
|
-
client.release();
|
|
229
84
|
}
|
|
230
|
-
}
|
|
231
|
-
|
|
232
|
-
const client = await this.pool.connect();
|
|
233
|
-
try {
|
|
234
|
-
const result = await client.query(
|
|
235
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
236
|
-
[limit, offset]
|
|
237
|
-
);
|
|
238
|
-
log(`Found ${result.rows.length} jobs (all)`);
|
|
239
|
-
return result.rows.map((job) => ({
|
|
240
|
-
...job,
|
|
241
|
-
payload: job.payload,
|
|
242
|
-
timeoutMs: job.timeoutMs,
|
|
243
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
244
|
-
}));
|
|
245
|
-
} catch (error) {
|
|
246
|
-
log(`Error getting all jobs: ${error}`);
|
|
85
|
+
} catch (error) {
|
|
86
|
+
if (error instanceof Error) {
|
|
247
87
|
throw error;
|
|
248
|
-
} finally {
|
|
249
|
-
client.release();
|
|
250
88
|
}
|
|
89
|
+
throw new Error(
|
|
90
|
+
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
91
|
+
);
|
|
251
92
|
}
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
93
|
+
}
|
|
94
|
+
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
|
|
95
|
+
validateHandlerSerializable(handler, jobType);
|
|
96
|
+
return new Promise((resolve, reject) => {
|
|
97
|
+
const workerCode = `
|
|
98
|
+
(function() {
|
|
99
|
+
const { parentPort, workerData } = require('worker_threads');
|
|
100
|
+
const { handlerCode, payload, timeoutMs } = workerData;
|
|
101
|
+
|
|
102
|
+
// Create an AbortController for the handler
|
|
103
|
+
const controller = new AbortController();
|
|
104
|
+
const signal = controller.signal;
|
|
105
|
+
|
|
106
|
+
// Set up timeout
|
|
107
|
+
const timeoutId = setTimeout(() => {
|
|
108
|
+
controller.abort();
|
|
109
|
+
parentPort.postMessage({ type: 'timeout' });
|
|
110
|
+
}, timeoutMs);
|
|
111
|
+
|
|
112
|
+
try {
|
|
113
|
+
// Execute the handler
|
|
114
|
+
// Note: This uses Function constructor which requires the handler to be serializable.
|
|
115
|
+
// The handler should be validated before reaching this point.
|
|
116
|
+
let handlerFn;
|
|
117
|
+
try {
|
|
118
|
+
// Wrap handlerCode in parentheses to ensure it's treated as an expression
|
|
119
|
+
// This handles both arrow functions and regular functions
|
|
120
|
+
const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
|
|
121
|
+
? handlerCode
|
|
122
|
+
: '(' + handlerCode + ')';
|
|
123
|
+
handlerFn = new Function('return ' + wrappedCode)();
|
|
124
|
+
} catch (parseError) {
|
|
125
|
+
clearTimeout(timeoutId);
|
|
126
|
+
parentPort.postMessage({
|
|
127
|
+
type: 'error',
|
|
128
|
+
error: {
|
|
129
|
+
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
130
|
+
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
131
|
+
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
132
|
+
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
133
|
+
name: 'SerializationError',
|
|
134
|
+
},
|
|
135
|
+
});
|
|
136
|
+
return;
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// Ensure handlerFn is actually a function
|
|
140
|
+
if (typeof handlerFn !== 'function') {
|
|
141
|
+
clearTimeout(timeoutId);
|
|
142
|
+
parentPort.postMessage({
|
|
143
|
+
type: 'error',
|
|
144
|
+
error: {
|
|
145
|
+
message: 'Handler deserialization did not produce a function. ' +
|
|
146
|
+
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
147
|
+
name: 'SerializationError',
|
|
148
|
+
},
|
|
149
|
+
});
|
|
150
|
+
return;
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
handlerFn(payload, signal)
|
|
154
|
+
.then(() => {
|
|
155
|
+
clearTimeout(timeoutId);
|
|
156
|
+
parentPort.postMessage({ type: 'success' });
|
|
157
|
+
})
|
|
158
|
+
.catch((error) => {
|
|
159
|
+
clearTimeout(timeoutId);
|
|
160
|
+
parentPort.postMessage({
|
|
161
|
+
type: 'error',
|
|
162
|
+
error: {
|
|
163
|
+
message: error.message,
|
|
164
|
+
stack: error.stack,
|
|
165
|
+
name: error.name,
|
|
166
|
+
},
|
|
167
|
+
});
|
|
168
|
+
});
|
|
169
|
+
} catch (error) {
|
|
170
|
+
clearTimeout(timeoutId);
|
|
171
|
+
parentPort.postMessage({
|
|
172
|
+
type: 'error',
|
|
173
|
+
error: {
|
|
174
|
+
message: error.message,
|
|
175
|
+
stack: error.stack,
|
|
176
|
+
name: error.name,
|
|
177
|
+
},
|
|
178
|
+
});
|
|
179
|
+
}
|
|
180
|
+
})();
|
|
181
|
+
`;
|
|
182
|
+
const worker = new worker_threads.Worker(workerCode, {
|
|
183
|
+
eval: true,
|
|
184
|
+
workerData: {
|
|
185
|
+
handlerCode: handler.toString(),
|
|
186
|
+
payload,
|
|
187
|
+
timeoutMs
|
|
188
|
+
}
|
|
189
|
+
});
|
|
190
|
+
let resolved = false;
|
|
191
|
+
worker.on("message", (message) => {
|
|
192
|
+
if (resolved) return;
|
|
193
|
+
resolved = true;
|
|
194
|
+
if (message.type === "success") {
|
|
195
|
+
resolve();
|
|
196
|
+
} else if (message.type === "timeout") {
|
|
197
|
+
const timeoutError = new Error(
|
|
198
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
199
|
+
);
|
|
200
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
201
|
+
reject(timeoutError);
|
|
202
|
+
} else if (message.type === "error") {
|
|
203
|
+
const error = new Error(message.error.message);
|
|
204
|
+
error.stack = message.error.stack;
|
|
205
|
+
error.name = message.error.name;
|
|
206
|
+
reject(error);
|
|
207
|
+
}
|
|
208
|
+
});
|
|
209
|
+
worker.on("error", (error) => {
|
|
210
|
+
if (resolved) return;
|
|
211
|
+
resolved = true;
|
|
212
|
+
reject(error);
|
|
213
|
+
});
|
|
214
|
+
worker.on("exit", (code) => {
|
|
215
|
+
if (resolved) return;
|
|
216
|
+
if (code !== 0) {
|
|
217
|
+
resolved = true;
|
|
218
|
+
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
219
|
+
}
|
|
220
|
+
});
|
|
221
|
+
setTimeout(() => {
|
|
222
|
+
if (!resolved) {
|
|
223
|
+
resolved = true;
|
|
224
|
+
worker.terminate().then(() => {
|
|
225
|
+
const timeoutError = new Error(
|
|
226
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
227
|
+
);
|
|
228
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
229
|
+
reject(timeoutError);
|
|
230
|
+
}).catch((err) => {
|
|
231
|
+
reject(err);
|
|
232
|
+
});
|
|
233
|
+
}
|
|
234
|
+
}, timeoutMs + 100);
|
|
235
|
+
});
|
|
236
|
+
}
|
|
237
|
+
function calculateWaitUntil(duration) {
|
|
238
|
+
const now = Date.now();
|
|
239
|
+
let ms = 0;
|
|
240
|
+
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
241
|
+
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
242
|
+
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
243
|
+
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
244
|
+
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
245
|
+
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
246
|
+
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
247
|
+
if (ms <= 0) {
|
|
248
|
+
throw new Error(
|
|
249
|
+
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
250
|
+
);
|
|
251
|
+
}
|
|
252
|
+
return new Date(now + ms);
|
|
253
|
+
}
|
|
254
|
+
async function resolveCompletedWaits(backend, stepData) {
|
|
255
|
+
for (const key of Object.keys(stepData)) {
|
|
256
|
+
if (!key.startsWith("__wait_")) continue;
|
|
257
|
+
const entry = stepData[key];
|
|
258
|
+
if (!entry || typeof entry !== "object" || entry.completed) continue;
|
|
259
|
+
if (entry.type === "duration" || entry.type === "date") {
|
|
260
|
+
stepData[key] = { ...entry, completed: true };
|
|
261
|
+
} else if (entry.type === "token" && entry.tokenId) {
|
|
262
|
+
const wp = await backend.getWaitpoint(entry.tokenId);
|
|
263
|
+
if (wp && wp.status === "completed") {
|
|
264
|
+
stepData[key] = {
|
|
265
|
+
...entry,
|
|
266
|
+
completed: true,
|
|
267
|
+
result: { ok: true, output: wp.output }
|
|
268
|
+
};
|
|
269
|
+
} else if (wp && wp.status === "timed_out") {
|
|
270
|
+
stepData[key] = {
|
|
271
|
+
...entry,
|
|
272
|
+
completed: true,
|
|
273
|
+
result: { ok: false, error: "Token timed out" }
|
|
274
|
+
};
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
function buildWaitContext(backend, jobId, stepData, baseCtx) {
|
|
280
|
+
let waitCounter = 0;
|
|
281
|
+
const ctx = {
|
|
282
|
+
prolong: baseCtx.prolong,
|
|
283
|
+
onTimeout: baseCtx.onTimeout,
|
|
284
|
+
run: async (stepName, fn) => {
|
|
285
|
+
const cached = stepData[stepName];
|
|
286
|
+
if (cached && typeof cached === "object" && cached.__completed) {
|
|
287
|
+
log(`Step "${stepName}" replayed from cache for job ${jobId}`);
|
|
288
|
+
return cached.result;
|
|
289
|
+
}
|
|
290
|
+
const result = await fn();
|
|
291
|
+
stepData[stepName] = { __completed: true, result };
|
|
292
|
+
await backend.updateStepData(jobId, stepData);
|
|
293
|
+
return result;
|
|
294
|
+
},
|
|
295
|
+
waitFor: async (duration) => {
|
|
296
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
297
|
+
const cached = stepData[waitKey];
|
|
298
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
299
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
300
|
+
return;
|
|
301
|
+
}
|
|
302
|
+
const waitUntilDate = calculateWaitUntil(duration);
|
|
303
|
+
stepData[waitKey] = { type: "duration", completed: false };
|
|
304
|
+
throw new WaitSignal("duration", waitUntilDate, void 0, stepData);
|
|
305
|
+
},
|
|
306
|
+
waitUntil: async (date) => {
|
|
307
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
308
|
+
const cached = stepData[waitKey];
|
|
309
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
310
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
311
|
+
return;
|
|
312
|
+
}
|
|
313
|
+
stepData[waitKey] = { type: "date", completed: false };
|
|
314
|
+
throw new WaitSignal("date", date, void 0, stepData);
|
|
315
|
+
},
|
|
316
|
+
createToken: async (options) => {
|
|
317
|
+
const token = await backend.createWaitpoint(jobId, options);
|
|
318
|
+
return token;
|
|
319
|
+
},
|
|
320
|
+
waitForToken: async (tokenId) => {
|
|
321
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
322
|
+
const cached = stepData[waitKey];
|
|
323
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
324
|
+
log(
|
|
325
|
+
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
326
|
+
);
|
|
327
|
+
return cached.result;
|
|
328
|
+
}
|
|
329
|
+
const wp = await backend.getWaitpoint(tokenId);
|
|
330
|
+
if (wp && wp.status === "completed") {
|
|
331
|
+
const result = {
|
|
332
|
+
ok: true,
|
|
333
|
+
output: wp.output
|
|
334
|
+
};
|
|
335
|
+
stepData[waitKey] = {
|
|
336
|
+
type: "token",
|
|
337
|
+
tokenId,
|
|
338
|
+
completed: true,
|
|
339
|
+
result
|
|
340
|
+
};
|
|
341
|
+
await backend.updateStepData(jobId, stepData);
|
|
342
|
+
return result;
|
|
343
|
+
}
|
|
344
|
+
if (wp && wp.status === "timed_out") {
|
|
345
|
+
const result = {
|
|
346
|
+
ok: false,
|
|
347
|
+
error: "Token timed out"
|
|
348
|
+
};
|
|
349
|
+
stepData[waitKey] = {
|
|
350
|
+
type: "token",
|
|
351
|
+
tokenId,
|
|
352
|
+
completed: true,
|
|
353
|
+
result
|
|
354
|
+
};
|
|
355
|
+
await backend.updateStepData(jobId, stepData);
|
|
356
|
+
return result;
|
|
357
|
+
}
|
|
358
|
+
stepData[waitKey] = { type: "token", tokenId, completed: false };
|
|
359
|
+
throw new WaitSignal("token", void 0, tokenId, stepData);
|
|
360
|
+
},
|
|
361
|
+
setProgress: async (percent) => {
|
|
362
|
+
if (percent < 0 || percent > 100)
|
|
363
|
+
throw new Error("Progress must be between 0 and 100");
|
|
364
|
+
await backend.updateProgress(jobId, Math.round(percent));
|
|
365
|
+
}
|
|
366
|
+
};
|
|
367
|
+
return ctx;
|
|
368
|
+
}
|
|
369
|
+
async function processJobWithHandlers(backend, job, jobHandlers, emit) {
|
|
370
|
+
const handler = jobHandlers[job.jobType];
|
|
371
|
+
if (!handler) {
|
|
372
|
+
await backend.setPendingReasonForUnpickedJobs(
|
|
373
|
+
`No handler registered for job type: ${job.jobType}`,
|
|
374
|
+
job.jobType
|
|
375
|
+
);
|
|
376
|
+
const noHandlerError = new Error(
|
|
377
|
+
`No handler registered for job type: ${job.jobType}`
|
|
378
|
+
);
|
|
379
|
+
await backend.failJob(job.id, noHandlerError, "no_handler" /* NoHandler */);
|
|
380
|
+
emit?.("job:failed", {
|
|
381
|
+
jobId: job.id,
|
|
382
|
+
jobType: job.jobType,
|
|
383
|
+
error: noHandlerError,
|
|
384
|
+
willRetry: false
|
|
385
|
+
});
|
|
386
|
+
return;
|
|
387
|
+
}
|
|
388
|
+
const stepData = { ...job.stepData || {} };
|
|
389
|
+
const hasStepHistory = Object.keys(stepData).some(
|
|
390
|
+
(k) => k.startsWith("__wait_")
|
|
391
|
+
);
|
|
392
|
+
if (hasStepHistory) {
|
|
393
|
+
await resolveCompletedWaits(backend, stepData);
|
|
394
|
+
await backend.updateStepData(job.id, stepData);
|
|
395
|
+
}
|
|
396
|
+
const timeoutMs = job.timeoutMs ?? void 0;
|
|
397
|
+
const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
|
|
398
|
+
let timeoutId;
|
|
399
|
+
const controller = new AbortController();
|
|
400
|
+
try {
|
|
401
|
+
if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
|
|
402
|
+
await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
|
|
403
|
+
} else {
|
|
404
|
+
let onTimeoutCallback;
|
|
405
|
+
let timeoutReject;
|
|
406
|
+
const armTimeout = (ms) => {
|
|
407
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
408
|
+
timeoutId = setTimeout(() => {
|
|
409
|
+
if (onTimeoutCallback) {
|
|
410
|
+
try {
|
|
411
|
+
const extension = onTimeoutCallback();
|
|
412
|
+
if (typeof extension === "number" && extension > 0) {
|
|
413
|
+
backend.prolongJob(job.id).catch(() => {
|
|
414
|
+
});
|
|
415
|
+
armTimeout(extension);
|
|
416
|
+
return;
|
|
417
|
+
}
|
|
418
|
+
} catch (callbackError) {
|
|
419
|
+
log(
|
|
420
|
+
`onTimeout callback threw for job ${job.id}: ${callbackError}`
|
|
421
|
+
);
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
controller.abort();
|
|
425
|
+
const timeoutError = new Error(`Job timed out after ${ms} ms`);
|
|
426
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
427
|
+
if (timeoutReject) {
|
|
428
|
+
timeoutReject(timeoutError);
|
|
429
|
+
}
|
|
430
|
+
}, ms);
|
|
431
|
+
};
|
|
432
|
+
const hasTimeout = timeoutMs != null && timeoutMs > 0;
|
|
433
|
+
const baseCtx = hasTimeout ? {
|
|
434
|
+
prolong: (ms) => {
|
|
435
|
+
const duration = ms ?? timeoutMs;
|
|
436
|
+
if (duration != null && duration > 0) {
|
|
437
|
+
armTimeout(duration);
|
|
438
|
+
backend.prolongJob(job.id).catch(() => {
|
|
439
|
+
});
|
|
440
|
+
}
|
|
441
|
+
},
|
|
442
|
+
onTimeout: (callback) => {
|
|
443
|
+
onTimeoutCallback = callback;
|
|
444
|
+
}
|
|
445
|
+
} : {
|
|
446
|
+
prolong: () => {
|
|
447
|
+
log("prolong() called but ignored: job has no timeout set");
|
|
448
|
+
},
|
|
449
|
+
onTimeout: () => {
|
|
450
|
+
log("onTimeout() called but ignored: job has no timeout set");
|
|
451
|
+
}
|
|
452
|
+
};
|
|
453
|
+
const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
|
|
454
|
+
if (emit) {
|
|
455
|
+
const originalSetProgress = ctx.setProgress;
|
|
456
|
+
ctx.setProgress = async (percent) => {
|
|
457
|
+
await originalSetProgress(percent);
|
|
458
|
+
emit("job:progress", {
|
|
459
|
+
jobId: job.id,
|
|
460
|
+
progress: Math.round(percent)
|
|
461
|
+
});
|
|
462
|
+
};
|
|
463
|
+
}
|
|
464
|
+
if (forceKillOnTimeout && !hasTimeout) {
|
|
465
|
+
log(
|
|
466
|
+
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
467
|
+
);
|
|
468
|
+
}
|
|
469
|
+
const jobPromise = handler(job.payload, controller.signal, ctx);
|
|
470
|
+
if (hasTimeout) {
|
|
471
|
+
await Promise.race([
|
|
472
|
+
jobPromise,
|
|
473
|
+
new Promise((_, reject) => {
|
|
474
|
+
timeoutReject = reject;
|
|
475
|
+
armTimeout(timeoutMs);
|
|
476
|
+
})
|
|
477
|
+
]);
|
|
478
|
+
} else {
|
|
479
|
+
await jobPromise;
|
|
480
|
+
}
|
|
481
|
+
}
|
|
482
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
483
|
+
await backend.completeJob(job.id);
|
|
484
|
+
emit?.("job:completed", { jobId: job.id, jobType: job.jobType });
|
|
485
|
+
} catch (error) {
|
|
486
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
487
|
+
if (error instanceof WaitSignal) {
|
|
488
|
+
log(
|
|
489
|
+
`Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? "none"}, tokenId=${error.tokenId ?? "none"}`
|
|
490
|
+
);
|
|
491
|
+
await backend.waitJob(job.id, {
|
|
492
|
+
waitUntil: error.waitUntil,
|
|
493
|
+
waitTokenId: error.tokenId,
|
|
494
|
+
stepData: error.stepData
|
|
495
|
+
});
|
|
496
|
+
emit?.("job:waiting", { jobId: job.id, jobType: job.jobType });
|
|
497
|
+
return;
|
|
498
|
+
}
|
|
499
|
+
console.error(`Error processing job ${job.id}:`, error);
|
|
500
|
+
let failureReason = "handler_error" /* HandlerError */;
|
|
501
|
+
if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
|
|
502
|
+
failureReason = "timeout" /* Timeout */;
|
|
503
|
+
}
|
|
504
|
+
const failError = error instanceof Error ? error : new Error(String(error));
|
|
505
|
+
await backend.failJob(job.id, failError, failureReason);
|
|
506
|
+
emit?.("job:failed", {
|
|
507
|
+
jobId: job.id,
|
|
508
|
+
jobType: job.jobType,
|
|
509
|
+
error: failError,
|
|
510
|
+
willRetry: job.attempts + 1 < job.maxAttempts
|
|
511
|
+
});
|
|
512
|
+
}
|
|
513
|
+
}
|
|
514
|
+
// Claim up to `batchSize` jobs for `workerId` and run each through its
// handler, optionally capping how many run at once.
// Returns the number of jobs claimed for this batch.
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError, emit) {
  const claimed = await backend.getNextBatch(
    workerId,
    batchSize,
    jobType
  );
  // Announce every claimed job before any handler starts.
  if (emit) {
    for (const claimedJob of claimed) {
      emit("job:processing", { jobId: claimedJob.id, jobType: claimedJob.jobType });
    }
  }
  // No cap, or a cap that cannot bind: run the whole batch in parallel.
  if (!concurrency || concurrency >= claimed.length) {
    const runs = claimed.map((claimedJob) =>
      processJobWithHandlers(backend, claimedJob, jobHandlers, emit)
    );
    await Promise.all(runs);
    return claimed.length;
  }
  // Bounded pool: keep at most `concurrency` jobs in flight at a time.
  let cursor = 0;
  let inFlight = 0;
  let settled = 0;
  return new Promise((resolve) => {
    const pump = () => {
      if (settled === claimed.length) {
        resolve(claimed.length);
        return;
      }
      while (inFlight < concurrency && cursor < claimed.length) {
        const claimedJob = claimed[cursor++];
        inFlight++;
        processJobWithHandlers(backend, claimedJob, jobHandlers, emit).then(
          () => {
            inFlight--;
            settled++;
            pump();
          },
          (failure) => {
            inFlight--;
            settled++;
            if (onError) {
              onError(failure instanceof Error ? failure : new Error(String(failure)));
            }
            pump();
          }
        );
      }
    };
    pump();
  });
}
|
|
559
|
+
// Build a polling job processor bound to a backend and a map of per-job-type
// handlers. `onBeforeBatch` (optional) runs before every batch; `emit`
// (optional) receives lifecycle/error events. Returns start/stop controls.
var createProcessor = (backend, handlers, options = {}, onBeforeBatch, emit) => {
  const {
    // Random suffix gives each processor instance a distinct lock-owner id.
    workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
    batchSize = 10,
    pollInterval = 5e3,
    onError = (error) => console.error("Job processor error:", error),
    jobType,
    concurrency = 3
  } = options;
  let running = false;
  // Handle of the pending setTimeout between polls (despite the name, this is
  // a timeout handle, not a setInterval handle).
  let intervalId = null;
  // Promise of the batch currently in flight; consumed by stopAndDrain.
  let currentBatchPromise = null;
  setLogContext(options.verbose ?? false);
  // Run a single poll: optional pre-batch hook, then one batch.
  // Returns the number of jobs processed (0 when stopped or on batch error).
  const processJobs = async () => {
    if (!running) return 0;
    if (onBeforeBatch) {
      try {
        await onBeforeBatch();
      } catch (hookError) {
        // A failing hook is reported but does NOT skip the batch.
        log(`onBeforeBatch hook error: ${hookError}`);
        const err = hookError instanceof Error ? hookError : new Error(String(hookError));
        if (onError) {
          onError(err);
        }
        emit?.("error", err);
      }
    }
    log(
      `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
    );
    try {
      const processed = await processBatchWithHandlers(
        backend,
        workerId,
        batchSize,
        jobType,
        handlers,
        concurrency,
        onError,
        emit
      );
      return processed;
    } catch (error) {
      // Batch-level failures are reported through both channels; the poll
      // loop keeps running and reports 0 processed.
      const err = error instanceof Error ? error : new Error(String(error));
      onError(err);
      emit?.("error", err);
    }
    return 0;
  };
  return {
    /**
     * Start the job processor in the background.
     * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
     * - You have to call the stop method to stop the processor.
     */
    startInBackground: () => {
      if (running) return;
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const scheduleNext = (immediate) => {
        if (!running) return;
        if (immediate) {
          // A full batch suggests more work is queued: poll again right away.
          intervalId = setTimeout(loop, 0);
        } else {
          intervalId = setTimeout(loop, pollInterval);
        }
      };
      const loop = async () => {
        if (!running) return;
        currentBatchPromise = processJobs();
        const processed = await currentBatchPromise;
        currentBatchPromise = null;
        scheduleNext(processed === batchSize);
      };
      loop();
    },
    /**
     * Stop the job processor that runs in the background.
     * Does not wait for in-flight jobs.
     */
    stop: () => {
      log(`Stopping job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearTimeout(intervalId);
        intervalId = null;
      }
    },
    /**
     * Stop the job processor and wait for all in-flight jobs to complete.
     * Useful for graceful shutdown (e.g., SIGTERM handling).
     */
    stopAndDrain: async (drainTimeoutMs = 3e4) => {
      log(`Stopping and draining job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearTimeout(intervalId);
        intervalId = null;
      }
      if (currentBatchPromise) {
        // Wait for the in-flight batch, but never longer than drainTimeoutMs.
        // The .catch guard keeps the race from rejecting.
        await Promise.race([
          currentBatchPromise.catch(() => {
          }),
          new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
        ]);
        currentBatchPromise = null;
      }
      log(`Job processor ${workerId} drained`);
    },
    /**
     * Start the job processor synchronously.
     * - This will process all jobs immediately and then stop.
     * - The pollInterval is ignored.
     */
    start: async () => {
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const processed = await processJobs();
      running = false;
      return processed;
    },
    isRunning: () => running
  };
};
|
|
683
|
+
|
|
684
|
+
// src/supervisor.ts
|
|
685
|
+
// Build a maintenance supervisor that periodically reclaims stuck jobs,
// prunes old jobs/events, and expires timed-out waitpoints.
// Each maintenance task is independent: a failure in one is reported and the
// remaining tasks still run.
// Refactor: the error-normalize/report sequence was duplicated verbatim in
// all four task catch blocks; it is now a single local helper.
var createSupervisor = (backend, options = {}, emit) => {
  const {
    intervalMs = 6e4,
    stuckJobsTimeoutMinutes = 10,
    cleanupJobsDaysToKeep = 30,
    cleanupEventsDaysToKeep = 30,
    cleanupBatchSize = 1e3,
    reclaimStuckJobs = true,
    expireTimedOutTokens = true,
    onError = (error) => console.error("Supervisor maintenance error:", error),
    verbose = false
  } = options;
  let running = false;
  let timeoutId = null;
  // Promise of the maintenance run currently in flight; used by stopAndDrain.
  let currentRunPromise = null;
  setLogContext(verbose);
  // Normalize a thrown value to an Error and report it to both the onError
  // callback and the (optional) event emitter.
  const reportError = (e) => {
    const err = e instanceof Error ? e : new Error(String(e));
    onError(err);
    emit?.("error", err);
  };
  // One full maintenance pass. Never rejects: each task's errors are caught
  // and reported, and that task's counter stays 0.
  const runOnce = async () => {
    setLogContext(verbose);
    const result = {
      reclaimedJobs: 0,
      cleanedUpJobs: 0,
      cleanedUpEvents: 0,
      expiredTokens: 0
    };
    if (reclaimStuckJobs) {
      try {
        result.reclaimedJobs = await backend.reclaimStuckJobs(
          stuckJobsTimeoutMinutes
        );
        if (result.reclaimedJobs > 0) {
          log(`Supervisor: reclaimed ${result.reclaimedJobs} stuck jobs`);
        }
      } catch (e) {
        reportError(e);
      }
    }
    if (cleanupJobsDaysToKeep > 0) {
      try {
        result.cleanedUpJobs = await backend.cleanupOldJobs(
          cleanupJobsDaysToKeep,
          cleanupBatchSize
        );
        if (result.cleanedUpJobs > 0) {
          log(`Supervisor: cleaned up ${result.cleanedUpJobs} old jobs`);
        }
      } catch (e) {
        reportError(e);
      }
    }
    if (cleanupEventsDaysToKeep > 0) {
      try {
        result.cleanedUpEvents = await backend.cleanupOldJobEvents(
          cleanupEventsDaysToKeep,
          cleanupBatchSize
        );
        if (result.cleanedUpEvents > 0) {
          log(
            `Supervisor: cleaned up ${result.cleanedUpEvents} old job events`
          );
        }
      } catch (e) {
        reportError(e);
      }
    }
    if (expireTimedOutTokens) {
      try {
        result.expiredTokens = await backend.expireTimedOutWaitpoints();
        if (result.expiredTokens > 0) {
          log(`Supervisor: expired ${result.expiredTokens} timed-out tokens`);
        }
      } catch (e) {
        reportError(e);
      }
    }
    return result;
  };
  return {
    /** Run one maintenance pass and resolve with the per-task counters. */
    start: async () => {
      return runOnce();
    },
    /** Start a self-rescheduling maintenance loop (every intervalMs). */
    startInBackground: () => {
      if (running) return;
      log("Supervisor: starting background maintenance loop");
      running = true;
      const loop = async () => {
        if (!running) return;
        currentRunPromise = runOnce();
        await currentRunPromise;
        currentRunPromise = null;
        // Re-check after the await: stop() may have been called mid-run.
        if (running) {
          timeoutId = setTimeout(loop, intervalMs);
        }
      };
      loop();
    },
    /** Stop the loop without waiting for an in-flight run. */
    stop: () => {
      running = false;
      if (timeoutId !== null) {
        clearTimeout(timeoutId);
        timeoutId = null;
      }
      log("Supervisor: stopped");
    },
    /** Stop the loop and wait (up to timeoutMs) for the in-flight run. */
    stopAndDrain: async (timeoutMs = 3e4) => {
      running = false;
      if (timeoutId !== null) {
        clearTimeout(timeoutId);
        timeoutId = null;
      }
      if (currentRunPromise) {
        log("Supervisor: draining current maintenance run\u2026");
        // runOnce never rejects (all task errors are caught), so no catch
        // guard is needed on the race.
        await Promise.race([
          currentRunPromise,
          new Promise((resolve) => setTimeout(resolve, timeoutMs))
        ]);
        currentRunPromise = null;
      }
      log("Supervisor: drained and stopped");
    },
    isRunning: () => running
  };
};
|
|
815
|
+
// Resolve a PEM-style config value: a "file://" reference is read from disk,
// any other non-empty string is returned verbatim, and a missing/empty value
// yields undefined.
function loadPemOrFile(value) {
  if (!value) return void 0;
  const FILE_PREFIX = "file://";
  if (!value.startsWith(FILE_PREFIX)) {
    return value;
  }
  // Strip the scheme prefix and read the referenced file as UTF-8.
  return fs__default.default.readFileSync(value.slice(FILE_PREFIX.length), "utf8");
}
|
|
823
|
+
// Build a pg Pool from a config object, deriving search_path and SSL
// settings from the connection string, explicit config.ssl values, and the
// PGSSLROOTCERT/PGSSLCERT/PGSSLKEY environment variables (in that precedence
// order for each field).
var createPool = (config) => {
  let searchPath;
  let ssl = void 0;
  let customCA;
  let sslmode;
  if (config.connectionString) {
    try {
      // Preferred path: WHATWG URL parsing of the connection string.
      const url = new URL(config.connectionString);
      searchPath = url.searchParams.get("search_path") || void 0;
      sslmode = url.searchParams.get("sslmode") || void 0;
      if (sslmode === "no-verify") {
        ssl = { rejectUnauthorized: false };
      }
    } catch (e) {
      // Fallback for strings the URL parser rejects: pg-connection-string
      // exposes search_path inside `options` and sslmode as its own field.
      const parsed = pgConnectionString.parse(config.connectionString);
      if (parsed.options) {
        const match = parsed.options.match(/search_path=([^\s]+)/);
        if (match) {
          searchPath = match[1];
        }
      }
      sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
      if (sslmode === "no-verify") {
        ssl = { rejectUnauthorized: false };
      }
    }
  }
  if (config.ssl) {
    // CA precedence: explicit config.ssl.ca, then PGSSLROOTCERT, else none.
    if (typeof config.ssl.ca === "string") {
      customCA = config.ssl.ca;
    } else if (typeof process.env.PGSSLROOTCERT === "string") {
      customCA = process.env.PGSSLROOTCERT;
    } else {
      customCA = void 0;
    }
    const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
    // Merge over any sslmode-derived setting; explicit config wins.
    ssl = {
      ...ssl,
      ...caValue ? { ca: caValue } : {},
      cert: loadPemOrFile(
        typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
      ),
      key: loadPemOrFile(
        typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
      ),
      // Certificate verification defaults to ON unless explicitly disabled.
      rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
    };
  }
  // Warn (but proceed) when both sources of SSL config are present.
  if (sslmode && customCA) {
    const warning = `

\x1B[33m**************************************************
\u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
**************************************************
Both sslmode ('${sslmode}') is set in the connection string
and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
This combination may cause connection failures or unexpected behavior.

Recommended: Remove sslmode from the connection string when using a custom CA.
**************************************************\x1B[0m
`;
    console.warn(warning);
  }
  const pool = new pg.Pool({
    ...config,
    ...ssl ? { ssl } : {}
  });
  if (searchPath) {
    // NOTE(review): searchPath is interpolated into SQL unescaped — fine for
    // trusted config, but confirm connection strings are never user-supplied.
    pool.on("connect", (client) => {
      // NOTE(review): this query promise is intentionally not awaited; a
      // failure here is silent — confirm that is acceptable.
      client.query(`SET search_path TO ${searchPath}`);
    });
  }
  return pool;
};
|
|
897
|
+
// Upper bound on any parsed timeout: 365 days, in milliseconds.
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
// Parse a duration string such as "30s", "10m", "1h", or "7d" into
// milliseconds. Throws on malformed input, unknown units, or values above
// MAX_TIMEOUT_MS.
function parseTimeoutString(timeout) {
  const parsed = timeout.match(/^(\d+)(s|m|h|d)$/);
  if (!parsed) {
    throw new Error(
      `Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
    );
  }
  const [, digits, unit] = parsed;
  // Milliseconds per supported unit.
  const UNIT_MS = {
    s: 1e3,
    m: 60 * 1e3,
    h: 60 * 60 * 1e3,
    d: 24 * 60 * 60 * 1e3
  };
  const perUnit = UNIT_MS[unit];
  if (perUnit === void 0) {
    // Unreachable while the regex and the table agree; kept as a guard.
    throw new Error(`Unknown timeout unit: "${unit}"`);
  }
  const ms = parseInt(digits, 10) * perUnit;
  if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
    throw new Error(
      `Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
    );
  }
  return ms;
}
|
|
931
|
+
var PostgresBackend = class {
|
|
932
|
+
  /**
   * @param pool - A `pg` Pool; every query issued by this backend checks a
   * client out of (and releases it back to) this pool.
   */
  constructor(pool) {
    this.pool = pool;
  }
|
|
935
|
+
/** Expose the raw pool for advanced usage. */
|
|
936
|
+
  getPool() {
    // Escape hatch: lets callers run their own SQL against the same pool.
    return this.pool;
  }
|
|
939
|
+
// ── Events ──────────────────────────────────────────────────────────
|
|
940
|
+
async recordJobEvent(jobId, eventType, metadata) {
|
|
941
|
+
const client = await this.pool.connect();
|
|
942
|
+
try {
|
|
943
|
+
await client.query(
|
|
944
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
945
|
+
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
946
|
+
);
|
|
947
|
+
} catch (error) {
|
|
948
|
+
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
949
|
+
} finally {
|
|
950
|
+
client.release();
|
|
951
|
+
}
|
|
952
|
+
}
|
|
953
|
+
async getJobEvents(jobId) {
|
|
954
|
+
const client = await this.pool.connect();
|
|
955
|
+
try {
|
|
956
|
+
const res = await client.query(
|
|
957
|
+
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
958
|
+
[jobId]
|
|
959
|
+
);
|
|
960
|
+
return res.rows;
|
|
961
|
+
} finally {
|
|
962
|
+
client.release();
|
|
963
|
+
}
|
|
964
|
+
}
|
|
965
|
+
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
966
|
+
/**
|
|
967
|
+
* Add a job and return its numeric ID.
|
|
968
|
+
*
|
|
969
|
+
* @param job - Job configuration.
|
|
970
|
+
* @param options - Optional. Pass `{ db }` to run the INSERT on an external
|
|
971
|
+
* client (e.g., inside a transaction) so the job is part of the caller's
|
|
972
|
+
* transaction. The event INSERT also uses the same client.
|
|
973
|
+
*/
|
|
974
|
+
  async addJob({
    jobType,
    payload,
    maxAttempts = 3,
    priority = 0,
    runAt = null,
    timeoutMs = void 0,
    forceKillOnTimeout = false,
    tags = void 0,
    idempotencyKey = void 0,
    retryDelay = void 0,
    retryBackoff = void 0,
    retryDelayMax = void 0
  }, options) {
    // When the caller supplies a client (e.g. inside a transaction), reuse it
    // and never release it here; otherwise check one out of the pool.
    const externalClient = options?.db;
    const client = externalClient ?? await this.pool.connect();
    try {
      let result;
      // Partial-unique-index conflict target: rows whose idempotency_key is
      // NULL never conflict, so the clause is only added when a key is set.
      const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
      if (runAt) {
        // Scheduled job: run_at is supplied explicitly (12-column variant).
        result = await client.query(
          `INSERT INTO job_queue
          (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max)
          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
          ${onConflict}
          RETURNING id`,
          [
            jobType,
            payload,
            maxAttempts,
            priority,
            runAt,
            timeoutMs ?? null,
            forceKillOnTimeout ?? false,
            tags ?? null,
            idempotencyKey ?? null,
            retryDelay ?? null,
            retryBackoff ?? null,
            retryDelayMax ?? null
          ]
        );
      } else {
        // Immediate job: run_at is omitted and takes the column default.
        result = await client.query(
          `INSERT INTO job_queue
          (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max)
          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
          ${onConflict}
          RETURNING id`,
          [
            jobType,
            payload,
            maxAttempts,
            priority,
            timeoutMs ?? null,
            forceKillOnTimeout ?? false,
            tags ?? null,
            idempotencyKey ?? null,
            retryDelay ?? null,
            retryBackoff ?? null,
            retryDelayMax ?? null
          ]
        );
      }
      // Zero rows returned with a key present means DO NOTHING fired:
      // resolve and return the pre-existing job's id instead.
      if (result.rows.length === 0 && idempotencyKey) {
        const existing = await client.query(
          `SELECT id FROM job_queue WHERE idempotency_key = $1`,
          [idempotencyKey]
        );
        if (existing.rows.length > 0) {
          log(
            `Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
          );
          return existing.rows[0].id;
        }
        // Neither inserted nor found: surface the inconsistency loudly.
        throw new Error(
          `Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
        );
      }
      const jobId = result.rows[0].id;
      log(
        `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
      );
      if (externalClient) {
        // Inside a caller transaction: write the "added" event on the SAME
        // client so it commits/rolls back with the job. Best effort — an
        // event failure is logged, not propagated.
        try {
          await client.query(
            `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
            [
              jobId,
              "added" /* Added */,
              JSON.stringify({ jobType, payload, tags, idempotencyKey })
            ]
          );
        } catch (error) {
          log(`Error recording job event for job ${jobId}: ${error}`);
        }
      } else {
        // Pool path: recordJobEvent swallows its own errors.
        await this.recordJobEvent(jobId, "added" /* Added */, {
          jobType,
          payload,
          tags,
          idempotencyKey
        });
      }
      return jobId;
    } catch (error) {
      log(`Error adding job: ${error}`);
      throw error;
    } finally {
      // Only release connections we checked out ourselves.
      if (!externalClient) client.release();
    }
  }
|
|
1085
|
+
/**
|
|
1086
|
+
* Insert multiple jobs in a single database round-trip.
|
|
1087
|
+
*
|
|
1088
|
+
* Uses a multi-row INSERT with ON CONFLICT handling for idempotency keys.
|
|
1089
|
+
* Returns IDs in the same order as the input array.
|
|
1090
|
+
*/
|
|
1091
|
+
  async addJobs(jobs, options) {
    if (jobs.length === 0) return [];
    // Reuse the caller's client (transaction) when provided; otherwise check
    // a connection out of the pool and release it in `finally`.
    const externalClient = options?.db;
    const client = externalClient ?? await this.pool.connect();
    try {
      const COLS_PER_JOB = 12;
      const valueClauses = [];
      const params = [];
      const hasAnyIdempotencyKey = jobs.some((j) => j.idempotencyKey);
      // Build one ($n, ...) tuple per job; placeholders are numbered
      // job-by-job (COLS_PER_JOB per row).
      for (let i = 0; i < jobs.length; i++) {
        const {
          jobType,
          payload,
          maxAttempts = 3,
          priority = 0,
          runAt = null,
          timeoutMs = void 0,
          forceKillOnTimeout = false,
          tags = void 0,
          idempotencyKey = void 0,
          retryDelay = void 0,
          retryBackoff = void 0,
          retryDelayMax = void 0
        } = jobs[i];
        const base = i * COLS_PER_JOB;
        // COALESCE lets a NULL runAt fall back to "now" inside one statement,
        // so scheduled and immediate jobs share the same column list.
        valueClauses.push(
          `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, COALESCE($${base + 5}::timestamptz, CURRENT_TIMESTAMP), $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, $${base + 10}, $${base + 11}, $${base + 12})`
        );
        params.push(
          jobType,
          payload,
          maxAttempts,
          priority,
          runAt,
          timeoutMs ?? null,
          forceKillOnTimeout ?? false,
          tags ?? null,
          idempotencyKey ?? null,
          retryDelay ?? null,
          retryBackoff ?? null,
          retryDelayMax ?? null
        );
      }
      // DO NOTHING on duplicate idempotency keys; such rows return no id.
      const onConflict = hasAnyIdempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
      const result = await client.query(
        `INSERT INTO job_queue
        (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max)
        VALUES ${valueClauses.join(", ")}
        ${onConflict}
        RETURNING id, idempotency_key`,
        params
      );
      // Split returned rows: keyed rows are matched back by key; unkeyed rows
      // come back in insertion order and are consumed positionally below.
      const returnedKeyToId = /* @__PURE__ */ new Map();
      const returnedNullKeyIds = [];
      for (const row of result.rows) {
        if (row.idempotency_key != null) {
          returnedKeyToId.set(row.idempotency_key, row.id);
        } else {
          returnedNullKeyIds.push(row.id);
        }
      }
      // Keys absent from RETURNING hit the conflict clause: look up the
      // pre-existing rows so callers still get an id per input job.
      const missingKeys = [];
      for (const job of jobs) {
        if (job.idempotencyKey && !returnedKeyToId.has(job.idempotencyKey)) {
          missingKeys.push(job.idempotencyKey);
        }
      }
      if (missingKeys.length > 0) {
        const existing = await client.query(
          `SELECT id, idempotency_key FROM job_queue WHERE idempotency_key = ANY($1)`,
          [missingKeys]
        );
        for (const row of existing.rows) {
          returnedKeyToId.set(row.idempotency_key, row.id);
        }
      }
      // Reassemble ids in the same order as the input array.
      let nullKeyIdx = 0;
      const ids = [];
      for (const job of jobs) {
        if (job.idempotencyKey) {
          const id = returnedKeyToId.get(job.idempotencyKey);
          if (id === void 0) {
            throw new Error(
              `Failed to resolve job ID for idempotency key "${job.idempotencyKey}"`
            );
          }
          ids.push(id);
        } else {
          ids.push(returnedNullKeyIds[nullKeyIdx++]);
        }
      }
      log(`Batch-inserted ${jobs.length} jobs, IDs: [${ids.join(", ")}]`);
      // Only freshly inserted jobs (not conflict survivors) get an "added"
      // event.
      const newJobEvents = [];
      for (let i = 0; i < jobs.length; i++) {
        const job = jobs[i];
        const wasInserted = !job.idempotencyKey || !missingKeys.includes(job.idempotencyKey);
        if (wasInserted) {
          newJobEvents.push({
            jobId: ids[i],
            eventType: "added" /* Added */,
            metadata: {
              jobType: job.jobType,
              payload: job.payload,
              tags: job.tags,
              idempotencyKey: job.idempotencyKey
            }
          });
        }
      }
      if (newJobEvents.length > 0) {
        if (externalClient) {
          // Transaction path: multi-row event insert on the same client so it
          // commits with the jobs. Best effort — failures are only logged.
          const evtValues = [];
          const evtParams = [];
          let evtIdx = 1;
          for (const evt of newJobEvents) {
            evtValues.push(`($${evtIdx++}, $${evtIdx++}, $${evtIdx++})`);
            evtParams.push(
              evt.jobId,
              evt.eventType,
              evt.metadata ? JSON.stringify(evt.metadata) : null
            );
          }
          try {
            await client.query(
              `INSERT INTO job_events (job_id, event_type, metadata) VALUES ${evtValues.join(", ")}`,
              evtParams
            );
          } catch (error) {
            log(`Error recording batch job events: ${error}`);
          }
        } else {
          await this.recordJobEventsBatch(newJobEvents);
        }
      }
      return ids;
    } catch (error) {
      log(`Error batch-inserting jobs: ${error}`);
      throw error;
    } finally {
      if (!externalClient) client.release();
    }
  }
|
|
1233
|
+
async getJob(id) {
|
|
1234
|
+
const client = await this.pool.connect();
|
|
1235
|
+
try {
|
|
1236
|
+
const result = await client.query(
|
|
1237
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue WHERE id = $1`,
|
|
1238
|
+
[id]
|
|
1239
|
+
);
|
|
1240
|
+
if (result.rows.length === 0) {
|
|
1241
|
+
log(`Job ${id} not found`);
|
|
1242
|
+
return null;
|
|
1243
|
+
}
|
|
1244
|
+
log(`Found job ${id}`);
|
|
1245
|
+
const job = result.rows[0];
|
|
1246
|
+
return {
|
|
1247
|
+
...job,
|
|
1248
|
+
payload: job.payload,
|
|
1249
|
+
timeoutMs: job.timeoutMs,
|
|
1250
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
1251
|
+
failureReason: job.failureReason
|
|
1252
|
+
};
|
|
1253
|
+
} catch (error) {
|
|
1254
|
+
log(`Error getting job ${id}: ${error}`);
|
|
1255
|
+
throw error;
|
|
1256
|
+
} finally {
|
|
1257
|
+
client.release();
|
|
1258
|
+
}
|
|
1259
|
+
}
|
|
1260
|
+
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
1261
|
+
const client = await this.pool.connect();
|
|
1262
|
+
try {
|
|
1263
|
+
const result = await client.query(
|
|
1264
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
|
|
1265
|
+
[status, limit, offset]
|
|
1266
|
+
);
|
|
1267
|
+
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
1268
|
+
return result.rows.map((job) => ({
|
|
1269
|
+
...job,
|
|
1270
|
+
payload: job.payload,
|
|
1271
|
+
timeoutMs: job.timeoutMs,
|
|
1272
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
1273
|
+
failureReason: job.failureReason
|
|
1274
|
+
}));
|
|
1275
|
+
} catch (error) {
|
|
1276
|
+
log(`Error getting jobs by status ${status}: ${error}`);
|
|
1277
|
+
throw error;
|
|
1278
|
+
} finally {
|
|
1279
|
+
client.release();
|
|
1280
|
+
}
|
|
1281
|
+
}
|
|
1282
|
+
async getAllJobs(limit = 100, offset = 0) {
|
|
1283
|
+
const client = await this.pool.connect();
|
|
1284
|
+
try {
|
|
1285
|
+
const result = await client.query(
|
|
1286
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
1287
|
+
[limit, offset]
|
|
1288
|
+
);
|
|
1289
|
+
log(`Found ${result.rows.length} jobs (all)`);
|
|
1290
|
+
return result.rows.map((job) => ({
|
|
1291
|
+
...job,
|
|
1292
|
+
payload: job.payload,
|
|
1293
|
+
timeoutMs: job.timeoutMs,
|
|
1294
|
+
forceKillOnTimeout: job.forceKillOnTimeout
|
|
1295
|
+
}));
|
|
1296
|
+
} catch (error) {
|
|
1297
|
+
log(`Error getting all jobs: ${error}`);
|
|
1298
|
+
throw error;
|
|
1299
|
+
} finally {
|
|
1300
|
+
client.release();
|
|
1301
|
+
}
|
|
1302
|
+
}
|
|
1303
|
+
async getJobs(filters, limit = 100, offset = 0) {
|
|
1304
|
+
const client = await this.pool.connect();
|
|
1305
|
+
try {
|
|
1306
|
+
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue`;
|
|
1307
|
+
const params = [];
|
|
1308
|
+
const where = [];
|
|
1309
|
+
let paramIdx = 1;
|
|
1310
|
+
if (filters) {
|
|
1311
|
+
if (filters.jobType) {
|
|
1312
|
+
where.push(`job_type = $${paramIdx++}`);
|
|
1313
|
+
params.push(filters.jobType);
|
|
1314
|
+
}
|
|
1315
|
+
if (filters.priority !== void 0) {
|
|
1316
|
+
where.push(`priority = $${paramIdx++}`);
|
|
1317
|
+
params.push(filters.priority);
|
|
1318
|
+
}
|
|
1319
|
+
if (filters.runAt) {
|
|
1320
|
+
if (filters.runAt instanceof Date) {
|
|
1321
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
1322
|
+
params.push(filters.runAt);
|
|
1323
|
+
} else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
|
|
1324
|
+
const ops = filters.runAt;
|
|
1325
|
+
if (ops.gt) {
|
|
1326
|
+
where.push(`run_at > $${paramIdx++}`);
|
|
1327
|
+
params.push(ops.gt);
|
|
1328
|
+
}
|
|
1329
|
+
if (ops.gte) {
|
|
1330
|
+
where.push(`run_at >= $${paramIdx++}`);
|
|
1331
|
+
params.push(ops.gte);
|
|
1332
|
+
}
|
|
1333
|
+
if (ops.lt) {
|
|
1334
|
+
where.push(`run_at < $${paramIdx++}`);
|
|
1335
|
+
params.push(ops.lt);
|
|
1336
|
+
}
|
|
1337
|
+
if (ops.lte) {
|
|
1338
|
+
where.push(`run_at <= $${paramIdx++}`);
|
|
1339
|
+
params.push(ops.lte);
|
|
1340
|
+
}
|
|
1341
|
+
if (ops.eq) {
|
|
1342
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
1343
|
+
params.push(ops.eq);
|
|
1344
|
+
}
|
|
1345
|
+
}
|
|
1346
|
+
}
|
|
1347
|
+
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
1348
|
+
const mode = filters.tags.mode || "all";
|
|
1349
|
+
const tagValues = filters.tags.values;
|
|
1350
|
+
switch (mode) {
|
|
1351
|
+
case "exact":
|
|
1352
|
+
where.push(`tags = $${paramIdx++}`);
|
|
1353
|
+
params.push(tagValues);
|
|
1354
|
+
break;
|
|
1355
|
+
case "all":
|
|
1356
|
+
where.push(`tags @> $${paramIdx++}`);
|
|
1357
|
+
params.push(tagValues);
|
|
1358
|
+
break;
|
|
1359
|
+
case "any":
|
|
1360
|
+
where.push(`tags && $${paramIdx++}`);
|
|
1361
|
+
params.push(tagValues);
|
|
1362
|
+
break;
|
|
1363
|
+
case "none":
|
|
1364
|
+
where.push(`NOT (tags && $${paramIdx++})`);
|
|
1365
|
+
params.push(tagValues);
|
|
1366
|
+
break;
|
|
1367
|
+
default:
|
|
1368
|
+
where.push(`tags @> $${paramIdx++}`);
|
|
318
1369
|
params.push(tagValues);
|
|
319
1370
|
}
|
|
320
1371
|
}
|
|
@@ -353,7 +1404,7 @@ var PostgresBackend = class {
|
|
|
353
1404
|
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
354
1405
|
const client = await this.pool.connect();
|
|
355
1406
|
try {
|
|
356
|
-
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
|
|
1407
|
+
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax"
|
|
357
1408
|
FROM job_queue`;
|
|
358
1409
|
let params = [];
|
|
359
1410
|
switch (mode) {
|
|
@@ -447,7 +1498,7 @@ var PostgresBackend = class {
|
|
|
447
1498
|
LIMIT $2
|
|
448
1499
|
FOR UPDATE SKIP LOCKED
|
|
449
1500
|
)
|
|
450
|
-
RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
|
|
1501
|
+
RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax"
|
|
451
1502
|
`,
|
|
452
1503
|
params
|
|
453
1504
|
);
|
|
@@ -509,9 +1560,17 @@ var PostgresBackend = class {
|
|
|
509
1560
|
UPDATE job_queue
|
|
510
1561
|
SET status = 'failed',
|
|
511
1562
|
updated_at = NOW(),
|
|
512
|
-
next_attempt_at = CASE
|
|
513
|
-
WHEN attempts
|
|
514
|
-
|
|
1563
|
+
next_attempt_at = CASE
|
|
1564
|
+
WHEN attempts >= max_attempts THEN NULL
|
|
1565
|
+
WHEN retry_delay IS NULL AND retry_backoff IS NULL AND retry_delay_max IS NULL
|
|
1566
|
+
THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
|
|
1567
|
+
WHEN COALESCE(retry_backoff, true) = true
|
|
1568
|
+
THEN NOW() + (LEAST(
|
|
1569
|
+
COALESCE(retry_delay_max, 2147483647),
|
|
1570
|
+
COALESCE(retry_delay, 60) * POWER(2, attempts)
|
|
1571
|
+
) * (0.5 + 0.5 * random()) * INTERVAL '1 second')
|
|
1572
|
+
ELSE
|
|
1573
|
+
NOW() + (COALESCE(retry_delay, 60) * INTERVAL '1 second')
|
|
515
1574
|
END,
|
|
516
1575
|
error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
|
|
517
1576
|
failure_reason = $3,
|
|
@@ -749,6 +1808,18 @@ var PostgresBackend = class {
|
|
|
749
1808
|
updateFields.push(`tags = $${paramIdx++}`);
|
|
750
1809
|
params.push(updates.tags ?? null);
|
|
751
1810
|
}
|
|
1811
|
+
if (updates.retryDelay !== void 0) {
|
|
1812
|
+
updateFields.push(`retry_delay = $${paramIdx++}`);
|
|
1813
|
+
params.push(updates.retryDelay ?? null);
|
|
1814
|
+
}
|
|
1815
|
+
if (updates.retryBackoff !== void 0) {
|
|
1816
|
+
updateFields.push(`retry_backoff = $${paramIdx++}`);
|
|
1817
|
+
params.push(updates.retryBackoff ?? null);
|
|
1818
|
+
}
|
|
1819
|
+
if (updates.retryDelayMax !== void 0) {
|
|
1820
|
+
updateFields.push(`retry_delay_max = $${paramIdx++}`);
|
|
1821
|
+
params.push(updates.retryDelayMax ?? null);
|
|
1822
|
+
}
|
|
752
1823
|
if (updateFields.length === 0) {
|
|
753
1824
|
log(`No fields to update for job ${jobId}`);
|
|
754
1825
|
return;
|
|
@@ -770,6 +1841,12 @@ var PostgresBackend = class {
|
|
|
770
1841
|
if (updates.timeoutMs !== void 0)
|
|
771
1842
|
metadata.timeoutMs = updates.timeoutMs;
|
|
772
1843
|
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
1844
|
+
if (updates.retryDelay !== void 0)
|
|
1845
|
+
metadata.retryDelay = updates.retryDelay;
|
|
1846
|
+
if (updates.retryBackoff !== void 0)
|
|
1847
|
+
metadata.retryBackoff = updates.retryBackoff;
|
|
1848
|
+
if (updates.retryDelayMax !== void 0)
|
|
1849
|
+
metadata.retryDelayMax = updates.retryDelayMax;
|
|
773
1850
|
await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
|
|
774
1851
|
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
775
1852
|
} catch (error) {
|
|
@@ -813,6 +1890,18 @@ var PostgresBackend = class {
|
|
|
813
1890
|
updateFields.push(`tags = $${paramIdx++}`);
|
|
814
1891
|
params.push(updates.tags ?? null);
|
|
815
1892
|
}
|
|
1893
|
+
if (updates.retryDelay !== void 0) {
|
|
1894
|
+
updateFields.push(`retry_delay = $${paramIdx++}`);
|
|
1895
|
+
params.push(updates.retryDelay ?? null);
|
|
1896
|
+
}
|
|
1897
|
+
if (updates.retryBackoff !== void 0) {
|
|
1898
|
+
updateFields.push(`retry_backoff = $${paramIdx++}`);
|
|
1899
|
+
params.push(updates.retryBackoff ?? null);
|
|
1900
|
+
}
|
|
1901
|
+
if (updates.retryDelayMax !== void 0) {
|
|
1902
|
+
updateFields.push(`retry_delay_max = $${paramIdx++}`);
|
|
1903
|
+
params.push(updates.retryDelayMax ?? null);
|
|
1904
|
+
}
|
|
816
1905
|
if (updateFields.length === 0) {
|
|
817
1906
|
log(`No fields to update for batch edit`);
|
|
818
1907
|
return 0;
|
|
@@ -909,45 +1998,85 @@ var PostgresBackend = class {
|
|
|
909
1998
|
client.release();
|
|
910
1999
|
}
|
|
911
2000
|
}
|
|
912
|
-
|
|
913
|
-
|
|
2001
|
+
/**
|
|
2002
|
+
* Delete completed jobs older than the given number of days.
|
|
2003
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
2004
|
+
* and excessive WAL bloat at scale.
|
|
2005
|
+
*
|
|
2006
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
2007
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
2008
|
+
* @returns Total number of deleted jobs.
|
|
2009
|
+
*/
|
|
2010
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 1e3) {
|
|
2011
|
+
let totalDeleted = 0;
|
|
914
2012
|
try {
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
2013
|
+
let deletedInBatch;
|
|
2014
|
+
do {
|
|
2015
|
+
const client = await this.pool.connect();
|
|
2016
|
+
try {
|
|
2017
|
+
const result = await client.query(
|
|
2018
|
+
`
|
|
2019
|
+
DELETE FROM job_queue
|
|
2020
|
+
WHERE id IN (
|
|
2021
|
+
SELECT id FROM job_queue
|
|
2022
|
+
WHERE status = 'completed'
|
|
2023
|
+
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
2024
|
+
LIMIT $2
|
|
2025
|
+
)
|
|
2026
|
+
`,
|
|
2027
|
+
[daysToKeep, batchSize]
|
|
2028
|
+
);
|
|
2029
|
+
deletedInBatch = result.rowCount || 0;
|
|
2030
|
+
totalDeleted += deletedInBatch;
|
|
2031
|
+
} finally {
|
|
2032
|
+
client.release();
|
|
2033
|
+
}
|
|
2034
|
+
} while (deletedInBatch === batchSize);
|
|
2035
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
2036
|
+
return totalDeleted;
|
|
926
2037
|
} catch (error) {
|
|
927
2038
|
log(`Error cleaning up old jobs: ${error}`);
|
|
928
2039
|
throw error;
|
|
929
|
-
} finally {
|
|
930
|
-
client.release();
|
|
931
2040
|
}
|
|
932
2041
|
}
|
|
933
|
-
|
|
934
|
-
|
|
2042
|
+
/**
|
|
2043
|
+
* Delete job events older than the given number of days.
|
|
2044
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
2045
|
+
* and excessive WAL bloat at scale.
|
|
2046
|
+
*
|
|
2047
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
2048
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
2049
|
+
* @returns Total number of deleted events.
|
|
2050
|
+
*/
|
|
2051
|
+
async cleanupOldJobEvents(daysToKeep = 30, batchSize = 1e3) {
|
|
2052
|
+
let totalDeleted = 0;
|
|
935
2053
|
try {
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
2054
|
+
let deletedInBatch;
|
|
2055
|
+
do {
|
|
2056
|
+
const client = await this.pool.connect();
|
|
2057
|
+
try {
|
|
2058
|
+
const result = await client.query(
|
|
2059
|
+
`
|
|
2060
|
+
DELETE FROM job_events
|
|
2061
|
+
WHERE id IN (
|
|
2062
|
+
SELECT id FROM job_events
|
|
2063
|
+
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
2064
|
+
LIMIT $2
|
|
2065
|
+
)
|
|
2066
|
+
`,
|
|
2067
|
+
[daysToKeep, batchSize]
|
|
2068
|
+
);
|
|
2069
|
+
deletedInBatch = result.rowCount || 0;
|
|
2070
|
+
totalDeleted += deletedInBatch;
|
|
2071
|
+
} finally {
|
|
2072
|
+
client.release();
|
|
2073
|
+
}
|
|
2074
|
+
} while (deletedInBatch === batchSize);
|
|
2075
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
2076
|
+
return totalDeleted;
|
|
946
2077
|
} catch (error) {
|
|
947
2078
|
log(`Error cleaning up old job events: ${error}`);
|
|
948
2079
|
throw error;
|
|
949
|
-
} finally {
|
|
950
|
-
client.release();
|
|
951
2080
|
}
|
|
952
2081
|
}
|
|
953
2082
|
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
@@ -1005,926 +2134,540 @@ var PostgresBackend = class {
|
|
|
1005
2134
|
client.release();
|
|
1006
2135
|
}
|
|
1007
2136
|
}
|
|
1008
|
-
|
|
2137
|
+
// ── Cron schedules ──────────────────────────────────────────────────
|
|
2138
|
+
/** Create a cron schedule and return its ID. */
|
|
2139
|
+
async addCronSchedule(input) {
|
|
1009
2140
|
const client = await this.pool.connect();
|
|
1010
2141
|
try {
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
2142
|
+
const result = await client.query(
|
|
2143
|
+
`INSERT INTO cron_schedules
|
|
2144
|
+
(schedule_name, cron_expression, job_type, payload, max_attempts,
|
|
2145
|
+
priority, timeout_ms, force_kill_on_timeout, tags, timezone,
|
|
2146
|
+
allow_overlap, next_run_at, retry_delay, retry_backoff, retry_delay_max)
|
|
2147
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
|
|
2148
|
+
RETURNING id`,
|
|
2149
|
+
[
|
|
2150
|
+
input.scheduleName,
|
|
2151
|
+
input.cronExpression,
|
|
2152
|
+
input.jobType,
|
|
2153
|
+
input.payload,
|
|
2154
|
+
input.maxAttempts,
|
|
2155
|
+
input.priority,
|
|
2156
|
+
input.timeoutMs,
|
|
2157
|
+
input.forceKillOnTimeout,
|
|
2158
|
+
input.tags ?? null,
|
|
2159
|
+
input.timezone,
|
|
2160
|
+
input.allowOverlap,
|
|
2161
|
+
input.nextRunAt,
|
|
2162
|
+
input.retryDelay,
|
|
2163
|
+
input.retryBackoff,
|
|
2164
|
+
input.retryDelayMax
|
|
2165
|
+
]
|
|
1025
2166
|
);
|
|
2167
|
+
const id = result.rows[0].id;
|
|
2168
|
+
log(`Added cron schedule ${id}: "${input.scheduleName}"`);
|
|
2169
|
+
return id;
|
|
2170
|
+
} catch (error) {
|
|
2171
|
+
if (error?.code === "23505") {
|
|
2172
|
+
throw new Error(
|
|
2173
|
+
`Cron schedule with name "${input.scheduleName}" already exists`
|
|
2174
|
+
);
|
|
2175
|
+
}
|
|
2176
|
+
log(`Error adding cron schedule: ${error}`);
|
|
2177
|
+
throw error;
|
|
1026
2178
|
} finally {
|
|
1027
2179
|
client.release();
|
|
1028
2180
|
}
|
|
1029
2181
|
}
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
[
|
|
1048
|
-
jobId,
|
|
1049
|
-
options.waitUntil ?? null,
|
|
1050
|
-
options.waitTokenId ?? null,
|
|
1051
|
-
JSON.stringify(options.stepData)
|
|
1052
|
-
]
|
|
1053
|
-
);
|
|
1054
|
-
if (result.rowCount === 0) {
|
|
1055
|
-
log(
|
|
1056
|
-
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
2182
|
+
/** Get a cron schedule by ID. */
|
|
2183
|
+
async getCronSchedule(id) {
|
|
2184
|
+
const client = await this.pool.connect();
|
|
2185
|
+
try {
|
|
2186
|
+
const result = await client.query(
|
|
2187
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
2188
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
2189
|
+
priority, timeout_ms AS "timeoutMs",
|
|
2190
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
2191
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
2192
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
2193
|
+
next_run_at AS "nextRunAt",
|
|
2194
|
+
created_at AS "createdAt", updated_at AS "updatedAt",
|
|
2195
|
+
retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
|
|
2196
|
+
retry_delay_max AS "retryDelayMax"
|
|
2197
|
+
FROM cron_schedules WHERE id = $1`,
|
|
2198
|
+
[id]
|
|
1057
2199
|
);
|
|
1058
|
-
return;
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
}
|
|
1064
|
-
|
|
1065
|
-
} catch (error) {
|
|
1066
|
-
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
1067
|
-
throw error;
|
|
1068
|
-
} finally {
|
|
1069
|
-
client.release();
|
|
1070
|
-
}
|
|
1071
|
-
};
|
|
1072
|
-
var updateStepData = async (pool, jobId, stepData) => {
|
|
1073
|
-
const client = await pool.connect();
|
|
1074
|
-
try {
|
|
1075
|
-
await client.query(
|
|
1076
|
-
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
1077
|
-
[jobId, JSON.stringify(stepData)]
|
|
1078
|
-
);
|
|
1079
|
-
} catch (error) {
|
|
1080
|
-
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
1081
|
-
} finally {
|
|
1082
|
-
client.release();
|
|
1083
|
-
}
|
|
1084
|
-
};
|
|
1085
|
-
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
1086
|
-
function parseTimeoutString(timeout) {
|
|
1087
|
-
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
1088
|
-
if (!match) {
|
|
1089
|
-
throw new Error(
|
|
1090
|
-
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
1091
|
-
);
|
|
1092
|
-
}
|
|
1093
|
-
const value = parseInt(match[1], 10);
|
|
1094
|
-
const unit = match[2];
|
|
1095
|
-
let ms;
|
|
1096
|
-
switch (unit) {
|
|
1097
|
-
case "s":
|
|
1098
|
-
ms = value * 1e3;
|
|
1099
|
-
break;
|
|
1100
|
-
case "m":
|
|
1101
|
-
ms = value * 60 * 1e3;
|
|
1102
|
-
break;
|
|
1103
|
-
case "h":
|
|
1104
|
-
ms = value * 60 * 60 * 1e3;
|
|
1105
|
-
break;
|
|
1106
|
-
case "d":
|
|
1107
|
-
ms = value * 24 * 60 * 60 * 1e3;
|
|
1108
|
-
break;
|
|
1109
|
-
default:
|
|
1110
|
-
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
1111
|
-
}
|
|
1112
|
-
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
1113
|
-
throw new Error(
|
|
1114
|
-
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
1115
|
-
);
|
|
1116
|
-
}
|
|
1117
|
-
return ms;
|
|
1118
|
-
}
|
|
1119
|
-
var createWaitpoint = async (pool, jobId, options) => {
|
|
1120
|
-
const client = await pool.connect();
|
|
1121
|
-
try {
|
|
1122
|
-
const id = `wp_${crypto.randomUUID()}`;
|
|
1123
|
-
let timeoutAt = null;
|
|
1124
|
-
if (options?.timeout) {
|
|
1125
|
-
const ms = parseTimeoutString(options.timeout);
|
|
1126
|
-
timeoutAt = new Date(Date.now() + ms);
|
|
2200
|
+
if (result.rows.length === 0) return null;
|
|
2201
|
+
return result.rows[0];
|
|
2202
|
+
} catch (error) {
|
|
2203
|
+
log(`Error getting cron schedule ${id}: ${error}`);
|
|
2204
|
+
throw error;
|
|
2205
|
+
} finally {
|
|
2206
|
+
client.release();
|
|
1127
2207
|
}
|
|
1128
|
-
await client.query(
|
|
1129
|
-
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
1130
|
-
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
1131
|
-
);
|
|
1132
|
-
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
1133
|
-
return { id };
|
|
1134
|
-
} catch (error) {
|
|
1135
|
-
log(`Error creating waitpoint: ${error}`);
|
|
1136
|
-
throw error;
|
|
1137
|
-
} finally {
|
|
1138
|
-
client.release();
|
|
1139
2208
|
}
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
if (jobId != null) {
|
|
1158
|
-
await client.query(
|
|
1159
|
-
`UPDATE job_queue
|
|
1160
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1161
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1162
|
-
[jobId]
|
|
2209
|
+
/** Get a cron schedule by its unique name. */
|
|
2210
|
+
async getCronScheduleByName(name) {
|
|
2211
|
+
const client = await this.pool.connect();
|
|
2212
|
+
try {
|
|
2213
|
+
const result = await client.query(
|
|
2214
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
2215
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
2216
|
+
priority, timeout_ms AS "timeoutMs",
|
|
2217
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
2218
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
2219
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
2220
|
+
next_run_at AS "nextRunAt",
|
|
2221
|
+
created_at AS "createdAt", updated_at AS "updatedAt",
|
|
2222
|
+
retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
|
|
2223
|
+
retry_delay_max AS "retryDelayMax"
|
|
2224
|
+
FROM cron_schedules WHERE schedule_name = $1`,
|
|
2225
|
+
[name]
|
|
1163
2226
|
);
|
|
2227
|
+
if (result.rows.length === 0) return null;
|
|
2228
|
+
return result.rows[0];
|
|
2229
|
+
} catch (error) {
|
|
2230
|
+
log(`Error getting cron schedule by name "${name}": ${error}`);
|
|
2231
|
+
throw error;
|
|
2232
|
+
} finally {
|
|
2233
|
+
client.release();
|
|
1164
2234
|
}
|
|
1165
|
-
await client.query("COMMIT");
|
|
1166
|
-
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
1167
|
-
} catch (error) {
|
|
1168
|
-
await client.query("ROLLBACK");
|
|
1169
|
-
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
1170
|
-
throw error;
|
|
1171
|
-
} finally {
|
|
1172
|
-
client.release();
|
|
1173
|
-
}
|
|
1174
|
-
};
|
|
1175
|
-
var getWaitpoint = async (pool, tokenId) => {
|
|
1176
|
-
const client = await pool.connect();
|
|
1177
|
-
try {
|
|
1178
|
-
const result = await client.query(
|
|
1179
|
-
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
1180
|
-
[tokenId]
|
|
1181
|
-
);
|
|
1182
|
-
if (result.rows.length === 0) return null;
|
|
1183
|
-
return result.rows[0];
|
|
1184
|
-
} catch (error) {
|
|
1185
|
-
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
1186
|
-
throw error;
|
|
1187
|
-
} finally {
|
|
1188
|
-
client.release();
|
|
1189
|
-
}
|
|
1190
|
-
};
|
|
1191
|
-
var expireTimedOutWaitpoints = async (pool) => {
|
|
1192
|
-
const client = await pool.connect();
|
|
1193
|
-
try {
|
|
1194
|
-
await client.query("BEGIN");
|
|
1195
|
-
const result = await client.query(
|
|
1196
|
-
`UPDATE waitpoints
|
|
1197
|
-
SET status = 'timed_out'
|
|
1198
|
-
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
1199
|
-
RETURNING id, job_id`
|
|
1200
|
-
);
|
|
1201
|
-
for (const row of result.rows) {
|
|
1202
|
-
if (row.job_id != null) {
|
|
1203
|
-
await client.query(
|
|
1204
|
-
`UPDATE job_queue
|
|
1205
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1206
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1207
|
-
[row.job_id]
|
|
1208
|
-
);
|
|
1209
|
-
}
|
|
1210
|
-
}
|
|
1211
|
-
await client.query("COMMIT");
|
|
1212
|
-
const count = result.rowCount || 0;
|
|
1213
|
-
if (count > 0) {
|
|
1214
|
-
log(`Expired ${count} timed-out waitpoints`);
|
|
1215
|
-
}
|
|
1216
|
-
return count;
|
|
1217
|
-
} catch (error) {
|
|
1218
|
-
await client.query("ROLLBACK");
|
|
1219
|
-
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
1220
|
-
throw error;
|
|
1221
|
-
} finally {
|
|
1222
|
-
client.release();
|
|
1223
|
-
}
|
|
1224
|
-
};
|
|
1225
|
-
function tryExtractPool(backend) {
|
|
1226
|
-
if (backend instanceof PostgresBackend) {
|
|
1227
|
-
return backend.getPool();
|
|
1228
2235
|
}
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
}
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
2236
|
+
/** List cron schedules, optionally filtered by status. */
|
|
2237
|
+
async listCronSchedules(status) {
|
|
2238
|
+
const client = await this.pool.connect();
|
|
2239
|
+
try {
|
|
2240
|
+
let query = `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
2241
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
2242
|
+
priority, timeout_ms AS "timeoutMs",
|
|
2243
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
2244
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
2245
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
2246
|
+
next_run_at AS "nextRunAt",
|
|
2247
|
+
created_at AS "createdAt", updated_at AS "updatedAt",
|
|
2248
|
+
retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
|
|
2249
|
+
retry_delay_max AS "retryDelayMax"
|
|
2250
|
+
FROM cron_schedules`;
|
|
2251
|
+
const params = [];
|
|
2252
|
+
if (status) {
|
|
2253
|
+
query += ` WHERE status = $1`;
|
|
2254
|
+
params.push(status);
|
|
2255
|
+
}
|
|
2256
|
+
query += ` ORDER BY created_at ASC`;
|
|
2257
|
+
const result = await client.query(query, params);
|
|
2258
|
+
return result.rows;
|
|
2259
|
+
} catch (error) {
|
|
2260
|
+
log(`Error listing cron schedules: ${error}`);
|
|
2261
|
+
throw error;
|
|
2262
|
+
} finally {
|
|
2263
|
+
client.release();
|
|
1257
2264
|
}
|
|
1258
|
-
}
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
);
|
|
2265
|
+
}
|
|
2266
|
+
/** Delete a cron schedule by ID. */
|
|
2267
|
+
async removeCronSchedule(id) {
|
|
2268
|
+
const client = await this.pool.connect();
|
|
2269
|
+
try {
|
|
2270
|
+
await client.query(`DELETE FROM cron_schedules WHERE id = $1`, [id]);
|
|
2271
|
+
log(`Removed cron schedule ${id}`);
|
|
2272
|
+
} catch (error) {
|
|
2273
|
+
log(`Error removing cron schedule ${id}: ${error}`);
|
|
2274
|
+
throw error;
|
|
2275
|
+
} finally {
|
|
2276
|
+
client.release();
|
|
1267
2277
|
}
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
2278
|
+
}
|
|
2279
|
+
/** Pause a cron schedule. */
|
|
2280
|
+
async pauseCronSchedule(id) {
|
|
2281
|
+
const client = await this.pool.connect();
|
|
2282
|
+
try {
|
|
2283
|
+
await client.query(
|
|
2284
|
+
`UPDATE cron_schedules SET status = 'paused', updated_at = NOW() WHERE id = $1`,
|
|
2285
|
+
[id]
|
|
1271
2286
|
);
|
|
2287
|
+
log(`Paused cron schedule ${id}`);
|
|
2288
|
+
} catch (error) {
|
|
2289
|
+
log(`Error pausing cron schedule ${id}: ${error}`);
|
|
2290
|
+
throw error;
|
|
2291
|
+
} finally {
|
|
2292
|
+
client.release();
|
|
1272
2293
|
}
|
|
2294
|
+
}
|
|
2295
|
+
/** Resume a paused cron schedule. */
|
|
2296
|
+
async resumeCronSchedule(id) {
|
|
2297
|
+
const client = await this.pool.connect();
|
|
1273
2298
|
try {
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
`Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
|
|
2299
|
+
await client.query(
|
|
2300
|
+
`UPDATE cron_schedules SET status = 'active', updated_at = NOW() WHERE id = $1`,
|
|
2301
|
+
[id]
|
|
1278
2302
|
);
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
2303
|
+
log(`Resumed cron schedule ${id}`);
|
|
2304
|
+
} catch (error) {
|
|
2305
|
+
log(`Error resuming cron schedule ${id}: ${error}`);
|
|
1282
2306
|
throw error;
|
|
2307
|
+
} finally {
|
|
2308
|
+
client.release();
|
|
1283
2309
|
}
|
|
1284
|
-
throw new Error(
|
|
1285
|
-
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
1286
|
-
);
|
|
1287
2310
|
}
|
|
1288
|
-
|
|
1289
|
-
async
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
const controller = new AbortController();
|
|
1299
|
-
const signal = controller.signal;
|
|
1300
|
-
|
|
1301
|
-
// Set up timeout
|
|
1302
|
-
const timeoutId = setTimeout(() => {
|
|
1303
|
-
controller.abort();
|
|
1304
|
-
parentPort.postMessage({ type: 'timeout' });
|
|
1305
|
-
}, timeoutMs);
|
|
1306
|
-
|
|
1307
|
-
try {
|
|
1308
|
-
// Execute the handler
|
|
1309
|
-
// Note: This uses Function constructor which requires the handler to be serializable.
|
|
1310
|
-
// The handler should be validated before reaching this point.
|
|
1311
|
-
let handlerFn;
|
|
1312
|
-
try {
|
|
1313
|
-
// Wrap handlerCode in parentheses to ensure it's treated as an expression
|
|
1314
|
-
// This handles both arrow functions and regular functions
|
|
1315
|
-
const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
|
|
1316
|
-
? handlerCode
|
|
1317
|
-
: '(' + handlerCode + ')';
|
|
1318
|
-
handlerFn = new Function('return ' + wrappedCode)();
|
|
1319
|
-
} catch (parseError) {
|
|
1320
|
-
clearTimeout(timeoutId);
|
|
1321
|
-
parentPort.postMessage({
|
|
1322
|
-
type: 'error',
|
|
1323
|
-
error: {
|
|
1324
|
-
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
1325
|
-
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
1326
|
-
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
1327
|
-
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
1328
|
-
name: 'SerializationError',
|
|
1329
|
-
},
|
|
1330
|
-
});
|
|
1331
|
-
return;
|
|
1332
|
-
}
|
|
1333
|
-
|
|
1334
|
-
// Ensure handlerFn is actually a function
|
|
1335
|
-
if (typeof handlerFn !== 'function') {
|
|
1336
|
-
clearTimeout(timeoutId);
|
|
1337
|
-
parentPort.postMessage({
|
|
1338
|
-
type: 'error',
|
|
1339
|
-
error: {
|
|
1340
|
-
message: 'Handler deserialization did not produce a function. ' +
|
|
1341
|
-
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
1342
|
-
name: 'SerializationError',
|
|
1343
|
-
},
|
|
1344
|
-
});
|
|
1345
|
-
return;
|
|
1346
|
-
}
|
|
1347
|
-
|
|
1348
|
-
handlerFn(payload, signal)
|
|
1349
|
-
.then(() => {
|
|
1350
|
-
clearTimeout(timeoutId);
|
|
1351
|
-
parentPort.postMessage({ type: 'success' });
|
|
1352
|
-
})
|
|
1353
|
-
.catch((error) => {
|
|
1354
|
-
clearTimeout(timeoutId);
|
|
1355
|
-
parentPort.postMessage({
|
|
1356
|
-
type: 'error',
|
|
1357
|
-
error: {
|
|
1358
|
-
message: error.message,
|
|
1359
|
-
stack: error.stack,
|
|
1360
|
-
name: error.name,
|
|
1361
|
-
},
|
|
1362
|
-
});
|
|
1363
|
-
});
|
|
1364
|
-
} catch (error) {
|
|
1365
|
-
clearTimeout(timeoutId);
|
|
1366
|
-
parentPort.postMessage({
|
|
1367
|
-
type: 'error',
|
|
1368
|
-
error: {
|
|
1369
|
-
message: error.message,
|
|
1370
|
-
stack: error.stack,
|
|
1371
|
-
name: error.name,
|
|
1372
|
-
},
|
|
1373
|
-
});
|
|
1374
|
-
}
|
|
1375
|
-
})();
|
|
1376
|
-
`;
|
|
1377
|
-
const worker = new worker_threads.Worker(workerCode, {
|
|
1378
|
-
eval: true,
|
|
1379
|
-
workerData: {
|
|
1380
|
-
handlerCode: handler.toString(),
|
|
1381
|
-
payload,
|
|
1382
|
-
timeoutMs
|
|
1383
|
-
}
|
|
1384
|
-
});
|
|
1385
|
-
let resolved = false;
|
|
1386
|
-
worker.on("message", (message) => {
|
|
1387
|
-
if (resolved) return;
|
|
1388
|
-
resolved = true;
|
|
1389
|
-
if (message.type === "success") {
|
|
1390
|
-
resolve();
|
|
1391
|
-
} else if (message.type === "timeout") {
|
|
1392
|
-
const timeoutError = new Error(
|
|
1393
|
-
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1394
|
-
);
|
|
1395
|
-
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1396
|
-
reject(timeoutError);
|
|
1397
|
-
} else if (message.type === "error") {
|
|
1398
|
-
const error = new Error(message.error.message);
|
|
1399
|
-
error.stack = message.error.stack;
|
|
1400
|
-
error.name = message.error.name;
|
|
1401
|
-
reject(error);
|
|
2311
|
+
/** Edit a cron schedule. */
|
|
2312
|
+
async editCronSchedule(id, updates, nextRunAt) {
|
|
2313
|
+
const client = await this.pool.connect();
|
|
2314
|
+
try {
|
|
2315
|
+
const updateFields = [];
|
|
2316
|
+
const params = [];
|
|
2317
|
+
let paramIdx = 1;
|
|
2318
|
+
if (updates.cronExpression !== void 0) {
|
|
2319
|
+
updateFields.push(`cron_expression = $${paramIdx++}`);
|
|
2320
|
+
params.push(updates.cronExpression);
|
|
1402
2321
|
}
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
resolved = true;
|
|
1407
|
-
reject(error);
|
|
1408
|
-
});
|
|
1409
|
-
worker.on("exit", (code) => {
|
|
1410
|
-
if (resolved) return;
|
|
1411
|
-
if (code !== 0) {
|
|
1412
|
-
resolved = true;
|
|
1413
|
-
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
2322
|
+
if (updates.payload !== void 0) {
|
|
2323
|
+
updateFields.push(`payload = $${paramIdx++}`);
|
|
2324
|
+
params.push(updates.payload);
|
|
1414
2325
|
}
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
resolved = true;
|
|
1419
|
-
worker.terminate().then(() => {
|
|
1420
|
-
const timeoutError = new Error(
|
|
1421
|
-
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1422
|
-
);
|
|
1423
|
-
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1424
|
-
reject(timeoutError);
|
|
1425
|
-
}).catch((err) => {
|
|
1426
|
-
reject(err);
|
|
1427
|
-
});
|
|
2326
|
+
if (updates.maxAttempts !== void 0) {
|
|
2327
|
+
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
2328
|
+
params.push(updates.maxAttempts);
|
|
1428
2329
|
}
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
function calculateWaitUntil(duration) {
|
|
1433
|
-
const now = Date.now();
|
|
1434
|
-
let ms = 0;
|
|
1435
|
-
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
1436
|
-
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
1437
|
-
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
1438
|
-
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
1439
|
-
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
1440
|
-
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
1441
|
-
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
1442
|
-
if (ms <= 0) {
|
|
1443
|
-
throw new Error(
|
|
1444
|
-
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
1445
|
-
);
|
|
1446
|
-
}
|
|
1447
|
-
return new Date(now + ms);
|
|
1448
|
-
}
|
|
1449
|
-
async function resolveCompletedWaits(pool, stepData) {
|
|
1450
|
-
for (const key of Object.keys(stepData)) {
|
|
1451
|
-
if (!key.startsWith("__wait_")) continue;
|
|
1452
|
-
const entry = stepData[key];
|
|
1453
|
-
if (!entry || typeof entry !== "object" || entry.completed) continue;
|
|
1454
|
-
if (entry.type === "duration" || entry.type === "date") {
|
|
1455
|
-
stepData[key] = { ...entry, completed: true };
|
|
1456
|
-
} else if (entry.type === "token" && entry.tokenId) {
|
|
1457
|
-
const wp = await getWaitpoint(pool, entry.tokenId);
|
|
1458
|
-
if (wp && wp.status === "completed") {
|
|
1459
|
-
stepData[key] = {
|
|
1460
|
-
...entry,
|
|
1461
|
-
completed: true,
|
|
1462
|
-
result: { ok: true, output: wp.output }
|
|
1463
|
-
};
|
|
1464
|
-
} else if (wp && wp.status === "timed_out") {
|
|
1465
|
-
stepData[key] = {
|
|
1466
|
-
...entry,
|
|
1467
|
-
completed: true,
|
|
1468
|
-
result: { ok: false, error: "Token timed out" }
|
|
1469
|
-
};
|
|
2330
|
+
if (updates.priority !== void 0) {
|
|
2331
|
+
updateFields.push(`priority = $${paramIdx++}`);
|
|
2332
|
+
params.push(updates.priority);
|
|
1470
2333
|
}
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
function buildWaitContext(backend, pool, jobId, stepData, baseCtx) {
|
|
1475
|
-
let waitCounter = 0;
|
|
1476
|
-
const ctx = {
|
|
1477
|
-
prolong: baseCtx.prolong,
|
|
1478
|
-
onTimeout: baseCtx.onTimeout,
|
|
1479
|
-
run: async (stepName, fn) => {
|
|
1480
|
-
const cached = stepData[stepName];
|
|
1481
|
-
if (cached && typeof cached === "object" && cached.__completed) {
|
|
1482
|
-
log(`Step "${stepName}" replayed from cache for job ${jobId}`);
|
|
1483
|
-
return cached.result;
|
|
2334
|
+
if (updates.timeoutMs !== void 0) {
|
|
2335
|
+
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
2336
|
+
params.push(updates.timeoutMs);
|
|
1484
2337
|
}
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
return result;
|
|
1489
|
-
},
|
|
1490
|
-
waitFor: async (duration) => {
|
|
1491
|
-
const waitKey = `__wait_${waitCounter++}`;
|
|
1492
|
-
const cached = stepData[waitKey];
|
|
1493
|
-
if (cached && typeof cached === "object" && cached.completed) {
|
|
1494
|
-
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
1495
|
-
return;
|
|
2338
|
+
if (updates.forceKillOnTimeout !== void 0) {
|
|
2339
|
+
updateFields.push(`force_kill_on_timeout = $${paramIdx++}`);
|
|
2340
|
+
params.push(updates.forceKillOnTimeout);
|
|
1496
2341
|
}
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
},
|
|
1501
|
-
waitUntil: async (date) => {
|
|
1502
|
-
const waitKey = `__wait_${waitCounter++}`;
|
|
1503
|
-
const cached = stepData[waitKey];
|
|
1504
|
-
if (cached && typeof cached === "object" && cached.completed) {
|
|
1505
|
-
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
1506
|
-
return;
|
|
2342
|
+
if (updates.tags !== void 0) {
|
|
2343
|
+
updateFields.push(`tags = $${paramIdx++}`);
|
|
2344
|
+
params.push(updates.tags);
|
|
1507
2345
|
}
|
|
1508
|
-
|
|
1509
|
-
|
|
1510
|
-
|
|
1511
|
-
createToken: async (options) => {
|
|
1512
|
-
const token = await createWaitpoint(pool, jobId, options);
|
|
1513
|
-
return token;
|
|
1514
|
-
},
|
|
1515
|
-
waitForToken: async (tokenId) => {
|
|
1516
|
-
const waitKey = `__wait_${waitCounter++}`;
|
|
1517
|
-
const cached = stepData[waitKey];
|
|
1518
|
-
if (cached && typeof cached === "object" && cached.completed) {
|
|
1519
|
-
log(
|
|
1520
|
-
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
1521
|
-
);
|
|
1522
|
-
return cached.result;
|
|
2346
|
+
if (updates.timezone !== void 0) {
|
|
2347
|
+
updateFields.push(`timezone = $${paramIdx++}`);
|
|
2348
|
+
params.push(updates.timezone);
|
|
1523
2349
|
}
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
ok: true,
|
|
1528
|
-
output: wp.output
|
|
1529
|
-
};
|
|
1530
|
-
stepData[waitKey] = {
|
|
1531
|
-
type: "token",
|
|
1532
|
-
tokenId,
|
|
1533
|
-
completed: true,
|
|
1534
|
-
result
|
|
1535
|
-
};
|
|
1536
|
-
await updateStepData(pool, jobId, stepData);
|
|
1537
|
-
return result;
|
|
2350
|
+
if (updates.allowOverlap !== void 0) {
|
|
2351
|
+
updateFields.push(`allow_overlap = $${paramIdx++}`);
|
|
2352
|
+
params.push(updates.allowOverlap);
|
|
1538
2353
|
}
|
|
1539
|
-
if (
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
error: "Token timed out"
|
|
1543
|
-
};
|
|
1544
|
-
stepData[waitKey] = {
|
|
1545
|
-
type: "token",
|
|
1546
|
-
tokenId,
|
|
1547
|
-
completed: true,
|
|
1548
|
-
result
|
|
1549
|
-
};
|
|
1550
|
-
await updateStepData(pool, jobId, stepData);
|
|
1551
|
-
return result;
|
|
2354
|
+
if (updates.retryDelay !== void 0) {
|
|
2355
|
+
updateFields.push(`retry_delay = $${paramIdx++}`);
|
|
2356
|
+
params.push(updates.retryDelay);
|
|
1552
2357
|
}
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
setProgress: async (percent) => {
|
|
1557
|
-
if (percent < 0 || percent > 100)
|
|
1558
|
-
throw new Error("Progress must be between 0 and 100");
|
|
1559
|
-
await backend.updateProgress(jobId, Math.round(percent));
|
|
1560
|
-
}
|
|
1561
|
-
};
|
|
1562
|
-
return ctx;
|
|
1563
|
-
}
|
|
1564
|
-
async function processJobWithHandlers(backend, job, jobHandlers) {
|
|
1565
|
-
const handler = jobHandlers[job.jobType];
|
|
1566
|
-
if (!handler) {
|
|
1567
|
-
await backend.setPendingReasonForUnpickedJobs(
|
|
1568
|
-
`No handler registered for job type: ${job.jobType}`,
|
|
1569
|
-
job.jobType
|
|
1570
|
-
);
|
|
1571
|
-
await backend.failJob(
|
|
1572
|
-
job.id,
|
|
1573
|
-
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
1574
|
-
"no_handler" /* NoHandler */
|
|
1575
|
-
);
|
|
1576
|
-
return;
|
|
1577
|
-
}
|
|
1578
|
-
const stepData = { ...job.stepData || {} };
|
|
1579
|
-
const pool = tryExtractPool(backend);
|
|
1580
|
-
const hasStepHistory = Object.keys(stepData).some(
|
|
1581
|
-
(k) => k.startsWith("__wait_")
|
|
1582
|
-
);
|
|
1583
|
-
if (hasStepHistory && pool) {
|
|
1584
|
-
await resolveCompletedWaits(pool, stepData);
|
|
1585
|
-
await updateStepData(pool, job.id, stepData);
|
|
1586
|
-
}
|
|
1587
|
-
const timeoutMs = job.timeoutMs ?? void 0;
|
|
1588
|
-
const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
|
|
1589
|
-
let timeoutId;
|
|
1590
|
-
const controller = new AbortController();
|
|
1591
|
-
try {
|
|
1592
|
-
if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
|
|
1593
|
-
await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
|
|
1594
|
-
} else {
|
|
1595
|
-
let onTimeoutCallback;
|
|
1596
|
-
let timeoutReject;
|
|
1597
|
-
const armTimeout = (ms) => {
|
|
1598
|
-
if (timeoutId) clearTimeout(timeoutId);
|
|
1599
|
-
timeoutId = setTimeout(() => {
|
|
1600
|
-
if (onTimeoutCallback) {
|
|
1601
|
-
try {
|
|
1602
|
-
const extension = onTimeoutCallback();
|
|
1603
|
-
if (typeof extension === "number" && extension > 0) {
|
|
1604
|
-
backend.prolongJob(job.id).catch(() => {
|
|
1605
|
-
});
|
|
1606
|
-
armTimeout(extension);
|
|
1607
|
-
return;
|
|
1608
|
-
}
|
|
1609
|
-
} catch (callbackError) {
|
|
1610
|
-
log(
|
|
1611
|
-
`onTimeout callback threw for job ${job.id}: ${callbackError}`
|
|
1612
|
-
);
|
|
1613
|
-
}
|
|
1614
|
-
}
|
|
1615
|
-
controller.abort();
|
|
1616
|
-
const timeoutError = new Error(`Job timed out after ${ms} ms`);
|
|
1617
|
-
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1618
|
-
if (timeoutReject) {
|
|
1619
|
-
timeoutReject(timeoutError);
|
|
1620
|
-
}
|
|
1621
|
-
}, ms);
|
|
1622
|
-
};
|
|
1623
|
-
const hasTimeout = timeoutMs != null && timeoutMs > 0;
|
|
1624
|
-
const baseCtx = hasTimeout ? {
|
|
1625
|
-
prolong: (ms) => {
|
|
1626
|
-
const duration = ms ?? timeoutMs;
|
|
1627
|
-
if (duration != null && duration > 0) {
|
|
1628
|
-
armTimeout(duration);
|
|
1629
|
-
backend.prolongJob(job.id).catch(() => {
|
|
1630
|
-
});
|
|
1631
|
-
}
|
|
1632
|
-
},
|
|
1633
|
-
onTimeout: (callback) => {
|
|
1634
|
-
onTimeoutCallback = callback;
|
|
1635
|
-
}
|
|
1636
|
-
} : {
|
|
1637
|
-
prolong: () => {
|
|
1638
|
-
log("prolong() called but ignored: job has no timeout set");
|
|
1639
|
-
},
|
|
1640
|
-
onTimeout: () => {
|
|
1641
|
-
log("onTimeout() called but ignored: job has no timeout set");
|
|
1642
|
-
}
|
|
1643
|
-
};
|
|
1644
|
-
const ctx = pool ? buildWaitContext(backend, pool, job.id, stepData, baseCtx) : buildBasicContext(backend, job.id, baseCtx);
|
|
1645
|
-
if (forceKillOnTimeout && !hasTimeout) {
|
|
1646
|
-
log(
|
|
1647
|
-
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
1648
|
-
);
|
|
2358
|
+
if (updates.retryBackoff !== void 0) {
|
|
2359
|
+
updateFields.push(`retry_backoff = $${paramIdx++}`);
|
|
2360
|
+
params.push(updates.retryBackoff);
|
|
1649
2361
|
}
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
jobPromise,
|
|
1654
|
-
new Promise((_, reject) => {
|
|
1655
|
-
timeoutReject = reject;
|
|
1656
|
-
armTimeout(timeoutMs);
|
|
1657
|
-
})
|
|
1658
|
-
]);
|
|
1659
|
-
} else {
|
|
1660
|
-
await jobPromise;
|
|
2362
|
+
if (updates.retryDelayMax !== void 0) {
|
|
2363
|
+
updateFields.push(`retry_delay_max = $${paramIdx++}`);
|
|
2364
|
+
params.push(updates.retryDelayMax);
|
|
1661
2365
|
}
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
if (!pool) {
|
|
1669
|
-
await backend.failJob(
|
|
1670
|
-
job.id,
|
|
1671
|
-
new Error(
|
|
1672
|
-
"WaitSignal received but wait features require the PostgreSQL backend."
|
|
1673
|
-
),
|
|
1674
|
-
"handler_error" /* HandlerError */
|
|
1675
|
-
);
|
|
2366
|
+
if (nextRunAt !== void 0) {
|
|
2367
|
+
updateFields.push(`next_run_at = $${paramIdx++}`);
|
|
2368
|
+
params.push(nextRunAt);
|
|
2369
|
+
}
|
|
2370
|
+
if (updateFields.length === 0) {
|
|
2371
|
+
log(`No fields to update for cron schedule ${id}`);
|
|
1676
2372
|
return;
|
|
1677
2373
|
}
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
)
|
|
1681
|
-
await
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
2374
|
+
updateFields.push(`updated_at = NOW()`);
|
|
2375
|
+
params.push(id);
|
|
2376
|
+
const query = `UPDATE cron_schedules SET ${updateFields.join(", ")} WHERE id = $${paramIdx}`;
|
|
2377
|
+
await client.query(query, params);
|
|
2378
|
+
log(`Edited cron schedule ${id}`);
|
|
2379
|
+
} catch (error) {
|
|
2380
|
+
log(`Error editing cron schedule ${id}: ${error}`);
|
|
2381
|
+
throw error;
|
|
2382
|
+
} finally {
|
|
2383
|
+
client.release();
|
|
1687
2384
|
}
|
|
1688
|
-
|
|
1689
|
-
|
|
1690
|
-
|
|
1691
|
-
|
|
2385
|
+
}
|
|
2386
|
+
/**
|
|
2387
|
+
* Atomically fetch all active cron schedules whose nextRunAt <= NOW().
|
|
2388
|
+
* Uses FOR UPDATE SKIP LOCKED to prevent duplicate enqueuing across workers.
|
|
2389
|
+
*/
|
|
2390
|
+
async getDueCronSchedules() {
|
|
2391
|
+
const client = await this.pool.connect();
|
|
2392
|
+
try {
|
|
2393
|
+
const result = await client.query(
|
|
2394
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
2395
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
2396
|
+
priority, timeout_ms AS "timeoutMs",
|
|
2397
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
2398
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
2399
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
2400
|
+
next_run_at AS "nextRunAt",
|
|
2401
|
+
created_at AS "createdAt", updated_at AS "updatedAt",
|
|
2402
|
+
retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
|
|
2403
|
+
retry_delay_max AS "retryDelayMax"
|
|
2404
|
+
FROM cron_schedules
|
|
2405
|
+
WHERE status = 'active'
|
|
2406
|
+
AND next_run_at IS NOT NULL
|
|
2407
|
+
AND next_run_at <= NOW()
|
|
2408
|
+
ORDER BY next_run_at ASC
|
|
2409
|
+
FOR UPDATE SKIP LOCKED`
|
|
2410
|
+
);
|
|
2411
|
+
log(`Found ${result.rows.length} due cron schedules`);
|
|
2412
|
+
return result.rows;
|
|
2413
|
+
} catch (error) {
|
|
2414
|
+
if (error?.code === "42P01") {
|
|
2415
|
+
log("cron_schedules table does not exist, skipping cron enqueue");
|
|
2416
|
+
return [];
|
|
2417
|
+
}
|
|
2418
|
+
log(`Error getting due cron schedules: ${error}`);
|
|
2419
|
+
throw error;
|
|
2420
|
+
} finally {
|
|
2421
|
+
client.release();
|
|
1692
2422
|
}
|
|
1693
|
-
await backend.failJob(
|
|
1694
|
-
job.id,
|
|
1695
|
-
error instanceof Error ? error : new Error(String(error)),
|
|
1696
|
-
failureReason
|
|
1697
|
-
);
|
|
1698
2423
|
}
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
2424
|
+
/**
|
|
2425
|
+
* Update a cron schedule after a job has been enqueued.
|
|
2426
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
2427
|
+
*/
|
|
2428
|
+
async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
|
|
2429
|
+
const client = await this.pool.connect();
|
|
2430
|
+
try {
|
|
2431
|
+
await client.query(
|
|
2432
|
+
`UPDATE cron_schedules
|
|
2433
|
+
SET last_enqueued_at = $2,
|
|
2434
|
+
last_job_id = $3,
|
|
2435
|
+
next_run_at = $4,
|
|
2436
|
+
updated_at = NOW()
|
|
2437
|
+
WHERE id = $1`,
|
|
2438
|
+
[id, lastEnqueuedAt, lastJobId, nextRunAt]
|
|
2439
|
+
);
|
|
2440
|
+
log(
|
|
2441
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
|
|
2442
|
+
);
|
|
2443
|
+
} catch (error) {
|
|
2444
|
+
log(`Error updating cron schedule ${id} after enqueue: ${error}`);
|
|
2445
|
+
throw error;
|
|
2446
|
+
} finally {
|
|
2447
|
+
client.release();
|
|
2448
|
+
}
|
|
1711
2449
|
}
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
2450
|
+
// ── Wait / step-data support ────────────────────────────────────────
|
|
2451
|
+
/**
|
|
2452
|
+
* Transition a job from 'processing' to 'waiting' status.
|
|
2453
|
+
* Persists step data so the handler can resume from where it left off.
|
|
2454
|
+
*
|
|
2455
|
+
* @param jobId - The job to pause.
|
|
2456
|
+
* @param options - Wait configuration including optional waitUntil date, token ID, and step data.
|
|
2457
|
+
*/
|
|
2458
|
+
async waitJob(jobId, options) {
|
|
2459
|
+
const client = await this.pool.connect();
|
|
2460
|
+
try {
|
|
2461
|
+
const result = await client.query(
|
|
2462
|
+
`
|
|
2463
|
+
UPDATE job_queue
|
|
2464
|
+
SET status = 'waiting',
|
|
2465
|
+
wait_until = $2,
|
|
2466
|
+
wait_token_id = $3,
|
|
2467
|
+
step_data = $4,
|
|
2468
|
+
locked_at = NULL,
|
|
2469
|
+
locked_by = NULL,
|
|
2470
|
+
updated_at = NOW()
|
|
2471
|
+
WHERE id = $1 AND status = 'processing'
|
|
2472
|
+
`,
|
|
2473
|
+
[
|
|
2474
|
+
jobId,
|
|
2475
|
+
options.waitUntil ?? null,
|
|
2476
|
+
options.waitTokenId ?? null,
|
|
2477
|
+
JSON.stringify(options.stepData)
|
|
2478
|
+
]
|
|
2479
|
+
);
|
|
2480
|
+
if (result.rowCount === 0) {
|
|
2481
|
+
log(
|
|
2482
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
2483
|
+
);
|
|
2484
|
+
return;
|
|
1733
2485
|
}
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
}
|
|
1738
|
-
|
|
1739
|
-
|
|
1740
|
-
|
|
1741
|
-
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
|
|
1747
|
-
|
|
1748
|
-
|
|
1749
|
-
|
|
1750
|
-
|
|
1751
|
-
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
|
|
1755
|
-
);
|
|
2486
|
+
await this.recordJobEvent(jobId, "waiting" /* Waiting */, {
|
|
2487
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
2488
|
+
waitTokenId: options.waitTokenId ?? null
|
|
2489
|
+
});
|
|
2490
|
+
log(`Job ${jobId} set to waiting`);
|
|
2491
|
+
} catch (error) {
|
|
2492
|
+
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
2493
|
+
throw error;
|
|
2494
|
+
} finally {
|
|
2495
|
+
client.release();
|
|
2496
|
+
}
|
|
2497
|
+
}
|
|
2498
|
+
/**
|
|
2499
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
2500
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
2501
|
+
*
|
|
2502
|
+
* @param jobId - The job to update.
|
|
2503
|
+
* @param stepData - The step data to persist.
|
|
2504
|
+
*/
|
|
2505
|
+
async updateStepData(jobId, stepData) {
|
|
2506
|
+
const client = await this.pool.connect();
|
|
1756
2507
|
try {
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1760
|
-
batchSize,
|
|
1761
|
-
jobType,
|
|
1762
|
-
handlers,
|
|
1763
|
-
concurrency,
|
|
1764
|
-
onError
|
|
2508
|
+
await client.query(
|
|
2509
|
+
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
2510
|
+
[jobId, JSON.stringify(stepData)]
|
|
1765
2511
|
);
|
|
1766
|
-
return processed;
|
|
1767
2512
|
} catch (error) {
|
|
1768
|
-
|
|
2513
|
+
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
2514
|
+
} finally {
|
|
2515
|
+
client.release();
|
|
1769
2516
|
}
|
|
1770
|
-
return 0;
|
|
1771
|
-
};
|
|
1772
|
-
return {
|
|
1773
|
-
/**
|
|
1774
|
-
* Start the job processor in the background.
|
|
1775
|
-
* - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
|
|
1776
|
-
* - You have to call the stop method to stop the processor.
|
|
1777
|
-
*/
|
|
1778
|
-
startInBackground: () => {
|
|
1779
|
-
if (running) return;
|
|
1780
|
-
log(`Starting job processor with workerId: ${workerId}`);
|
|
1781
|
-
running = true;
|
|
1782
|
-
const scheduleNext = (immediate) => {
|
|
1783
|
-
if (!running) return;
|
|
1784
|
-
if (immediate) {
|
|
1785
|
-
intervalId = setTimeout(loop, 0);
|
|
1786
|
-
} else {
|
|
1787
|
-
intervalId = setTimeout(loop, pollInterval);
|
|
1788
|
-
}
|
|
1789
|
-
};
|
|
1790
|
-
const loop = async () => {
|
|
1791
|
-
if (!running) return;
|
|
1792
|
-
currentBatchPromise = processJobs();
|
|
1793
|
-
const processed = await currentBatchPromise;
|
|
1794
|
-
currentBatchPromise = null;
|
|
1795
|
-
scheduleNext(processed === batchSize);
|
|
1796
|
-
};
|
|
1797
|
-
loop();
|
|
1798
|
-
},
|
|
1799
|
-
/**
|
|
1800
|
-
* Stop the job processor that runs in the background.
|
|
1801
|
-
* Does not wait for in-flight jobs.
|
|
1802
|
-
*/
|
|
1803
|
-
stop: () => {
|
|
1804
|
-
log(`Stopping job processor with workerId: ${workerId}`);
|
|
1805
|
-
running = false;
|
|
1806
|
-
if (intervalId) {
|
|
1807
|
-
clearTimeout(intervalId);
|
|
1808
|
-
intervalId = null;
|
|
1809
|
-
}
|
|
1810
|
-
},
|
|
1811
|
-
/**
|
|
1812
|
-
* Stop the job processor and wait for all in-flight jobs to complete.
|
|
1813
|
-
* Useful for graceful shutdown (e.g., SIGTERM handling).
|
|
1814
|
-
*/
|
|
1815
|
-
stopAndDrain: async (drainTimeoutMs = 3e4) => {
|
|
1816
|
-
log(`Stopping and draining job processor with workerId: ${workerId}`);
|
|
1817
|
-
running = false;
|
|
1818
|
-
if (intervalId) {
|
|
1819
|
-
clearTimeout(intervalId);
|
|
1820
|
-
intervalId = null;
|
|
1821
|
-
}
|
|
1822
|
-
if (currentBatchPromise) {
|
|
1823
|
-
await Promise.race([
|
|
1824
|
-
currentBatchPromise.catch(() => {
|
|
1825
|
-
}),
|
|
1826
|
-
new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
|
|
1827
|
-
]);
|
|
1828
|
-
currentBatchPromise = null;
|
|
1829
|
-
}
|
|
1830
|
-
log(`Job processor ${workerId} drained`);
|
|
1831
|
-
},
|
|
1832
|
-
/**
|
|
1833
|
-
* Start the job processor synchronously.
|
|
1834
|
-
* - This will process all jobs immediately and then stop.
|
|
1835
|
-
* - The pollInterval is ignored.
|
|
1836
|
-
*/
|
|
1837
|
-
start: async () => {
|
|
1838
|
-
log(`Starting job processor with workerId: ${workerId}`);
|
|
1839
|
-
running = true;
|
|
1840
|
-
const processed = await processJobs();
|
|
1841
|
-
running = false;
|
|
1842
|
-
return processed;
|
|
1843
|
-
},
|
|
1844
|
-
isRunning: () => running
|
|
1845
|
-
};
|
|
1846
|
-
};
|
|
1847
|
-
function loadPemOrFile(value) {
|
|
1848
|
-
if (!value) return void 0;
|
|
1849
|
-
if (value.startsWith("file://")) {
|
|
1850
|
-
const filePath = value.slice(7);
|
|
1851
|
-
return fs__default.default.readFileSync(filePath, "utf8");
|
|
1852
2517
|
}
|
|
1853
|
-
|
|
1854
|
-
|
|
1855
|
-
|
|
1856
|
-
|
|
1857
|
-
|
|
1858
|
-
|
|
1859
|
-
|
|
1860
|
-
|
|
2518
|
+
/**
|
|
2519
|
+
* Create a waitpoint token in the database.
|
|
2520
|
+
*
|
|
2521
|
+
* @param jobId - The job ID to associate with the token (null if created outside a handler).
|
|
2522
|
+
* @param options - Optional timeout string (e.g. '10m', '1h') and tags.
|
|
2523
|
+
* @returns The created waitpoint with its unique ID.
|
|
2524
|
+
*/
|
|
2525
|
+
async createWaitpoint(jobId, options) {
|
|
2526
|
+
const client = await this.pool.connect();
|
|
1861
2527
|
try {
|
|
1862
|
-
const
|
|
1863
|
-
|
|
1864
|
-
|
|
1865
|
-
|
|
1866
|
-
|
|
2528
|
+
const id = `wp_${crypto.randomUUID()}`;
|
|
2529
|
+
let timeoutAt = null;
|
|
2530
|
+
if (options?.timeout) {
|
|
2531
|
+
const ms = parseTimeoutString(options.timeout);
|
|
2532
|
+
timeoutAt = new Date(Date.now() + ms);
|
|
1867
2533
|
}
|
|
1868
|
-
|
|
1869
|
-
|
|
1870
|
-
|
|
1871
|
-
|
|
1872
|
-
|
|
1873
|
-
|
|
1874
|
-
|
|
2534
|
+
await client.query(
|
|
2535
|
+
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
2536
|
+
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
2537
|
+
);
|
|
2538
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
2539
|
+
return { id };
|
|
2540
|
+
} catch (error) {
|
|
2541
|
+
log(`Error creating waitpoint: ${error}`);
|
|
2542
|
+
throw error;
|
|
2543
|
+
} finally {
|
|
2544
|
+
client.release();
|
|
2545
|
+
}
|
|
2546
|
+
}
|
|
2547
|
+
/**
|
|
2548
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
2549
|
+
*
|
|
2550
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
2551
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
2552
|
+
*/
|
|
2553
|
+
async completeWaitpoint(tokenId, data) {
|
|
2554
|
+
const client = await this.pool.connect();
|
|
2555
|
+
try {
|
|
2556
|
+
await client.query("BEGIN");
|
|
2557
|
+
const wpResult = await client.query(
|
|
2558
|
+
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
2559
|
+
WHERE id = $1 AND status = 'waiting'
|
|
2560
|
+
RETURNING job_id`,
|
|
2561
|
+
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
2562
|
+
);
|
|
2563
|
+
if (wpResult.rows.length === 0) {
|
|
2564
|
+
await client.query("ROLLBACK");
|
|
2565
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
2566
|
+
return;
|
|
1875
2567
|
}
|
|
1876
|
-
|
|
1877
|
-
if (
|
|
1878
|
-
|
|
2568
|
+
const jobId = wpResult.rows[0].job_id;
|
|
2569
|
+
if (jobId != null) {
|
|
2570
|
+
await client.query(
|
|
2571
|
+
`UPDATE job_queue
|
|
2572
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
2573
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
2574
|
+
[jobId]
|
|
2575
|
+
);
|
|
1879
2576
|
}
|
|
2577
|
+
await client.query("COMMIT");
|
|
2578
|
+
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
2579
|
+
} catch (error) {
|
|
2580
|
+
await client.query("ROLLBACK");
|
|
2581
|
+
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
2582
|
+
throw error;
|
|
2583
|
+
} finally {
|
|
2584
|
+
client.release();
|
|
1880
2585
|
}
|
|
1881
2586
|
}
|
|
1882
|
-
|
|
1883
|
-
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
|
|
1888
|
-
|
|
2587
|
+
/**
|
|
2588
|
+
* Retrieve a waitpoint token by its ID.
|
|
2589
|
+
*
|
|
2590
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
2591
|
+
* @returns The waitpoint record, or null if not found.
|
|
2592
|
+
*/
|
|
2593
|
+
async getWaitpoint(tokenId) {
|
|
2594
|
+
const client = await this.pool.connect();
|
|
2595
|
+
try {
|
|
2596
|
+
const result = await client.query(
|
|
2597
|
+
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
2598
|
+
[tokenId]
|
|
2599
|
+
);
|
|
2600
|
+
if (result.rows.length === 0) return null;
|
|
2601
|
+
return result.rows[0];
|
|
2602
|
+
} catch (error) {
|
|
2603
|
+
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
2604
|
+
throw error;
|
|
2605
|
+
} finally {
|
|
2606
|
+
client.release();
|
|
1889
2607
|
}
|
|
1890
|
-
const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
|
|
1891
|
-
ssl = {
|
|
1892
|
-
...ssl,
|
|
1893
|
-
...caValue ? { ca: caValue } : {},
|
|
1894
|
-
cert: loadPemOrFile(
|
|
1895
|
-
typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
|
|
1896
|
-
),
|
|
1897
|
-
key: loadPemOrFile(
|
|
1898
|
-
typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
|
|
1899
|
-
),
|
|
1900
|
-
rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
|
|
1901
|
-
};
|
|
1902
2608
|
}
|
|
1903
|
-
|
|
1904
|
-
|
|
1905
|
-
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
|
|
1911
|
-
|
|
1912
|
-
|
|
1913
|
-
|
|
1914
|
-
|
|
1915
|
-
|
|
1916
|
-
|
|
2609
|
+
/**
|
|
2610
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
2611
|
+
*
|
|
2612
|
+
* @returns The number of tokens that were expired.
|
|
2613
|
+
*/
|
|
2614
|
+
async expireTimedOutWaitpoints() {
|
|
2615
|
+
const client = await this.pool.connect();
|
|
2616
|
+
try {
|
|
2617
|
+
await client.query("BEGIN");
|
|
2618
|
+
const result = await client.query(
|
|
2619
|
+
`UPDATE waitpoints
|
|
2620
|
+
SET status = 'timed_out'
|
|
2621
|
+
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
2622
|
+
RETURNING id, job_id`
|
|
2623
|
+
);
|
|
2624
|
+
for (const row of result.rows) {
|
|
2625
|
+
if (row.job_id != null) {
|
|
2626
|
+
await client.query(
|
|
2627
|
+
`UPDATE job_queue
|
|
2628
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
2629
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
2630
|
+
[row.job_id]
|
|
2631
|
+
);
|
|
2632
|
+
}
|
|
2633
|
+
}
|
|
2634
|
+
await client.query("COMMIT");
|
|
2635
|
+
const count = result.rowCount || 0;
|
|
2636
|
+
if (count > 0) {
|
|
2637
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
2638
|
+
}
|
|
2639
|
+
return count;
|
|
2640
|
+
} catch (error) {
|
|
2641
|
+
await client.query("ROLLBACK");
|
|
2642
|
+
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
2643
|
+
throw error;
|
|
2644
|
+
} finally {
|
|
2645
|
+
client.release();
|
|
2646
|
+
}
|
|
1917
2647
|
}
|
|
1918
|
-
|
|
1919
|
-
|
|
1920
|
-
|
|
1921
|
-
|
|
1922
|
-
|
|
1923
|
-
|
|
1924
|
-
|
|
1925
|
-
|
|
2648
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2649
|
+
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
2650
|
+
const client = await this.pool.connect();
|
|
2651
|
+
try {
|
|
2652
|
+
let jobTypeFilter = "";
|
|
2653
|
+
const params = [reason];
|
|
2654
|
+
if (jobType) {
|
|
2655
|
+
if (Array.isArray(jobType)) {
|
|
2656
|
+
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
2657
|
+
params.push(jobType);
|
|
2658
|
+
} else {
|
|
2659
|
+
jobTypeFilter = ` AND job_type = $2`;
|
|
2660
|
+
params.push(jobType);
|
|
2661
|
+
}
|
|
2662
|
+
}
|
|
2663
|
+
await client.query(
|
|
2664
|
+
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
2665
|
+
params
|
|
2666
|
+
);
|
|
2667
|
+
} finally {
|
|
2668
|
+
client.release();
|
|
2669
|
+
}
|
|
1926
2670
|
}
|
|
1927
|
-
return pool;
|
|
1928
2671
|
};
|
|
1929
2672
|
|
|
1930
2673
|
// src/backends/redis-scripts.ts
|
|
@@ -1941,6 +2684,9 @@ local forceKillOnTimeout = ARGV[7]
|
|
|
1941
2684
|
local tagsJson = ARGV[8] -- "null" or JSON array string
|
|
1942
2685
|
local idempotencyKey = ARGV[9] -- "null" string if not set
|
|
1943
2686
|
local nowMs = tonumber(ARGV[10])
|
|
2687
|
+
local retryDelay = ARGV[11] -- "null" or seconds string
|
|
2688
|
+
local retryBackoff = ARGV[12] -- "null" or "true"/"false"
|
|
2689
|
+
local retryDelayMax = ARGV[13] -- "null" or seconds string
|
|
1944
2690
|
|
|
1945
2691
|
-- Idempotency check
|
|
1946
2692
|
if idempotencyKey ~= "null" then
|
|
@@ -1981,7 +2727,13 @@ redis.call('HMSET', jobKey,
|
|
|
1981
2727
|
'lastFailedAt', 'null',
|
|
1982
2728
|
'lastCancelledAt', 'null',
|
|
1983
2729
|
'tags', tagsJson,
|
|
1984
|
-
'idempotencyKey', idempotencyKey
|
|
2730
|
+
'idempotencyKey', idempotencyKey,
|
|
2731
|
+
'waitUntil', 'null',
|
|
2732
|
+
'waitTokenId', 'null',
|
|
2733
|
+
'stepData', 'null',
|
|
2734
|
+
'retryDelay', retryDelay,
|
|
2735
|
+
'retryBackoff', retryBackoff,
|
|
2736
|
+
'retryDelayMax', retryDelayMax
|
|
1985
2737
|
)
|
|
1986
2738
|
|
|
1987
2739
|
-- Status index
|
|
@@ -2022,6 +2774,118 @@ end
|
|
|
2022
2774
|
|
|
2023
2775
|
return id
|
|
2024
2776
|
`;
|
|
2777
|
+
var ADD_JOBS_SCRIPT = `
|
|
2778
|
+
local prefix = KEYS[1]
|
|
2779
|
+
local jobsJson = ARGV[1]
|
|
2780
|
+
local nowMs = tonumber(ARGV[2])
|
|
2781
|
+
|
|
2782
|
+
local jobs = cjson.decode(jobsJson)
|
|
2783
|
+
local results = {}
|
|
2784
|
+
|
|
2785
|
+
for i, job in ipairs(jobs) do
|
|
2786
|
+
local jobType = job.jobType
|
|
2787
|
+
local payloadJson = job.payload
|
|
2788
|
+
local maxAttempts = tonumber(job.maxAttempts)
|
|
2789
|
+
local priority = tonumber(job.priority)
|
|
2790
|
+
local runAtMs = tostring(job.runAtMs)
|
|
2791
|
+
local timeoutMs = tostring(job.timeoutMs)
|
|
2792
|
+
local forceKillOnTimeout = tostring(job.forceKillOnTimeout)
|
|
2793
|
+
local tagsJson = tostring(job.tags)
|
|
2794
|
+
local idempotencyKey = tostring(job.idempotencyKey)
|
|
2795
|
+
local retryDelay = tostring(job.retryDelay)
|
|
2796
|
+
local retryBackoff = tostring(job.retryBackoff)
|
|
2797
|
+
local retryDelayMax = tostring(job.retryDelayMax)
|
|
2798
|
+
|
|
2799
|
+
-- Idempotency check
|
|
2800
|
+
local skip = false
|
|
2801
|
+
if idempotencyKey ~= "null" then
|
|
2802
|
+
local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
|
|
2803
|
+
if existing then
|
|
2804
|
+
results[i] = tonumber(existing)
|
|
2805
|
+
skip = true
|
|
2806
|
+
end
|
|
2807
|
+
end
|
|
2808
|
+
|
|
2809
|
+
if not skip then
|
|
2810
|
+
-- Generate ID
|
|
2811
|
+
local id = redis.call('INCR', prefix .. 'id_seq')
|
|
2812
|
+
local jobKey = prefix .. 'job:' .. id
|
|
2813
|
+
local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
|
|
2814
|
+
|
|
2815
|
+
-- Store the job hash
|
|
2816
|
+
redis.call('HMSET', jobKey,
|
|
2817
|
+
'id', id,
|
|
2818
|
+
'jobType', jobType,
|
|
2819
|
+
'payload', payloadJson,
|
|
2820
|
+
'status', 'pending',
|
|
2821
|
+
'maxAttempts', maxAttempts,
|
|
2822
|
+
'attempts', 0,
|
|
2823
|
+
'priority', priority,
|
|
2824
|
+
'runAt', runAt,
|
|
2825
|
+
'timeoutMs', timeoutMs,
|
|
2826
|
+
'forceKillOnTimeout', forceKillOnTimeout,
|
|
2827
|
+
'createdAt', nowMs,
|
|
2828
|
+
'updatedAt', nowMs,
|
|
2829
|
+
'lockedAt', 'null',
|
|
2830
|
+
'lockedBy', 'null',
|
|
2831
|
+
'nextAttemptAt', 'null',
|
|
2832
|
+
'pendingReason', 'null',
|
|
2833
|
+
'errorHistory', '[]',
|
|
2834
|
+
'failureReason', 'null',
|
|
2835
|
+
'completedAt', 'null',
|
|
2836
|
+
'startedAt', 'null',
|
|
2837
|
+
'lastRetriedAt', 'null',
|
|
2838
|
+
'lastFailedAt', 'null',
|
|
2839
|
+
'lastCancelledAt', 'null',
|
|
2840
|
+
'tags', tagsJson,
|
|
2841
|
+
'idempotencyKey', idempotencyKey,
|
|
2842
|
+
'waitUntil', 'null',
|
|
2843
|
+
'waitTokenId', 'null',
|
|
2844
|
+
'stepData', 'null',
|
|
2845
|
+
'retryDelay', retryDelay,
|
|
2846
|
+
'retryBackoff', retryBackoff,
|
|
2847
|
+
'retryDelayMax', retryDelayMax
|
|
2848
|
+
)
|
|
2849
|
+
|
|
2850
|
+
-- Status index
|
|
2851
|
+
redis.call('SADD', prefix .. 'status:pending', id)
|
|
2852
|
+
|
|
2853
|
+
-- Type index
|
|
2854
|
+
redis.call('SADD', prefix .. 'type:' .. jobType, id)
|
|
2855
|
+
|
|
2856
|
+
-- Tag indexes
|
|
2857
|
+
if tagsJson ~= "null" then
|
|
2858
|
+
local tags = cjson.decode(tagsJson)
|
|
2859
|
+
for _, tag in ipairs(tags) do
|
|
2860
|
+
redis.call('SADD', prefix .. 'tag:' .. tag, id)
|
|
2861
|
+
end
|
|
2862
|
+
for _, tag in ipairs(tags) do
|
|
2863
|
+
redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
|
|
2864
|
+
end
|
|
2865
|
+
end
|
|
2866
|
+
|
|
2867
|
+
-- Idempotency mapping
|
|
2868
|
+
if idempotencyKey ~= "null" then
|
|
2869
|
+
redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
|
|
2870
|
+
end
|
|
2871
|
+
|
|
2872
|
+
-- All-jobs sorted set
|
|
2873
|
+
redis.call('ZADD', prefix .. 'all', nowMs, id)
|
|
2874
|
+
|
|
2875
|
+
-- Queue or delayed
|
|
2876
|
+
if runAt <= nowMs then
|
|
2877
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
|
|
2878
|
+
redis.call('ZADD', prefix .. 'queue', score, id)
|
|
2879
|
+
else
|
|
2880
|
+
redis.call('ZADD', prefix .. 'delayed', runAt, id)
|
|
2881
|
+
end
|
|
2882
|
+
|
|
2883
|
+
results[i] = id
|
|
2884
|
+
end
|
|
2885
|
+
end
|
|
2886
|
+
|
|
2887
|
+
return results
|
|
2888
|
+
`;
|
|
2025
2889
|
var GET_NEXT_BATCH_SCRIPT = `
|
|
2026
2890
|
local prefix = KEYS[1]
|
|
2027
2891
|
local workerId = ARGV[1]
|
|
@@ -2064,7 +2928,25 @@ for _, jobId in ipairs(retries) do
|
|
|
2064
2928
|
redis.call('ZREM', prefix .. 'retry', jobId)
|
|
2065
2929
|
end
|
|
2066
2930
|
|
|
2067
|
-
-- 3.
|
|
2931
|
+
-- 3. Move ready waiting jobs (time-based, no token) into queue
|
|
2932
|
+
local waitingJobs = redis.call('ZRANGEBYSCORE', prefix .. 'waiting', '-inf', nowMs, 'LIMIT', 0, 200)
|
|
2933
|
+
for _, jobId in ipairs(waitingJobs) do
|
|
2934
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2935
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2936
|
+
local waitTokenId = redis.call('HGET', jk, 'waitTokenId')
|
|
2937
|
+
if status == 'waiting' and (waitTokenId == false or waitTokenId == 'null') then
|
|
2938
|
+
local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2939
|
+
local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2940
|
+
local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
|
|
2941
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2942
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2943
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2944
|
+
redis.call('HMSET', jk, 'status', 'pending', 'waitUntil', 'null')
|
|
2945
|
+
end
|
|
2946
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2947
|
+
end
|
|
2948
|
+
|
|
2949
|
+
-- 4. Parse job type filter
|
|
2068
2950
|
local filterTypes = nil
|
|
2069
2951
|
if jobTypeFilter ~= "null" then
|
|
2070
2952
|
-- Could be a JSON array or a plain string
|
|
@@ -2077,7 +2959,7 @@ if jobTypeFilter ~= "null" then
|
|
|
2077
2959
|
end
|
|
2078
2960
|
end
|
|
2079
2961
|
|
|
2080
|
-
--
|
|
2962
|
+
-- 5. Pop candidates from queue (highest score first)
|
|
2081
2963
|
-- We pop more than batchSize because some may be filtered out
|
|
2082
2964
|
local popCount = batchSize * 3
|
|
2083
2965
|
local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
|
|
@@ -2161,7 +3043,10 @@ local jk = prefix .. 'job:' .. jobId
|
|
|
2161
3043
|
redis.call('HMSET', jk,
|
|
2162
3044
|
'status', 'completed',
|
|
2163
3045
|
'updatedAt', nowMs,
|
|
2164
|
-
'completedAt', nowMs
|
|
3046
|
+
'completedAt', nowMs,
|
|
3047
|
+
'stepData', 'null',
|
|
3048
|
+
'waitUntil', 'null',
|
|
3049
|
+
'waitTokenId', 'null'
|
|
2165
3050
|
)
|
|
2166
3051
|
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2167
3052
|
redis.call('SADD', prefix .. 'status:completed', jobId)
|
|
@@ -2179,11 +3064,38 @@ local jk = prefix .. 'job:' .. jobId
|
|
|
2179
3064
|
local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
|
|
2180
3065
|
local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
|
|
2181
3066
|
|
|
2182
|
-
--
|
|
3067
|
+
-- Read per-job retry config (may be "null")
|
|
3068
|
+
local rdRaw = redis.call('HGET', jk, 'retryDelay')
|
|
3069
|
+
local rbRaw = redis.call('HGET', jk, 'retryBackoff')
|
|
3070
|
+
local rmRaw = redis.call('HGET', jk, 'retryDelayMax')
|
|
3071
|
+
|
|
2183
3072
|
local nextAttemptAt = 'null'
|
|
2184
3073
|
if attempts < maxAttempts then
|
|
2185
|
-
local
|
|
2186
|
-
|
|
3074
|
+
local allNull = (rdRaw == 'null' or rdRaw == false)
|
|
3075
|
+
and (rbRaw == 'null' or rbRaw == false)
|
|
3076
|
+
and (rmRaw == 'null' or rmRaw == false)
|
|
3077
|
+
if allNull then
|
|
3078
|
+
-- Legacy formula: 2^attempts minutes
|
|
3079
|
+
local delayMs = math.pow(2, attempts) * 60000
|
|
3080
|
+
nextAttemptAt = nowMs + delayMs
|
|
3081
|
+
else
|
|
3082
|
+
local retryDelaySec = 60
|
|
3083
|
+
if rdRaw and rdRaw ~= 'null' then retryDelaySec = tonumber(rdRaw) end
|
|
3084
|
+
local useBackoff = true
|
|
3085
|
+
if rbRaw and rbRaw ~= 'null' then useBackoff = (rbRaw == 'true') end
|
|
3086
|
+
local maxDelaySec = nil
|
|
3087
|
+
if rmRaw and rmRaw ~= 'null' then maxDelaySec = tonumber(rmRaw) end
|
|
3088
|
+
|
|
3089
|
+
local delaySec
|
|
3090
|
+
if useBackoff then
|
|
3091
|
+
delaySec = retryDelaySec * math.pow(2, attempts)
|
|
3092
|
+
if maxDelaySec then delaySec = math.min(delaySec, maxDelaySec) end
|
|
3093
|
+
delaySec = delaySec * (0.5 + 0.5 * math.random())
|
|
3094
|
+
else
|
|
3095
|
+
delaySec = retryDelaySec
|
|
3096
|
+
end
|
|
3097
|
+
nextAttemptAt = nowMs + math.floor(delaySec * 1000)
|
|
3098
|
+
end
|
|
2187
3099
|
end
|
|
2188
3100
|
|
|
2189
3101
|
-- Append to error_history
|
|
@@ -2220,6 +3132,7 @@ local nowMs = tonumber(ARGV[2])
|
|
|
2220
3132
|
local jk = prefix .. 'job:' .. jobId
|
|
2221
3133
|
|
|
2222
3134
|
local oldStatus = redis.call('HGET', jk, 'status')
|
|
3135
|
+
if oldStatus ~= 'failed' and oldStatus ~= 'processing' then return 0 end
|
|
2223
3136
|
|
|
2224
3137
|
redis.call('HMSET', jk,
|
|
2225
3138
|
'status', 'pending',
|
|
@@ -2231,9 +3144,7 @@ redis.call('HMSET', jk,
|
|
|
2231
3144
|
)
|
|
2232
3145
|
|
|
2233
3146
|
-- Remove from old status, add to pending
|
|
2234
|
-
|
|
2235
|
-
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2236
|
-
end
|
|
3147
|
+
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2237
3148
|
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2238
3149
|
|
|
2239
3150
|
-- Remove from retry sorted set if present
|
|
@@ -2254,18 +3165,21 @@ local nowMs = ARGV[2]
|
|
|
2254
3165
|
local jk = prefix .. 'job:' .. jobId
|
|
2255
3166
|
|
|
2256
3167
|
local status = redis.call('HGET', jk, 'status')
|
|
2257
|
-
if status ~= 'pending' then return 0 end
|
|
3168
|
+
if status ~= 'pending' and status ~= 'waiting' then return 0 end
|
|
2258
3169
|
|
|
2259
3170
|
redis.call('HMSET', jk,
|
|
2260
3171
|
'status', 'cancelled',
|
|
2261
3172
|
'updatedAt', nowMs,
|
|
2262
|
-
'lastCancelledAt', nowMs
|
|
3173
|
+
'lastCancelledAt', nowMs,
|
|
3174
|
+
'waitUntil', 'null',
|
|
3175
|
+
'waitTokenId', 'null'
|
|
2263
3176
|
)
|
|
2264
|
-
redis.call('SREM', prefix .. 'status:
|
|
3177
|
+
redis.call('SREM', prefix .. 'status:' .. status, jobId)
|
|
2265
3178
|
redis.call('SADD', prefix .. 'status:cancelled', jobId)
|
|
2266
|
-
-- Remove from queue / delayed
|
|
3179
|
+
-- Remove from queue / delayed / waiting
|
|
2267
3180
|
redis.call('ZREM', prefix .. 'queue', jobId)
|
|
2268
3181
|
redis.call('ZREM', prefix .. 'delayed', jobId)
|
|
3182
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2269
3183
|
|
|
2270
3184
|
return 1
|
|
2271
3185
|
`;
|
|
@@ -2333,18 +3247,16 @@ end
|
|
|
2333
3247
|
|
|
2334
3248
|
return count
|
|
2335
3249
|
`;
|
|
2336
|
-
var
|
|
3250
|
+
var CLEANUP_OLD_JOBS_BATCH_SCRIPT = `
|
|
2337
3251
|
local prefix = KEYS[1]
|
|
2338
3252
|
local cutoffMs = tonumber(ARGV[1])
|
|
2339
|
-
|
|
2340
|
-
local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
|
|
2341
3253
|
local count = 0
|
|
2342
3254
|
|
|
2343
|
-
for
|
|
3255
|
+
for i = 2, #ARGV do
|
|
3256
|
+
local jobId = ARGV[i]
|
|
2344
3257
|
local jk = prefix .. 'job:' .. jobId
|
|
2345
3258
|
local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
|
|
2346
3259
|
if updatedAt and updatedAt < cutoffMs then
|
|
2347
|
-
-- Remove all indexes
|
|
2348
3260
|
local jobType = redis.call('HGET', jk, 'jobType')
|
|
2349
3261
|
local tagsJson = redis.call('HGET', jk, 'tags')
|
|
2350
3262
|
local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
|
|
@@ -2367,7 +3279,6 @@ for _, jobId in ipairs(completed) do
|
|
|
2367
3279
|
if idempotencyKey and idempotencyKey ~= 'null' then
|
|
2368
3280
|
redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
|
|
2369
3281
|
end
|
|
2370
|
-
-- Delete events
|
|
2371
3282
|
redis.call('DEL', prefix .. 'events:' .. jobId)
|
|
2372
3283
|
|
|
2373
3284
|
count = count + 1
|
|
@@ -2376,8 +3287,158 @@ end
|
|
|
2376
3287
|
|
|
2377
3288
|
return count
|
|
2378
3289
|
`;
|
|
3290
|
+
var WAIT_JOB_SCRIPT = `
|
|
3291
|
+
local prefix = KEYS[1]
|
|
3292
|
+
local jobId = ARGV[1]
|
|
3293
|
+
local waitUntilMs = ARGV[2]
|
|
3294
|
+
local waitTokenId = ARGV[3]
|
|
3295
|
+
local stepDataJson = ARGV[4]
|
|
3296
|
+
local nowMs = ARGV[5]
|
|
3297
|
+
local jk = prefix .. 'job:' .. jobId
|
|
3298
|
+
|
|
3299
|
+
local status = redis.call('HGET', jk, 'status')
|
|
3300
|
+
if status ~= 'processing' then return 0 end
|
|
3301
|
+
|
|
3302
|
+
redis.call('HMSET', jk,
|
|
3303
|
+
'status', 'waiting',
|
|
3304
|
+
'waitUntil', waitUntilMs,
|
|
3305
|
+
'waitTokenId', waitTokenId,
|
|
3306
|
+
'stepData', stepDataJson,
|
|
3307
|
+
'lockedAt', 'null',
|
|
3308
|
+
'lockedBy', 'null',
|
|
3309
|
+
'updatedAt', nowMs
|
|
3310
|
+
)
|
|
3311
|
+
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
3312
|
+
redis.call('SADD', prefix .. 'status:waiting', jobId)
|
|
3313
|
+
|
|
3314
|
+
-- Add to waiting sorted set if time-based wait
|
|
3315
|
+
if waitUntilMs ~= 'null' then
|
|
3316
|
+
redis.call('ZADD', prefix .. 'waiting', tonumber(waitUntilMs), jobId)
|
|
3317
|
+
end
|
|
3318
|
+
|
|
3319
|
+
return 1
|
|
3320
|
+
`;
|
|
3321
|
+
var COMPLETE_WAITPOINT_SCRIPT = `
|
|
3322
|
+
local prefix = KEYS[1]
|
|
3323
|
+
local tokenId = ARGV[1]
|
|
3324
|
+
local outputJson = ARGV[2]
|
|
3325
|
+
local nowMs = ARGV[3]
|
|
3326
|
+
local wpk = prefix .. 'waitpoint:' .. tokenId
|
|
3327
|
+
|
|
3328
|
+
local wpStatus = redis.call('HGET', wpk, 'status')
|
|
3329
|
+
if not wpStatus or wpStatus ~= 'waiting' then return 0 end
|
|
3330
|
+
|
|
3331
|
+
redis.call('HMSET', wpk,
|
|
3332
|
+
'status', 'completed',
|
|
3333
|
+
'output', outputJson,
|
|
3334
|
+
'completedAt', nowMs
|
|
3335
|
+
)
|
|
3336
|
+
|
|
3337
|
+
-- Move associated job back to pending
|
|
3338
|
+
local jobId = redis.call('HGET', wpk, 'jobId')
|
|
3339
|
+
if jobId and jobId ~= 'null' then
|
|
3340
|
+
local jk = prefix .. 'job:' .. jobId
|
|
3341
|
+
local jobStatus = redis.call('HGET', jk, 'status')
|
|
3342
|
+
if jobStatus == 'waiting' then
|
|
3343
|
+
redis.call('HMSET', jk,
|
|
3344
|
+
'status', 'pending',
|
|
3345
|
+
'waitTokenId', 'null',
|
|
3346
|
+
'waitUntil', 'null',
|
|
3347
|
+
'updatedAt', nowMs
|
|
3348
|
+
)
|
|
3349
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
3350
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
3351
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
3352
|
+
|
|
3353
|
+
-- Re-add to queue
|
|
3354
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
3355
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
3356
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
3357
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
3358
|
+
end
|
|
3359
|
+
end
|
|
3360
|
+
|
|
3361
|
+
return 1
|
|
3362
|
+
`;
|
|
3363
|
+
var EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT = `
|
|
3364
|
+
local prefix = KEYS[1]
|
|
3365
|
+
local nowMs = tonumber(ARGV[1])
|
|
3366
|
+
|
|
3367
|
+
local expiredIds = redis.call('ZRANGEBYSCORE', prefix .. 'waitpoint_timeout', '-inf', nowMs)
|
|
3368
|
+
local count = 0
|
|
3369
|
+
|
|
3370
|
+
for _, tokenId in ipairs(expiredIds) do
|
|
3371
|
+
local wpk = prefix .. 'waitpoint:' .. tokenId
|
|
3372
|
+
local wpStatus = redis.call('HGET', wpk, 'status')
|
|
3373
|
+
if wpStatus == 'waiting' then
|
|
3374
|
+
redis.call('HMSET', wpk,
|
|
3375
|
+
'status', 'timed_out'
|
|
3376
|
+
)
|
|
3377
|
+
|
|
3378
|
+
-- Move associated job back to pending
|
|
3379
|
+
local jobId = redis.call('HGET', wpk, 'jobId')
|
|
3380
|
+
if jobId and jobId ~= 'null' then
|
|
3381
|
+
local jk = prefix .. 'job:' .. jobId
|
|
3382
|
+
local jobStatus = redis.call('HGET', jk, 'status')
|
|
3383
|
+
if jobStatus == 'waiting' then
|
|
3384
|
+
redis.call('HMSET', jk,
|
|
3385
|
+
'status', 'pending',
|
|
3386
|
+
'waitTokenId', 'null',
|
|
3387
|
+
'waitUntil', 'null',
|
|
3388
|
+
'updatedAt', nowMs
|
|
3389
|
+
)
|
|
3390
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
3391
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
3392
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
3393
|
+
|
|
3394
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
3395
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
3396
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
3397
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
3398
|
+
end
|
|
3399
|
+
end
|
|
3400
|
+
|
|
3401
|
+
count = count + 1
|
|
3402
|
+
end
|
|
3403
|
+
redis.call('ZREM', prefix .. 'waitpoint_timeout', tokenId)
|
|
3404
|
+
end
|
|
2379
3405
|
|
|
2380
|
-
|
|
3406
|
+
return count
|
|
3407
|
+
`;
|
|
3408
|
+
var MAX_TIMEOUT_MS2 = 365 * 24 * 60 * 60 * 1e3;
|
|
3409
|
+
function parseTimeoutString2(timeout) {
|
|
3410
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
3411
|
+
if (!match) {
|
|
3412
|
+
throw new Error(
|
|
3413
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
3414
|
+
);
|
|
3415
|
+
}
|
|
3416
|
+
const value = parseInt(match[1], 10);
|
|
3417
|
+
const unit = match[2];
|
|
3418
|
+
let ms;
|
|
3419
|
+
switch (unit) {
|
|
3420
|
+
case "s":
|
|
3421
|
+
ms = value * 1e3;
|
|
3422
|
+
break;
|
|
3423
|
+
case "m":
|
|
3424
|
+
ms = value * 60 * 1e3;
|
|
3425
|
+
break;
|
|
3426
|
+
case "h":
|
|
3427
|
+
ms = value * 60 * 60 * 1e3;
|
|
3428
|
+
break;
|
|
3429
|
+
case "d":
|
|
3430
|
+
ms = value * 24 * 60 * 60 * 1e3;
|
|
3431
|
+
break;
|
|
3432
|
+
default:
|
|
3433
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
3434
|
+
}
|
|
3435
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS2) {
|
|
3436
|
+
throw new Error(
|
|
3437
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
3438
|
+
);
|
|
3439
|
+
}
|
|
3440
|
+
return ms;
|
|
3441
|
+
}
|
|
2381
3442
|
function hashToObject(arr) {
|
|
2382
3443
|
const obj = {};
|
|
2383
3444
|
for (let i = 0; i < arr.length; i += 2) {
|
|
@@ -2443,11 +3504,41 @@ function deserializeJob(h) {
|
|
|
2443
3504
|
lastCancelledAt: dateOrNull(h.lastCancelledAt),
|
|
2444
3505
|
tags,
|
|
2445
3506
|
idempotencyKey: nullish(h.idempotencyKey),
|
|
2446
|
-
progress: numOrNull(h.progress)
|
|
3507
|
+
progress: numOrNull(h.progress),
|
|
3508
|
+
waitUntil: dateOrNull(h.waitUntil),
|
|
3509
|
+
waitTokenId: nullish(h.waitTokenId),
|
|
3510
|
+
stepData: parseStepData(h.stepData),
|
|
3511
|
+
retryDelay: numOrNull(h.retryDelay),
|
|
3512
|
+
retryBackoff: h.retryBackoff === "true" ? true : h.retryBackoff === "false" ? false : null,
|
|
3513
|
+
retryDelayMax: numOrNull(h.retryDelayMax)
|
|
2447
3514
|
};
|
|
2448
3515
|
}
|
|
3516
|
+
function parseStepData(raw) {
|
|
3517
|
+
if (!raw || raw === "null") return void 0;
|
|
3518
|
+
try {
|
|
3519
|
+
return JSON.parse(raw);
|
|
3520
|
+
} catch {
|
|
3521
|
+
return void 0;
|
|
3522
|
+
}
|
|
3523
|
+
}
|
|
2449
3524
|
var RedisBackend = class {
|
|
2450
|
-
|
|
3525
|
+
/**
|
|
3526
|
+
* Create a RedisBackend.
|
|
3527
|
+
*
|
|
3528
|
+
* @param configOrClient - Either `redisConfig` from the config file (the
|
|
3529
|
+
* library creates a new ioredis client) or an existing ioredis client
|
|
3530
|
+
* instance (bring your own).
|
|
3531
|
+
* @param keyPrefix - Key prefix, only used when `configOrClient` is an
|
|
3532
|
+
* external client. Ignored when `redisConfig` is passed (uses
|
|
3533
|
+
* `redisConfig.keyPrefix` instead). Default: `'dq:'`.
|
|
3534
|
+
*/
|
|
3535
|
+
constructor(configOrClient, keyPrefix) {
|
|
3536
|
+
if (configOrClient && typeof configOrClient.eval === "function") {
|
|
3537
|
+
this.client = configOrClient;
|
|
3538
|
+
this.prefix = keyPrefix ?? "dq:";
|
|
3539
|
+
return;
|
|
3540
|
+
}
|
|
3541
|
+
const redisConfig = configOrClient;
|
|
2451
3542
|
let IORedis;
|
|
2452
3543
|
try {
|
|
2453
3544
|
const _require = module$1.createRequire((typeof document === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('index.cjs', document.baseURI).href)));
|
|
@@ -2520,8 +3611,16 @@ var RedisBackend = class {
|
|
|
2520
3611
|
timeoutMs = void 0,
|
|
2521
3612
|
forceKillOnTimeout = false,
|
|
2522
3613
|
tags = void 0,
|
|
2523
|
-
idempotencyKey = void 0
|
|
2524
|
-
|
|
3614
|
+
idempotencyKey = void 0,
|
|
3615
|
+
retryDelay = void 0,
|
|
3616
|
+
retryBackoff = void 0,
|
|
3617
|
+
retryDelayMax = void 0
|
|
3618
|
+
}, options) {
|
|
3619
|
+
if (options?.db) {
|
|
3620
|
+
throw new Error(
|
|
3621
|
+
"The db option is not supported with the Redis backend. Transactional job creation is only available with PostgreSQL."
|
|
3622
|
+
);
|
|
3623
|
+
}
|
|
2525
3624
|
const now = this.nowMs();
|
|
2526
3625
|
const runAtMs = runAt ? runAt.getTime() : 0;
|
|
2527
3626
|
const result = await this.client.eval(
|
|
@@ -2537,7 +3636,10 @@ var RedisBackend = class {
|
|
|
2537
3636
|
forceKillOnTimeout ? "true" : "false",
|
|
2538
3637
|
tags ? JSON.stringify(tags) : "null",
|
|
2539
3638
|
idempotencyKey ?? "null",
|
|
2540
|
-
now
|
|
3639
|
+
now,
|
|
3640
|
+
retryDelay !== void 0 ? retryDelay.toString() : "null",
|
|
3641
|
+
retryBackoff !== void 0 ? retryBackoff.toString() : "null",
|
|
3642
|
+
retryDelayMax !== void 0 ? retryDelayMax.toString() : "null"
|
|
2541
3643
|
);
|
|
2542
3644
|
const jobId = Number(result);
|
|
2543
3645
|
log(
|
|
@@ -2551,6 +3653,58 @@ var RedisBackend = class {
|
|
|
2551
3653
|
});
|
|
2552
3654
|
return jobId;
|
|
2553
3655
|
}
|
|
3656
|
+
/**
|
|
3657
|
+
* Insert multiple jobs atomically via a single Lua script.
|
|
3658
|
+
* Returns IDs in the same order as the input array.
|
|
3659
|
+
*/
|
|
3660
|
+
async addJobs(jobs, options) {
|
|
3661
|
+
if (jobs.length === 0) return [];
|
|
3662
|
+
if (options?.db) {
|
|
3663
|
+
throw new Error(
|
|
3664
|
+
"The db option is not supported with the Redis backend. Transactional job creation is only available with PostgreSQL."
|
|
3665
|
+
);
|
|
3666
|
+
}
|
|
3667
|
+
const now = this.nowMs();
|
|
3668
|
+
const jobsPayload = jobs.map((job) => ({
|
|
3669
|
+
jobType: job.jobType,
|
|
3670
|
+
payload: JSON.stringify(job.payload),
|
|
3671
|
+
maxAttempts: job.maxAttempts ?? 3,
|
|
3672
|
+
priority: job.priority ?? 0,
|
|
3673
|
+
runAtMs: job.runAt ? job.runAt.getTime() : 0,
|
|
3674
|
+
timeoutMs: job.timeoutMs !== void 0 ? job.timeoutMs.toString() : "null",
|
|
3675
|
+
forceKillOnTimeout: job.forceKillOnTimeout ? "true" : "false",
|
|
3676
|
+
tags: job.tags ? JSON.stringify(job.tags) : "null",
|
|
3677
|
+
idempotencyKey: job.idempotencyKey ?? "null",
|
|
3678
|
+
retryDelay: job.retryDelay !== void 0 ? job.retryDelay.toString() : "null",
|
|
3679
|
+
retryBackoff: job.retryBackoff !== void 0 ? job.retryBackoff.toString() : "null",
|
|
3680
|
+
retryDelayMax: job.retryDelayMax !== void 0 ? job.retryDelayMax.toString() : "null"
|
|
3681
|
+
}));
|
|
3682
|
+
const result = await this.client.eval(
|
|
3683
|
+
ADD_JOBS_SCRIPT,
|
|
3684
|
+
1,
|
|
3685
|
+
this.prefix,
|
|
3686
|
+
JSON.stringify(jobsPayload),
|
|
3687
|
+
now
|
|
3688
|
+
);
|
|
3689
|
+
const ids = result.map(Number);
|
|
3690
|
+
log(`Batch-inserted ${jobs.length} jobs, IDs: [${ids.join(", ")}]`);
|
|
3691
|
+
const existingIdempotencyIds = /* @__PURE__ */ new Set();
|
|
3692
|
+
for (let i = 0; i < jobs.length; i++) {
|
|
3693
|
+
if (jobs[i].idempotencyKey) {
|
|
3694
|
+
if (existingIdempotencyIds.has(ids[i])) {
|
|
3695
|
+
continue;
|
|
3696
|
+
}
|
|
3697
|
+
existingIdempotencyIds.add(ids[i]);
|
|
3698
|
+
}
|
|
3699
|
+
await this.recordJobEvent(ids[i], "added" /* Added */, {
|
|
3700
|
+
jobType: jobs[i].jobType,
|
|
3701
|
+
payload: jobs[i].payload,
|
|
3702
|
+
tags: jobs[i].tags,
|
|
3703
|
+
idempotencyKey: jobs[i].idempotencyKey
|
|
3704
|
+
});
|
|
3705
|
+
}
|
|
3706
|
+
return ids;
|
|
3707
|
+
}
|
|
2554
3708
|
async getJob(id) {
|
|
2555
3709
|
const data = await this.client.hgetall(`${this.prefix}job:${id}`);
|
|
2556
3710
|
if (!data || Object.keys(data).length === 0) {
|
|
@@ -2601,8 +3755,14 @@ var RedisBackend = class {
|
|
|
2601
3755
|
if (filters.runAt) {
|
|
2602
3756
|
jobs = this.filterByRunAt(jobs, filters.runAt);
|
|
2603
3757
|
}
|
|
3758
|
+
if (filters.cursor !== void 0) {
|
|
3759
|
+
jobs = jobs.filter((j) => j.id < filters.cursor);
|
|
3760
|
+
}
|
|
3761
|
+
}
|
|
3762
|
+
jobs.sort((a, b) => b.id - a.id);
|
|
3763
|
+
if (filters?.cursor !== void 0) {
|
|
3764
|
+
return jobs.slice(0, limit);
|
|
2604
3765
|
}
|
|
2605
|
-
jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
|
|
2606
3766
|
return jobs.slice(offset, offset + limit);
|
|
2607
3767
|
}
|
|
2608
3768
|
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
@@ -2811,58 +3971,346 @@ var RedisBackend = class {
|
|
|
2811
3971
|
}
|
|
2812
3972
|
metadata.tags = updates.tags;
|
|
2813
3973
|
}
|
|
3974
|
+
if (updates.retryDelay !== void 0) {
|
|
3975
|
+
fields.push(
|
|
3976
|
+
"retryDelay",
|
|
3977
|
+
updates.retryDelay !== null ? updates.retryDelay.toString() : "null"
|
|
3978
|
+
);
|
|
3979
|
+
metadata.retryDelay = updates.retryDelay;
|
|
3980
|
+
}
|
|
3981
|
+
if (updates.retryBackoff !== void 0) {
|
|
3982
|
+
fields.push(
|
|
3983
|
+
"retryBackoff",
|
|
3984
|
+
updates.retryBackoff !== null ? updates.retryBackoff.toString() : "null"
|
|
3985
|
+
);
|
|
3986
|
+
metadata.retryBackoff = updates.retryBackoff;
|
|
3987
|
+
}
|
|
3988
|
+
if (updates.retryDelayMax !== void 0) {
|
|
3989
|
+
fields.push(
|
|
3990
|
+
"retryDelayMax",
|
|
3991
|
+
updates.retryDelayMax !== null ? updates.retryDelayMax.toString() : "null"
|
|
3992
|
+
);
|
|
3993
|
+
metadata.retryDelayMax = updates.retryDelayMax;
|
|
3994
|
+
}
|
|
2814
3995
|
if (fields.length === 0) {
|
|
2815
3996
|
log(`No fields to update for job ${jobId}`);
|
|
2816
3997
|
return;
|
|
2817
3998
|
}
|
|
2818
|
-
fields.push("updatedAt", now.toString());
|
|
2819
|
-
await this.client.hmset(jk, ...fields);
|
|
2820
|
-
await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
|
|
2821
|
-
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
3999
|
+
fields.push("updatedAt", now.toString());
|
|
4000
|
+
await this.client.hmset(jk, ...fields);
|
|
4001
|
+
await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
|
|
4002
|
+
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
4003
|
+
}
|
|
4004
|
+
async editAllPendingJobs(filters, updates) {
|
|
4005
|
+
let ids = await this.client.smembers(`${this.prefix}status:pending`);
|
|
4006
|
+
if (ids.length === 0) return 0;
|
|
4007
|
+
if (filters) {
|
|
4008
|
+
ids = await this.applyFilters(ids, filters);
|
|
4009
|
+
}
|
|
4010
|
+
let count = 0;
|
|
4011
|
+
for (const id of ids) {
|
|
4012
|
+
await this.editJob(Number(id), updates);
|
|
4013
|
+
count++;
|
|
4014
|
+
}
|
|
4015
|
+
log(`Edited ${count} pending jobs`);
|
|
4016
|
+
return count;
|
|
4017
|
+
}
|
|
4018
|
+
/**
|
|
4019
|
+
* Delete completed jobs older than the given number of days.
|
|
4020
|
+
* Uses SSCAN to iterate the completed set in batches, avoiding
|
|
4021
|
+
* loading all IDs into memory and preventing long Redis blocks.
|
|
4022
|
+
*
|
|
4023
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
4024
|
+
* @param batchSize - Number of IDs to scan per SSCAN iteration (default 200).
|
|
4025
|
+
* @returns Total number of deleted jobs.
|
|
4026
|
+
*/
|
|
4027
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 200) {
|
|
4028
|
+
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
4029
|
+
const setKey = `${this.prefix}status:completed`;
|
|
4030
|
+
let totalDeleted = 0;
|
|
4031
|
+
let cursor = "0";
|
|
4032
|
+
do {
|
|
4033
|
+
const [nextCursor, ids] = await this.client.sscan(
|
|
4034
|
+
setKey,
|
|
4035
|
+
cursor,
|
|
4036
|
+
"COUNT",
|
|
4037
|
+
batchSize
|
|
4038
|
+
);
|
|
4039
|
+
cursor = nextCursor;
|
|
4040
|
+
if (ids.length > 0) {
|
|
4041
|
+
const result = await this.client.eval(
|
|
4042
|
+
CLEANUP_OLD_JOBS_BATCH_SCRIPT,
|
|
4043
|
+
1,
|
|
4044
|
+
this.prefix,
|
|
4045
|
+
cutoffMs,
|
|
4046
|
+
...ids
|
|
4047
|
+
);
|
|
4048
|
+
totalDeleted += Number(result);
|
|
4049
|
+
}
|
|
4050
|
+
} while (cursor !== "0");
|
|
4051
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
4052
|
+
return totalDeleted;
|
|
4053
|
+
}
|
|
4054
|
+
/**
|
|
4055
|
+
* Delete job events older than the given number of days.
|
|
4056
|
+
* Iterates all event lists and removes events whose createdAt is before the cutoff.
|
|
4057
|
+
* Also removes orphaned event lists (where the job no longer exists).
|
|
4058
|
+
*
|
|
4059
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
4060
|
+
* @param batchSize - Number of event keys to scan per SCAN iteration (default 200).
|
|
4061
|
+
* @returns Total number of deleted events.
|
|
4062
|
+
*/
|
|
4063
|
+
async cleanupOldJobEvents(daysToKeep = 30, batchSize = 200) {
|
|
4064
|
+
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
4065
|
+
const pattern = `${this.prefix}events:*`;
|
|
4066
|
+
let totalDeleted = 0;
|
|
4067
|
+
let cursor = "0";
|
|
4068
|
+
do {
|
|
4069
|
+
const [nextCursor, keys] = await this.client.scan(
|
|
4070
|
+
cursor,
|
|
4071
|
+
"MATCH",
|
|
4072
|
+
pattern,
|
|
4073
|
+
"COUNT",
|
|
4074
|
+
batchSize
|
|
4075
|
+
);
|
|
4076
|
+
cursor = nextCursor;
|
|
4077
|
+
for (const key of keys) {
|
|
4078
|
+
const jobIdStr = key.slice(`${this.prefix}events:`.length);
|
|
4079
|
+
const jobExists = await this.client.exists(
|
|
4080
|
+
`${this.prefix}job:${jobIdStr}`
|
|
4081
|
+
);
|
|
4082
|
+
if (!jobExists) {
|
|
4083
|
+
const len = await this.client.llen(key);
|
|
4084
|
+
await this.client.del(key);
|
|
4085
|
+
totalDeleted += len;
|
|
4086
|
+
continue;
|
|
4087
|
+
}
|
|
4088
|
+
const events = await this.client.lrange(key, 0, -1);
|
|
4089
|
+
const kept = [];
|
|
4090
|
+
for (const raw of events) {
|
|
4091
|
+
try {
|
|
4092
|
+
const e = JSON.parse(raw);
|
|
4093
|
+
if (e.createdAt >= cutoffMs) {
|
|
4094
|
+
kept.push(raw);
|
|
4095
|
+
} else {
|
|
4096
|
+
totalDeleted++;
|
|
4097
|
+
}
|
|
4098
|
+
} catch {
|
|
4099
|
+
totalDeleted++;
|
|
4100
|
+
}
|
|
4101
|
+
}
|
|
4102
|
+
if (kept.length === 0) {
|
|
4103
|
+
await this.client.del(key);
|
|
4104
|
+
} else if (kept.length < events.length) {
|
|
4105
|
+
const pipeline = this.client.pipeline();
|
|
4106
|
+
pipeline.del(key);
|
|
4107
|
+
for (const raw of kept) {
|
|
4108
|
+
pipeline.rpush(key, raw);
|
|
4109
|
+
}
|
|
4110
|
+
await pipeline.exec();
|
|
4111
|
+
}
|
|
4112
|
+
}
|
|
4113
|
+
} while (cursor !== "0");
|
|
4114
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
4115
|
+
return totalDeleted;
|
|
4116
|
+
}
|
|
4117
|
+
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
4118
|
+
const maxAgeMs = maxProcessingTimeMinutes * 60 * 1e3;
|
|
4119
|
+
const now = this.nowMs();
|
|
4120
|
+
const result = await this.client.eval(
|
|
4121
|
+
RECLAIM_STUCK_JOBS_SCRIPT,
|
|
4122
|
+
1,
|
|
4123
|
+
this.prefix,
|
|
4124
|
+
maxAgeMs,
|
|
4125
|
+
now
|
|
4126
|
+
);
|
|
4127
|
+
log(`Reclaimed ${result} stuck jobs`);
|
|
4128
|
+
return Number(result);
|
|
4129
|
+
}
|
|
4130
|
+
// ── Wait / step-data support ────────────────────────────────────────
|
|
4131
|
+
/**
|
|
4132
|
+
* Transition a job from 'processing' to 'waiting' status.
|
|
4133
|
+
* Persists step data so the handler can resume from where it left off.
|
|
4134
|
+
*
|
|
4135
|
+
* @param jobId - The job to pause.
|
|
4136
|
+
* @param options - Wait configuration including optional waitUntil date, token ID, and step data.
|
|
4137
|
+
*/
|
|
4138
|
+
async waitJob(jobId, options) {
|
|
4139
|
+
const now = this.nowMs();
|
|
4140
|
+
const waitUntilMs = options.waitUntil ? options.waitUntil.getTime().toString() : "null";
|
|
4141
|
+
const waitTokenId = options.waitTokenId ?? "null";
|
|
4142
|
+
const stepDataJson = JSON.stringify(options.stepData);
|
|
4143
|
+
const result = await this.client.eval(
|
|
4144
|
+
WAIT_JOB_SCRIPT,
|
|
4145
|
+
1,
|
|
4146
|
+
this.prefix,
|
|
4147
|
+
jobId,
|
|
4148
|
+
waitUntilMs,
|
|
4149
|
+
waitTokenId,
|
|
4150
|
+
stepDataJson,
|
|
4151
|
+
now
|
|
4152
|
+
);
|
|
4153
|
+
if (Number(result) === 0) {
|
|
4154
|
+
log(
|
|
4155
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
4156
|
+
);
|
|
4157
|
+
return;
|
|
4158
|
+
}
|
|
4159
|
+
await this.recordJobEvent(jobId, "waiting" /* Waiting */, {
|
|
4160
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
4161
|
+
waitTokenId: options.waitTokenId ?? null
|
|
4162
|
+
});
|
|
4163
|
+
log(`Job ${jobId} set to waiting`);
|
|
2822
4164
|
}
|
|
2823
|
-
|
|
2824
|
-
|
|
2825
|
-
|
|
2826
|
-
|
|
2827
|
-
|
|
4165
|
+
/**
|
|
4166
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
4167
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
4168
|
+
*
|
|
4169
|
+
* @param jobId - The job to update.
|
|
4170
|
+
* @param stepData - The step data to persist.
|
|
4171
|
+
*/
|
|
4172
|
+
async updateStepData(jobId, stepData) {
|
|
4173
|
+
try {
|
|
4174
|
+
const now = this.nowMs();
|
|
4175
|
+
await this.client.hset(
|
|
4176
|
+
`${this.prefix}job:${jobId}`,
|
|
4177
|
+
"stepData",
|
|
4178
|
+
JSON.stringify(stepData),
|
|
4179
|
+
"updatedAt",
|
|
4180
|
+
now.toString()
|
|
4181
|
+
);
|
|
4182
|
+
} catch (error) {
|
|
4183
|
+
log(`Error updating stepData for job ${jobId}: ${error}`);
|
|
2828
4184
|
}
|
|
2829
|
-
|
|
2830
|
-
|
|
2831
|
-
|
|
2832
|
-
|
|
4185
|
+
}
|
|
4186
|
+
/**
|
|
4187
|
+
* Create a waitpoint token.
|
|
4188
|
+
*
|
|
4189
|
+
* @param jobId - The job ID to associate with the token (null if created outside a handler).
|
|
4190
|
+
* @param options - Optional timeout string (e.g. '10m', '1h') and tags.
|
|
4191
|
+
* @returns The created waitpoint with its unique ID.
|
|
4192
|
+
*/
|
|
4193
|
+
async createWaitpoint(jobId, options) {
|
|
4194
|
+
const id = `wp_${crypto.randomUUID()}`;
|
|
4195
|
+
const now = this.nowMs();
|
|
4196
|
+
let timeoutAt = null;
|
|
4197
|
+
if (options?.timeout) {
|
|
4198
|
+
const ms = parseTimeoutString2(options.timeout);
|
|
4199
|
+
timeoutAt = now + ms;
|
|
2833
4200
|
}
|
|
2834
|
-
|
|
2835
|
-
|
|
4201
|
+
const key = `${this.prefix}waitpoint:${id}`;
|
|
4202
|
+
const fields = [
|
|
4203
|
+
"id",
|
|
4204
|
+
id,
|
|
4205
|
+
"jobId",
|
|
4206
|
+
jobId !== null ? jobId.toString() : "null",
|
|
4207
|
+
"status",
|
|
4208
|
+
"waiting",
|
|
4209
|
+
"output",
|
|
4210
|
+
"null",
|
|
4211
|
+
"timeoutAt",
|
|
4212
|
+
timeoutAt !== null ? timeoutAt.toString() : "null",
|
|
4213
|
+
"createdAt",
|
|
4214
|
+
now.toString(),
|
|
4215
|
+
"completedAt",
|
|
4216
|
+
"null",
|
|
4217
|
+
"tags",
|
|
4218
|
+
options?.tags ? JSON.stringify(options.tags) : "null"
|
|
4219
|
+
];
|
|
4220
|
+
await this.client.hmset(key, ...fields);
|
|
4221
|
+
if (timeoutAt !== null) {
|
|
4222
|
+
await this.client.zadd(`${this.prefix}waitpoint_timeout`, timeoutAt, id);
|
|
4223
|
+
}
|
|
4224
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
4225
|
+
return { id };
|
|
2836
4226
|
}
|
|
2837
|
-
|
|
2838
|
-
|
|
4227
|
+
/**
|
|
4228
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
4229
|
+
*
|
|
4230
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
4231
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
4232
|
+
*/
|
|
4233
|
+
async completeWaitpoint(tokenId, data) {
|
|
4234
|
+
const now = this.nowMs();
|
|
4235
|
+
const outputJson = data != null ? JSON.stringify(data) : "null";
|
|
2839
4236
|
const result = await this.client.eval(
|
|
2840
|
-
|
|
4237
|
+
COMPLETE_WAITPOINT_SCRIPT,
|
|
2841
4238
|
1,
|
|
2842
4239
|
this.prefix,
|
|
2843
|
-
|
|
4240
|
+
tokenId,
|
|
4241
|
+
outputJson,
|
|
4242
|
+
now
|
|
2844
4243
|
);
|
|
2845
|
-
|
|
2846
|
-
|
|
4244
|
+
if (Number(result) === 0) {
|
|
4245
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
4246
|
+
return;
|
|
4247
|
+
}
|
|
4248
|
+
log(`Completed waitpoint ${tokenId}`);
|
|
2847
4249
|
}
|
|
2848
|
-
|
|
2849
|
-
|
|
2850
|
-
|
|
4250
|
+
/**
|
|
4251
|
+
* Retrieve a waitpoint token by its ID.
|
|
4252
|
+
*
|
|
4253
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
4254
|
+
* @returns The waitpoint record, or null if not found.
|
|
4255
|
+
*/
|
|
4256
|
+
async getWaitpoint(tokenId) {
|
|
4257
|
+
const data = await this.client.hgetall(
|
|
4258
|
+
`${this.prefix}waitpoint:${tokenId}`
|
|
2851
4259
|
);
|
|
2852
|
-
return
|
|
4260
|
+
if (!data || Object.keys(data).length === 0) return null;
|
|
4261
|
+
const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
|
|
4262
|
+
const numOrNull = (v) => {
|
|
4263
|
+
const n = nullish(v);
|
|
4264
|
+
return n === null ? null : Number(n);
|
|
4265
|
+
};
|
|
4266
|
+
const dateOrNull = (v) => {
|
|
4267
|
+
const n = numOrNull(v);
|
|
4268
|
+
return n === null ? null : new Date(n);
|
|
4269
|
+
};
|
|
4270
|
+
let output = null;
|
|
4271
|
+
if (data.output && data.output !== "null") {
|
|
4272
|
+
try {
|
|
4273
|
+
output = JSON.parse(data.output);
|
|
4274
|
+
} catch {
|
|
4275
|
+
output = data.output;
|
|
4276
|
+
}
|
|
4277
|
+
}
|
|
4278
|
+
let tags = null;
|
|
4279
|
+
if (data.tags && data.tags !== "null") {
|
|
4280
|
+
try {
|
|
4281
|
+
tags = JSON.parse(data.tags);
|
|
4282
|
+
} catch {
|
|
4283
|
+
}
|
|
4284
|
+
}
|
|
4285
|
+
return {
|
|
4286
|
+
id: data.id,
|
|
4287
|
+
jobId: numOrNull(data.jobId),
|
|
4288
|
+
status: data.status,
|
|
4289
|
+
output,
|
|
4290
|
+
timeoutAt: dateOrNull(data.timeoutAt),
|
|
4291
|
+
createdAt: new Date(Number(data.createdAt)),
|
|
4292
|
+
completedAt: dateOrNull(data.completedAt),
|
|
4293
|
+
tags
|
|
4294
|
+
};
|
|
2853
4295
|
}
|
|
2854
|
-
|
|
2855
|
-
|
|
4296
|
+
/**
|
|
4297
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
4298
|
+
*
|
|
4299
|
+
* @returns The number of tokens that were expired.
|
|
4300
|
+
*/
|
|
4301
|
+
async expireTimedOutWaitpoints() {
|
|
2856
4302
|
const now = this.nowMs();
|
|
2857
4303
|
const result = await this.client.eval(
|
|
2858
|
-
|
|
4304
|
+
EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT,
|
|
2859
4305
|
1,
|
|
2860
4306
|
this.prefix,
|
|
2861
|
-
maxAgeMs,
|
|
2862
4307
|
now
|
|
2863
4308
|
);
|
|
2864
|
-
|
|
2865
|
-
|
|
4309
|
+
const count = Number(result);
|
|
4310
|
+
if (count > 0) {
|
|
4311
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
4312
|
+
}
|
|
4313
|
+
return count;
|
|
2866
4314
|
}
|
|
2867
4315
|
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2868
4316
|
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
@@ -2968,6 +4416,359 @@ var RedisBackend = class {
|
|
|
2968
4416
|
return true;
|
|
2969
4417
|
});
|
|
2970
4418
|
}
|
|
4419
|
+
// ── Cron schedules ──────────────────────────────────────────────────
|
|
4420
|
+
/** Create a cron schedule and return its ID. */
|
|
4421
|
+
async addCronSchedule(input) {
|
|
4422
|
+
const existingId = await this.client.get(
|
|
4423
|
+
`${this.prefix}cron_name:${input.scheduleName}`
|
|
4424
|
+
);
|
|
4425
|
+
if (existingId !== null) {
|
|
4426
|
+
throw new Error(
|
|
4427
|
+
`Cron schedule with name "${input.scheduleName}" already exists`
|
|
4428
|
+
);
|
|
4429
|
+
}
|
|
4430
|
+
const id = await this.client.incr(`${this.prefix}cron_id_seq`);
|
|
4431
|
+
const now = this.nowMs();
|
|
4432
|
+
const key = `${this.prefix}cron:${id}`;
|
|
4433
|
+
const fields = [
|
|
4434
|
+
"id",
|
|
4435
|
+
id.toString(),
|
|
4436
|
+
"scheduleName",
|
|
4437
|
+
input.scheduleName,
|
|
4438
|
+
"cronExpression",
|
|
4439
|
+
input.cronExpression,
|
|
4440
|
+
"jobType",
|
|
4441
|
+
input.jobType,
|
|
4442
|
+
"payload",
|
|
4443
|
+
JSON.stringify(input.payload),
|
|
4444
|
+
"maxAttempts",
|
|
4445
|
+
input.maxAttempts.toString(),
|
|
4446
|
+
"priority",
|
|
4447
|
+
input.priority.toString(),
|
|
4448
|
+
"timeoutMs",
|
|
4449
|
+
input.timeoutMs !== null ? input.timeoutMs.toString() : "null",
|
|
4450
|
+
"forceKillOnTimeout",
|
|
4451
|
+
input.forceKillOnTimeout ? "true" : "false",
|
|
4452
|
+
"tags",
|
|
4453
|
+
input.tags ? JSON.stringify(input.tags) : "null",
|
|
4454
|
+
"timezone",
|
|
4455
|
+
input.timezone,
|
|
4456
|
+
"allowOverlap",
|
|
4457
|
+
input.allowOverlap ? "true" : "false",
|
|
4458
|
+
"status",
|
|
4459
|
+
"active",
|
|
4460
|
+
"lastEnqueuedAt",
|
|
4461
|
+
"null",
|
|
4462
|
+
"lastJobId",
|
|
4463
|
+
"null",
|
|
4464
|
+
"nextRunAt",
|
|
4465
|
+
input.nextRunAt ? input.nextRunAt.getTime().toString() : "null",
|
|
4466
|
+
"createdAt",
|
|
4467
|
+
now.toString(),
|
|
4468
|
+
"updatedAt",
|
|
4469
|
+
now.toString(),
|
|
4470
|
+
"retryDelay",
|
|
4471
|
+
input.retryDelay !== null && input.retryDelay !== void 0 ? input.retryDelay.toString() : "null",
|
|
4472
|
+
"retryBackoff",
|
|
4473
|
+
input.retryBackoff !== null && input.retryBackoff !== void 0 ? input.retryBackoff.toString() : "null",
|
|
4474
|
+
"retryDelayMax",
|
|
4475
|
+
input.retryDelayMax !== null && input.retryDelayMax !== void 0 ? input.retryDelayMax.toString() : "null"
|
|
4476
|
+
];
|
|
4477
|
+
await this.client.hmset(key, ...fields);
|
|
4478
|
+
await this.client.set(
|
|
4479
|
+
`${this.prefix}cron_name:${input.scheduleName}`,
|
|
4480
|
+
id.toString()
|
|
4481
|
+
);
|
|
4482
|
+
await this.client.sadd(`${this.prefix}crons`, id.toString());
|
|
4483
|
+
await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
|
|
4484
|
+
if (input.nextRunAt) {
|
|
4485
|
+
await this.client.zadd(
|
|
4486
|
+
`${this.prefix}cron_due`,
|
|
4487
|
+
input.nextRunAt.getTime(),
|
|
4488
|
+
id.toString()
|
|
4489
|
+
);
|
|
4490
|
+
}
|
|
4491
|
+
log(`Added cron schedule ${id}: "${input.scheduleName}"`);
|
|
4492
|
+
return id;
|
|
4493
|
+
}
|
|
4494
|
+
/** Get a cron schedule by ID. */
|
|
4495
|
+
async getCronSchedule(id) {
|
|
4496
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
4497
|
+
if (!data || Object.keys(data).length === 0) return null;
|
|
4498
|
+
return this.deserializeCronSchedule(data);
|
|
4499
|
+
}
|
|
4500
|
+
/** Get a cron schedule by its unique name. */
|
|
4501
|
+
async getCronScheduleByName(name) {
|
|
4502
|
+
const id = await this.client.get(`${this.prefix}cron_name:${name}`);
|
|
4503
|
+
if (id === null) return null;
|
|
4504
|
+
return this.getCronSchedule(Number(id));
|
|
4505
|
+
}
|
|
4506
|
+
/** List cron schedules, optionally filtered by status. */
|
|
4507
|
+
async listCronSchedules(status) {
|
|
4508
|
+
let ids;
|
|
4509
|
+
if (status) {
|
|
4510
|
+
ids = await this.client.smembers(`${this.prefix}cron_status:${status}`);
|
|
4511
|
+
} else {
|
|
4512
|
+
ids = await this.client.smembers(`${this.prefix}crons`);
|
|
4513
|
+
}
|
|
4514
|
+
if (ids.length === 0) return [];
|
|
4515
|
+
const pipeline = this.client.pipeline();
|
|
4516
|
+
for (const id of ids) {
|
|
4517
|
+
pipeline.hgetall(`${this.prefix}cron:${id}`);
|
|
4518
|
+
}
|
|
4519
|
+
const results = await pipeline.exec();
|
|
4520
|
+
const schedules = [];
|
|
4521
|
+
if (results) {
|
|
4522
|
+
for (const [err, data] of results) {
|
|
4523
|
+
if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
|
|
4524
|
+
schedules.push(
|
|
4525
|
+
this.deserializeCronSchedule(data)
|
|
4526
|
+
);
|
|
4527
|
+
}
|
|
4528
|
+
}
|
|
4529
|
+
}
|
|
4530
|
+
schedules.sort((a, b) => a.createdAt.getTime() - b.createdAt.getTime());
|
|
4531
|
+
return schedules;
|
|
4532
|
+
}
|
|
4533
|
+
/** Delete a cron schedule by ID. */
|
|
4534
|
+
async removeCronSchedule(id) {
|
|
4535
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
4536
|
+
if (!data || Object.keys(data).length === 0) return;
|
|
4537
|
+
const name = data.scheduleName;
|
|
4538
|
+
const status = data.status;
|
|
4539
|
+
await this.client.del(`${this.prefix}cron:${id}`);
|
|
4540
|
+
await this.client.del(`${this.prefix}cron_name:${name}`);
|
|
4541
|
+
await this.client.srem(`${this.prefix}crons`, id.toString());
|
|
4542
|
+
await this.client.srem(
|
|
4543
|
+
`${this.prefix}cron_status:${status}`,
|
|
4544
|
+
id.toString()
|
|
4545
|
+
);
|
|
4546
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
4547
|
+
log(`Removed cron schedule ${id}`);
|
|
4548
|
+
}
|
|
4549
|
+
/** Pause a cron schedule. */
|
|
4550
|
+
async pauseCronSchedule(id) {
|
|
4551
|
+
const now = this.nowMs();
|
|
4552
|
+
await this.client.hset(
|
|
4553
|
+
`${this.prefix}cron:${id}`,
|
|
4554
|
+
"status",
|
|
4555
|
+
"paused",
|
|
4556
|
+
"updatedAt",
|
|
4557
|
+
now.toString()
|
|
4558
|
+
);
|
|
4559
|
+
await this.client.srem(`${this.prefix}cron_status:active`, id.toString());
|
|
4560
|
+
await this.client.sadd(`${this.prefix}cron_status:paused`, id.toString());
|
|
4561
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
4562
|
+
log(`Paused cron schedule ${id}`);
|
|
4563
|
+
}
|
|
4564
|
+
/** Resume a paused cron schedule. */
|
|
4565
|
+
async resumeCronSchedule(id) {
|
|
4566
|
+
const now = this.nowMs();
|
|
4567
|
+
await this.client.hset(
|
|
4568
|
+
`${this.prefix}cron:${id}`,
|
|
4569
|
+
"status",
|
|
4570
|
+
"active",
|
|
4571
|
+
"updatedAt",
|
|
4572
|
+
now.toString()
|
|
4573
|
+
);
|
|
4574
|
+
await this.client.srem(`${this.prefix}cron_status:paused`, id.toString());
|
|
4575
|
+
await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
|
|
4576
|
+
const nextRunAt = await this.client.hget(
|
|
4577
|
+
`${this.prefix}cron:${id}`,
|
|
4578
|
+
"nextRunAt"
|
|
4579
|
+
);
|
|
4580
|
+
if (nextRunAt && nextRunAt !== "null") {
|
|
4581
|
+
await this.client.zadd(
|
|
4582
|
+
`${this.prefix}cron_due`,
|
|
4583
|
+
Number(nextRunAt),
|
|
4584
|
+
id.toString()
|
|
4585
|
+
);
|
|
4586
|
+
}
|
|
4587
|
+
log(`Resumed cron schedule ${id}`);
|
|
4588
|
+
}
|
|
4589
|
+
/** Edit a cron schedule. */
|
|
4590
|
+
async editCronSchedule(id, updates, nextRunAt) {
|
|
4591
|
+
const now = this.nowMs();
|
|
4592
|
+
const fields = [];
|
|
4593
|
+
if (updates.cronExpression !== void 0) {
|
|
4594
|
+
fields.push("cronExpression", updates.cronExpression);
|
|
4595
|
+
}
|
|
4596
|
+
if (updates.payload !== void 0) {
|
|
4597
|
+
fields.push("payload", JSON.stringify(updates.payload));
|
|
4598
|
+
}
|
|
4599
|
+
if (updates.maxAttempts !== void 0) {
|
|
4600
|
+
fields.push("maxAttempts", updates.maxAttempts.toString());
|
|
4601
|
+
}
|
|
4602
|
+
if (updates.priority !== void 0) {
|
|
4603
|
+
fields.push("priority", updates.priority.toString());
|
|
4604
|
+
}
|
|
4605
|
+
if (updates.timeoutMs !== void 0) {
|
|
4606
|
+
fields.push(
|
|
4607
|
+
"timeoutMs",
|
|
4608
|
+
updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
|
|
4609
|
+
);
|
|
4610
|
+
}
|
|
4611
|
+
if (updates.forceKillOnTimeout !== void 0) {
|
|
4612
|
+
fields.push(
|
|
4613
|
+
"forceKillOnTimeout",
|
|
4614
|
+
updates.forceKillOnTimeout ? "true" : "false"
|
|
4615
|
+
);
|
|
4616
|
+
}
|
|
4617
|
+
if (updates.tags !== void 0) {
|
|
4618
|
+
fields.push(
|
|
4619
|
+
"tags",
|
|
4620
|
+
updates.tags !== null ? JSON.stringify(updates.tags) : "null"
|
|
4621
|
+
);
|
|
4622
|
+
}
|
|
4623
|
+
if (updates.timezone !== void 0) {
|
|
4624
|
+
fields.push("timezone", updates.timezone);
|
|
4625
|
+
}
|
|
4626
|
+
if (updates.allowOverlap !== void 0) {
|
|
4627
|
+
fields.push("allowOverlap", updates.allowOverlap ? "true" : "false");
|
|
4628
|
+
}
|
|
4629
|
+
if (updates.retryDelay !== void 0) {
|
|
4630
|
+
fields.push(
|
|
4631
|
+
"retryDelay",
|
|
4632
|
+
updates.retryDelay !== null ? updates.retryDelay.toString() : "null"
|
|
4633
|
+
);
|
|
4634
|
+
}
|
|
4635
|
+
if (updates.retryBackoff !== void 0) {
|
|
4636
|
+
fields.push(
|
|
4637
|
+
"retryBackoff",
|
|
4638
|
+
updates.retryBackoff !== null ? updates.retryBackoff.toString() : "null"
|
|
4639
|
+
);
|
|
4640
|
+
}
|
|
4641
|
+
if (updates.retryDelayMax !== void 0) {
|
|
4642
|
+
fields.push(
|
|
4643
|
+
"retryDelayMax",
|
|
4644
|
+
updates.retryDelayMax !== null ? updates.retryDelayMax.toString() : "null"
|
|
4645
|
+
);
|
|
4646
|
+
}
|
|
4647
|
+
if (nextRunAt !== void 0) {
|
|
4648
|
+
const val = nextRunAt !== null ? nextRunAt.getTime().toString() : "null";
|
|
4649
|
+
fields.push("nextRunAt", val);
|
|
4650
|
+
if (nextRunAt !== null) {
|
|
4651
|
+
await this.client.zadd(
|
|
4652
|
+
`${this.prefix}cron_due`,
|
|
4653
|
+
nextRunAt.getTime(),
|
|
4654
|
+
id.toString()
|
|
4655
|
+
);
|
|
4656
|
+
} else {
|
|
4657
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
4658
|
+
}
|
|
4659
|
+
}
|
|
4660
|
+
if (fields.length === 0) {
|
|
4661
|
+
log(`No fields to update for cron schedule ${id}`);
|
|
4662
|
+
return;
|
|
4663
|
+
}
|
|
4664
|
+
fields.push("updatedAt", now.toString());
|
|
4665
|
+
await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
|
|
4666
|
+
log(`Edited cron schedule ${id}`);
|
|
4667
|
+
}
|
|
4668
|
+
/**
|
|
4669
|
+
* Fetch all active cron schedules whose nextRunAt <= now.
|
|
4670
|
+
* Uses a sorted set (cron_due) for efficient range query.
|
|
4671
|
+
*/
|
|
4672
|
+
async getDueCronSchedules() {
|
|
4673
|
+
const now = this.nowMs();
|
|
4674
|
+
const ids = await this.client.zrangebyscore(
|
|
4675
|
+
`${this.prefix}cron_due`,
|
|
4676
|
+
0,
|
|
4677
|
+
now
|
|
4678
|
+
);
|
|
4679
|
+
if (ids.length === 0) {
|
|
4680
|
+
log("Found 0 due cron schedules");
|
|
4681
|
+
return [];
|
|
4682
|
+
}
|
|
4683
|
+
const schedules = [];
|
|
4684
|
+
for (const id of ids) {
|
|
4685
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
4686
|
+
if (data && Object.keys(data).length > 0 && data.status === "active") {
|
|
4687
|
+
schedules.push(this.deserializeCronSchedule(data));
|
|
4688
|
+
}
|
|
4689
|
+
}
|
|
4690
|
+
log(`Found ${schedules.length} due cron schedules`);
|
|
4691
|
+
return schedules;
|
|
4692
|
+
}
|
|
4693
|
+
/**
|
|
4694
|
+
* Update a cron schedule after a job has been enqueued.
|
|
4695
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
4696
|
+
*/
|
|
4697
|
+
async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
|
|
4698
|
+
const fields = [
|
|
4699
|
+
"lastEnqueuedAt",
|
|
4700
|
+
lastEnqueuedAt.getTime().toString(),
|
|
4701
|
+
"lastJobId",
|
|
4702
|
+
lastJobId.toString(),
|
|
4703
|
+
"nextRunAt",
|
|
4704
|
+
nextRunAt ? nextRunAt.getTime().toString() : "null",
|
|
4705
|
+
"updatedAt",
|
|
4706
|
+
this.nowMs().toString()
|
|
4707
|
+
];
|
|
4708
|
+
await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
|
|
4709
|
+
if (nextRunAt) {
|
|
4710
|
+
await this.client.zadd(
|
|
4711
|
+
`${this.prefix}cron_due`,
|
|
4712
|
+
nextRunAt.getTime(),
|
|
4713
|
+
id.toString()
|
|
4714
|
+
);
|
|
4715
|
+
} else {
|
|
4716
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
4717
|
+
}
|
|
4718
|
+
log(
|
|
4719
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
|
|
4720
|
+
);
|
|
4721
|
+
}
|
|
4722
|
+
/** Deserialize a Redis hash into a CronScheduleRecord. */
|
|
4723
|
+
deserializeCronSchedule(h) {
|
|
4724
|
+
const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
|
|
4725
|
+
const numOrNull = (v) => {
|
|
4726
|
+
const n = nullish(v);
|
|
4727
|
+
return n === null ? null : Number(n);
|
|
4728
|
+
};
|
|
4729
|
+
const dateOrNull = (v) => {
|
|
4730
|
+
const n = numOrNull(v);
|
|
4731
|
+
return n === null ? null : new Date(n);
|
|
4732
|
+
};
|
|
4733
|
+
let payload;
|
|
4734
|
+
try {
|
|
4735
|
+
payload = JSON.parse(h.payload);
|
|
4736
|
+
} catch {
|
|
4737
|
+
payload = h.payload;
|
|
4738
|
+
}
|
|
4739
|
+
let tags;
|
|
4740
|
+
try {
|
|
4741
|
+
const raw = h.tags;
|
|
4742
|
+
if (raw && raw !== "null") {
|
|
4743
|
+
tags = JSON.parse(raw);
|
|
4744
|
+
}
|
|
4745
|
+
} catch {
|
|
4746
|
+
}
|
|
4747
|
+
return {
|
|
4748
|
+
id: Number(h.id),
|
|
4749
|
+
scheduleName: h.scheduleName,
|
|
4750
|
+
cronExpression: h.cronExpression,
|
|
4751
|
+
jobType: h.jobType,
|
|
4752
|
+
payload,
|
|
4753
|
+
maxAttempts: Number(h.maxAttempts),
|
|
4754
|
+
priority: Number(h.priority),
|
|
4755
|
+
timeoutMs: numOrNull(h.timeoutMs),
|
|
4756
|
+
forceKillOnTimeout: h.forceKillOnTimeout === "true",
|
|
4757
|
+
tags,
|
|
4758
|
+
timezone: h.timezone,
|
|
4759
|
+
allowOverlap: h.allowOverlap === "true",
|
|
4760
|
+
status: h.status,
|
|
4761
|
+
lastEnqueuedAt: dateOrNull(h.lastEnqueuedAt),
|
|
4762
|
+
lastJobId: numOrNull(h.lastJobId),
|
|
4763
|
+
nextRunAt: dateOrNull(h.nextRunAt),
|
|
4764
|
+
createdAt: new Date(Number(h.createdAt)),
|
|
4765
|
+
updatedAt: new Date(Number(h.updatedAt)),
|
|
4766
|
+
retryDelay: numOrNull(h.retryDelay),
|
|
4767
|
+
retryBackoff: h.retryBackoff === "true" ? true : h.retryBackoff === "false" ? false : null,
|
|
4768
|
+
retryDelayMax: numOrNull(h.retryDelayMax)
|
|
4769
|
+
};
|
|
4770
|
+
}
|
|
4771
|
+
// ── Private helpers (filters) ─────────────────────────────────────────
|
|
2971
4772
|
async applyFilters(ids, filters) {
|
|
2972
4773
|
let result = ids;
|
|
2973
4774
|
if (filters.jobType) {
|
|
@@ -2997,6 +4798,19 @@ var RedisBackend = class {
|
|
|
2997
4798
|
return result;
|
|
2998
4799
|
}
|
|
2999
4800
|
};
|
|
4801
|
+
/**
 * Compute the next occurrence of a cron expression.
 *
 * @param cronExpression - Cron expression to evaluate.
 * @param timezone - IANA timezone name (default "UTC").
 * @param after - Reference time; defaults to now.
 * @param CronImpl - Injectable Cron constructor (defaults to croner.Cron).
 * @returns The next run Date, or null if there is no future occurrence.
 */
function getNextCronOccurrence(cronExpression, timezone = "UTC", after, CronImpl = croner.Cron) {
  const schedule = new CronImpl(cronExpression, { timezone });
  const reference = after ?? new Date();
  const upcoming = schedule.nextRun(reference);
  // croner returns undefined when the schedule has no next run.
  return upcoming ?? null;
}
|
|
4806
|
+
/**
 * Check whether a cron expression is syntactically valid.
 *
 * @param cronExpression - Cron expression to validate.
 * @param CronImpl - Injectable Cron constructor (defaults to croner.Cron).
 * @returns true if the expression parses, false otherwise.
 */
function validateCronExpression(cronExpression, CronImpl = croner.Cron) {
  try {
    // croner's constructor throws on a malformed expression.
    new CronImpl(cronExpression);
  } catch {
    return false;
  }
  return true;
}
|
|
3000
4814
|
|
|
3001
4815
|
// src/handler-validation.ts
|
|
3002
4816
|
function validateHandlerSerializable2(handler, jobType) {
|
|
@@ -3072,29 +4886,103 @@ var initJobQueue = (config) => {
|
|
|
3072
4886
|
const backendType = config.backend ?? "postgres";
|
|
3073
4887
|
setLogContext(config.verbose ?? false);
|
|
3074
4888
|
let backend;
|
|
3075
|
-
let pool;
|
|
3076
4889
|
if (backendType === "postgres") {
|
|
3077
4890
|
const pgConfig = config;
|
|
3078
|
-
|
|
3079
|
-
|
|
4891
|
+
if (pgConfig.pool) {
|
|
4892
|
+
backend = new PostgresBackend(pgConfig.pool);
|
|
4893
|
+
} else if (pgConfig.databaseConfig) {
|
|
4894
|
+
const pool = createPool(pgConfig.databaseConfig);
|
|
4895
|
+
backend = new PostgresBackend(pool);
|
|
4896
|
+
} else {
|
|
4897
|
+
throw new Error(
|
|
4898
|
+
'PostgreSQL backend requires either "databaseConfig" or "pool" to be provided.'
|
|
4899
|
+
);
|
|
4900
|
+
}
|
|
3080
4901
|
} else if (backendType === "redis") {
|
|
3081
|
-
const redisConfig = config
|
|
3082
|
-
|
|
4902
|
+
const redisConfig = config;
|
|
4903
|
+
if (redisConfig.client) {
|
|
4904
|
+
backend = new RedisBackend(
|
|
4905
|
+
redisConfig.client,
|
|
4906
|
+
redisConfig.keyPrefix
|
|
4907
|
+
);
|
|
4908
|
+
} else if (redisConfig.redisConfig) {
|
|
4909
|
+
backend = new RedisBackend(redisConfig.redisConfig);
|
|
4910
|
+
} else {
|
|
4911
|
+
throw new Error(
|
|
4912
|
+
'Redis backend requires either "redisConfig" or "client" to be provided.'
|
|
4913
|
+
);
|
|
4914
|
+
}
|
|
3083
4915
|
} else {
|
|
3084
4916
|
throw new Error(`Unknown backend: ${backendType}`);
|
|
3085
4917
|
}
|
|
3086
|
-
const
|
|
3087
|
-
|
|
3088
|
-
|
|
3089
|
-
|
|
4918
|
+
const emitter = new events.EventEmitter();
|
|
4919
|
+
const emit = (event, data) => {
|
|
4920
|
+
emitter.emit(event, data);
|
|
4921
|
+
};
|
|
4922
|
+
const enqueueDueCronJobsImpl = async () => {
|
|
4923
|
+
const dueSchedules = await backend.getDueCronSchedules();
|
|
4924
|
+
let count = 0;
|
|
4925
|
+
for (const schedule of dueSchedules) {
|
|
4926
|
+
if (!schedule.allowOverlap && schedule.lastJobId !== null) {
|
|
4927
|
+
const lastJob = await backend.getJob(schedule.lastJobId);
|
|
4928
|
+
if (lastJob && (lastJob.status === "pending" || lastJob.status === "processing" || lastJob.status === "waiting")) {
|
|
4929
|
+
const nextRunAt2 = getNextCronOccurrence(
|
|
4930
|
+
schedule.cronExpression,
|
|
4931
|
+
schedule.timezone
|
|
4932
|
+
);
|
|
4933
|
+
await backend.updateCronScheduleAfterEnqueue(
|
|
4934
|
+
schedule.id,
|
|
4935
|
+
/* @__PURE__ */ new Date(),
|
|
4936
|
+
schedule.lastJobId,
|
|
4937
|
+
nextRunAt2
|
|
4938
|
+
);
|
|
4939
|
+
continue;
|
|
4940
|
+
}
|
|
4941
|
+
}
|
|
4942
|
+
const jobId = await backend.addJob({
|
|
4943
|
+
jobType: schedule.jobType,
|
|
4944
|
+
payload: schedule.payload,
|
|
4945
|
+
maxAttempts: schedule.maxAttempts,
|
|
4946
|
+
priority: schedule.priority,
|
|
4947
|
+
timeoutMs: schedule.timeoutMs ?? void 0,
|
|
4948
|
+
forceKillOnTimeout: schedule.forceKillOnTimeout,
|
|
4949
|
+
tags: schedule.tags,
|
|
4950
|
+
retryDelay: schedule.retryDelay ?? void 0,
|
|
4951
|
+
retryBackoff: schedule.retryBackoff ?? void 0,
|
|
4952
|
+
retryDelayMax: schedule.retryDelayMax ?? void 0
|
|
4953
|
+
});
|
|
4954
|
+
const nextRunAt = getNextCronOccurrence(
|
|
4955
|
+
schedule.cronExpression,
|
|
4956
|
+
schedule.timezone
|
|
4957
|
+
);
|
|
4958
|
+
await backend.updateCronScheduleAfterEnqueue(
|
|
4959
|
+
schedule.id,
|
|
4960
|
+
/* @__PURE__ */ new Date(),
|
|
4961
|
+
jobId,
|
|
4962
|
+
nextRunAt
|
|
3090
4963
|
);
|
|
4964
|
+
count++;
|
|
3091
4965
|
}
|
|
3092
|
-
return
|
|
4966
|
+
return count;
|
|
3093
4967
|
};
|
|
3094
4968
|
return {
|
|
3095
4969
|
// Job queue operations
|
|
3096
4970
|
addJob: withLogContext(
|
|
3097
|
-
(job) =>
|
|
4971
|
+
async (job, options) => {
|
|
4972
|
+
const jobId = await backend.addJob(job, options);
|
|
4973
|
+
emit("job:added", { jobId, jobType: job.jobType });
|
|
4974
|
+
return jobId;
|
|
4975
|
+
},
|
|
4976
|
+
config.verbose ?? false
|
|
4977
|
+
),
|
|
4978
|
+
addJobs: withLogContext(
|
|
4979
|
+
async (jobs, options) => {
|
|
4980
|
+
const jobIds = await backend.addJobs(jobs, options);
|
|
4981
|
+
for (let i = 0; i < jobIds.length; i++) {
|
|
4982
|
+
emit("job:added", { jobId: jobIds[i], jobType: jobs[i].jobType });
|
|
4983
|
+
}
|
|
4984
|
+
return jobIds;
|
|
4985
|
+
},
|
|
3098
4986
|
config.verbose ?? false
|
|
3099
4987
|
),
|
|
3100
4988
|
getJob: withLogContext(
|
|
@@ -3113,13 +5001,16 @@ var initJobQueue = (config) => {
|
|
|
3113
5001
|
(filters, limit, offset) => backend.getJobs(filters, limit, offset),
|
|
3114
5002
|
config.verbose ?? false
|
|
3115
5003
|
),
|
|
3116
|
-
retryJob: (jobId) =>
|
|
3117
|
-
|
|
3118
|
-
|
|
3119
|
-
|
|
3120
|
-
|
|
3121
|
-
|
|
3122
|
-
)
|
|
5004
|
+
retryJob: async (jobId) => {
|
|
5005
|
+
await backend.retryJob(jobId);
|
|
5006
|
+
emit("job:retried", { jobId });
|
|
5007
|
+
},
|
|
5008
|
+
cleanupOldJobs: (daysToKeep, batchSize) => backend.cleanupOldJobs(daysToKeep, batchSize),
|
|
5009
|
+
cleanupOldJobEvents: (daysToKeep, batchSize) => backend.cleanupOldJobEvents(daysToKeep, batchSize),
|
|
5010
|
+
cancelJob: withLogContext(async (jobId) => {
|
|
5011
|
+
await backend.cancelJob(jobId);
|
|
5012
|
+
emit("job:cancelled", { jobId });
|
|
5013
|
+
}, config.verbose ?? false),
|
|
3123
5014
|
editJob: withLogContext(
|
|
3124
5015
|
(jobId, updates) => backend.editJob(jobId, updates),
|
|
3125
5016
|
config.verbose ?? false
|
|
@@ -3143,33 +5034,139 @@ var initJobQueue = (config) => {
|
|
|
3143
5034
|
(tags, mode = "all", limit, offset) => backend.getJobsByTags(tags, mode, limit, offset),
|
|
3144
5035
|
config.verbose ?? false
|
|
3145
5036
|
),
|
|
3146
|
-
// Job processing
|
|
3147
|
-
createProcessor: (handlers, options) => createProcessor(
|
|
5037
|
+
// Job processing — automatically enqueues due cron jobs before each batch
|
|
5038
|
+
createProcessor: (handlers, options) => createProcessor(
|
|
5039
|
+
backend,
|
|
5040
|
+
handlers,
|
|
5041
|
+
options,
|
|
5042
|
+
async () => {
|
|
5043
|
+
await enqueueDueCronJobsImpl();
|
|
5044
|
+
},
|
|
5045
|
+
emit
|
|
5046
|
+
),
|
|
5047
|
+
// Background supervisor — automated maintenance
|
|
5048
|
+
createSupervisor: (options) => createSupervisor(backend, options, emit),
|
|
3148
5049
|
// Job events
|
|
3149
5050
|
getJobEvents: withLogContext(
|
|
3150
5051
|
(jobId) => backend.getJobEvents(jobId),
|
|
3151
5052
|
config.verbose ?? false
|
|
3152
5053
|
),
|
|
3153
|
-
// Wait / Token support (
|
|
5054
|
+
// Wait / Token support (works with all backends)
|
|
3154
5055
|
createToken: withLogContext(
|
|
3155
|
-
(options) => createWaitpoint(
|
|
5056
|
+
(options) => backend.createWaitpoint(null, options),
|
|
3156
5057
|
config.verbose ?? false
|
|
3157
5058
|
),
|
|
3158
5059
|
completeToken: withLogContext(
|
|
3159
|
-
(tokenId, data) => completeWaitpoint(
|
|
5060
|
+
(tokenId, data) => backend.completeWaitpoint(tokenId, data),
|
|
3160
5061
|
config.verbose ?? false
|
|
3161
5062
|
),
|
|
3162
5063
|
getToken: withLogContext(
|
|
3163
|
-
(tokenId) => getWaitpoint(
|
|
5064
|
+
(tokenId) => backend.getWaitpoint(tokenId),
|
|
3164
5065
|
config.verbose ?? false
|
|
3165
5066
|
),
|
|
3166
5067
|
expireTimedOutTokens: withLogContext(
|
|
3167
|
-
() => expireTimedOutWaitpoints(
|
|
5068
|
+
() => backend.expireTimedOutWaitpoints(),
|
|
5069
|
+
config.verbose ?? false
|
|
5070
|
+
),
|
|
5071
|
+
// Cron schedule operations
|
|
5072
|
+
addCronJob: withLogContext(
|
|
5073
|
+
(options) => {
|
|
5074
|
+
if (!validateCronExpression(options.cronExpression)) {
|
|
5075
|
+
return Promise.reject(
|
|
5076
|
+
new Error(`Invalid cron expression: "${options.cronExpression}"`)
|
|
5077
|
+
);
|
|
5078
|
+
}
|
|
5079
|
+
const nextRunAt = getNextCronOccurrence(
|
|
5080
|
+
options.cronExpression,
|
|
5081
|
+
options.timezone ?? "UTC"
|
|
5082
|
+
);
|
|
5083
|
+
const input = {
|
|
5084
|
+
scheduleName: options.scheduleName,
|
|
5085
|
+
cronExpression: options.cronExpression,
|
|
5086
|
+
jobType: options.jobType,
|
|
5087
|
+
payload: options.payload,
|
|
5088
|
+
maxAttempts: options.maxAttempts ?? 3,
|
|
5089
|
+
priority: options.priority ?? 0,
|
|
5090
|
+
timeoutMs: options.timeoutMs ?? null,
|
|
5091
|
+
forceKillOnTimeout: options.forceKillOnTimeout ?? false,
|
|
5092
|
+
tags: options.tags,
|
|
5093
|
+
timezone: options.timezone ?? "UTC",
|
|
5094
|
+
allowOverlap: options.allowOverlap ?? false,
|
|
5095
|
+
nextRunAt,
|
|
5096
|
+
retryDelay: options.retryDelay ?? null,
|
|
5097
|
+
retryBackoff: options.retryBackoff ?? null,
|
|
5098
|
+
retryDelayMax: options.retryDelayMax ?? null
|
|
5099
|
+
};
|
|
5100
|
+
return backend.addCronSchedule(input);
|
|
5101
|
+
},
|
|
5102
|
+
config.verbose ?? false
|
|
5103
|
+
),
|
|
5104
|
+
getCronJob: withLogContext(
|
|
5105
|
+
(id) => backend.getCronSchedule(id),
|
|
5106
|
+
config.verbose ?? false
|
|
5107
|
+
),
|
|
5108
|
+
getCronJobByName: withLogContext(
|
|
5109
|
+
(name) => backend.getCronScheduleByName(name),
|
|
5110
|
+
config.verbose ?? false
|
|
5111
|
+
),
|
|
5112
|
+
listCronJobs: withLogContext(
|
|
5113
|
+
(status) => backend.listCronSchedules(status),
|
|
5114
|
+
config.verbose ?? false
|
|
5115
|
+
),
|
|
5116
|
+
removeCronJob: withLogContext(
|
|
5117
|
+
(id) => backend.removeCronSchedule(id),
|
|
5118
|
+
config.verbose ?? false
|
|
5119
|
+
),
|
|
5120
|
+
pauseCronJob: withLogContext(
|
|
5121
|
+
(id) => backend.pauseCronSchedule(id),
|
|
5122
|
+
config.verbose ?? false
|
|
5123
|
+
),
|
|
5124
|
+
resumeCronJob: withLogContext(
|
|
5125
|
+
(id) => backend.resumeCronSchedule(id),
|
|
5126
|
+
config.verbose ?? false
|
|
5127
|
+
),
|
|
5128
|
+
editCronJob: withLogContext(
|
|
5129
|
+
async (id, updates) => {
|
|
5130
|
+
if (updates.cronExpression !== void 0 && !validateCronExpression(updates.cronExpression)) {
|
|
5131
|
+
throw new Error(
|
|
5132
|
+
`Invalid cron expression: "${updates.cronExpression}"`
|
|
5133
|
+
);
|
|
5134
|
+
}
|
|
5135
|
+
let nextRunAt;
|
|
5136
|
+
if (updates.cronExpression !== void 0 || updates.timezone !== void 0) {
|
|
5137
|
+
const existing = await backend.getCronSchedule(id);
|
|
5138
|
+
const expr = updates.cronExpression ?? existing?.cronExpression ?? "";
|
|
5139
|
+
const tz = updates.timezone ?? existing?.timezone ?? "UTC";
|
|
5140
|
+
nextRunAt = getNextCronOccurrence(expr, tz);
|
|
5141
|
+
}
|
|
5142
|
+
await backend.editCronSchedule(id, updates, nextRunAt);
|
|
5143
|
+
},
|
|
3168
5144
|
config.verbose ?? false
|
|
3169
5145
|
),
|
|
5146
|
+
enqueueDueCronJobs: withLogContext(
|
|
5147
|
+
() => enqueueDueCronJobsImpl(),
|
|
5148
|
+
config.verbose ?? false
|
|
5149
|
+
),
|
|
5150
|
+
// Event hooks
|
|
5151
|
+
on: (event, listener) => {
|
|
5152
|
+
emitter.on(event, listener);
|
|
5153
|
+
},
|
|
5154
|
+
once: (event, listener) => {
|
|
5155
|
+
emitter.once(event, listener);
|
|
5156
|
+
},
|
|
5157
|
+
off: (event, listener) => {
|
|
5158
|
+
emitter.off(event, listener);
|
|
5159
|
+
},
|
|
5160
|
+
removeAllListeners: (event) => {
|
|
5161
|
+
if (event) {
|
|
5162
|
+
emitter.removeAllListeners(event);
|
|
5163
|
+
} else {
|
|
5164
|
+
emitter.removeAllListeners();
|
|
5165
|
+
}
|
|
5166
|
+
},
|
|
3170
5167
|
// Advanced access
|
|
3171
5168
|
getPool: () => {
|
|
3172
|
-
if (
|
|
5169
|
+
if (!(backend instanceof PostgresBackend)) {
|
|
3173
5170
|
throw new Error(
|
|
3174
5171
|
"getPool() is only available with the PostgreSQL backend."
|
|
3175
5172
|
);
|
|
@@ -3195,8 +5192,10 @@ exports.FailureReason = FailureReason;
|
|
|
3195
5192
|
exports.JobEventType = JobEventType;
|
|
3196
5193
|
exports.PostgresBackend = PostgresBackend;
|
|
3197
5194
|
exports.WaitSignal = WaitSignal;
|
|
5195
|
+
exports.getNextCronOccurrence = getNextCronOccurrence;
|
|
3198
5196
|
exports.initJobQueue = initJobQueue;
|
|
3199
5197
|
exports.testHandlerSerialization = testHandlerSerialization;
|
|
5198
|
+
exports.validateCronExpression = validateCronExpression;
|
|
3200
5199
|
exports.validateHandlerSerializable = validateHandlerSerializable2;
|
|
3201
5200
|
//# sourceMappingURL=index.cjs.map
|
|
3202
5201
|
//# sourceMappingURL=index.cjs.map
|