@nicnocquee/dataqueue 1.30.0 → 1.32.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2531 -1283
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +367 -17
- package/dist/index.d.ts +367 -17
- package/dist/index.js +2530 -1284
- package/dist/index.js.map +1 -1
- package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
- package/package.json +3 -2
- package/src/backend.ts +139 -4
- package/src/backends/postgres.ts +676 -30
- package/src/backends/redis-scripts.ts +197 -22
- package/src/backends/redis.test.ts +971 -0
- package/src/backends/redis.ts +789 -22
- package/src/cron.test.ts +126 -0
- package/src/cron.ts +40 -0
- package/src/index.test.ts +361 -0
- package/src/index.ts +165 -29
- package/src/processor.ts +36 -97
- package/src/queue.test.ts +29 -0
- package/src/queue.ts +19 -251
- package/src/types.ts +177 -10
package/dist/index.cjs
CHANGED
|
@@ -1,18 +1,21 @@
|
|
|
1
1
|
'use strict';
|
|
2
2
|
|
|
3
|
-
var async_hooks = require('async_hooks');
|
|
4
|
-
var crypto = require('crypto');
|
|
5
3
|
var worker_threads = require('worker_threads');
|
|
4
|
+
var async_hooks = require('async_hooks');
|
|
6
5
|
var pg = require('pg');
|
|
7
6
|
var pgConnectionString = require('pg-connection-string');
|
|
8
7
|
var fs = require('fs');
|
|
8
|
+
var crypto = require('crypto');
|
|
9
9
|
var module$1 = require('module');
|
|
10
|
+
var croner = require('croner');
|
|
10
11
|
|
|
11
12
|
var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null;
|
|
12
13
|
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
13
14
|
|
|
14
15
|
var fs__default = /*#__PURE__*/_interopDefault(fs);
|
|
15
16
|
|
|
17
|
+
// src/processor.ts
|
|
18
|
+
|
|
16
19
|
// src/types.ts
|
|
17
20
|
var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
18
21
|
JobEventType2["Added"] = "added";
|
|
@@ -26,11 +29,11 @@ var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
|
26
29
|
JobEventType2["Waiting"] = "waiting";
|
|
27
30
|
return JobEventType2;
|
|
28
31
|
})(JobEventType || {});
|
|
29
|
-
var FailureReason = /* @__PURE__ */ ((
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
return
|
|
32
|
+
var FailureReason = /* @__PURE__ */ ((FailureReason4) => {
|
|
33
|
+
FailureReason4["Timeout"] = "timeout";
|
|
34
|
+
FailureReason4["HandlerError"] = "handler_error";
|
|
35
|
+
FailureReason4["NoHandler"] = "no_handler";
|
|
36
|
+
return FailureReason4;
|
|
34
37
|
})(FailureReason || {});
|
|
35
38
|
var WaitSignal = class extends Error {
|
|
36
39
|
constructor(type, waitUntil, tokenId, stepData) {
|
|
@@ -57,250 +60,954 @@ var log = (message) => {
|
|
|
57
60
|
}
|
|
58
61
|
};
|
|
59
62
|
|
|
60
|
-
// src/
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
return this.pool;
|
|
68
|
-
}
|
|
69
|
-
// ── Events ──────────────────────────────────────────────────────────
|
|
70
|
-
async recordJobEvent(jobId, eventType, metadata) {
|
|
71
|
-
const client = await this.pool.connect();
|
|
72
|
-
try {
|
|
73
|
-
await client.query(
|
|
74
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
75
|
-
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
76
|
-
);
|
|
77
|
-
} catch (error) {
|
|
78
|
-
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
79
|
-
} finally {
|
|
80
|
-
client.release();
|
|
81
|
-
}
|
|
82
|
-
}
|
|
83
|
-
async getJobEvents(jobId) {
|
|
84
|
-
const client = await this.pool.connect();
|
|
85
|
-
try {
|
|
86
|
-
const res = await client.query(
|
|
87
|
-
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
88
|
-
[jobId]
|
|
89
|
-
);
|
|
90
|
-
return res.rows;
|
|
91
|
-
} finally {
|
|
92
|
-
client.release();
|
|
93
|
-
}
|
|
94
|
-
}
|
|
95
|
-
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
96
|
-
async addJob({
|
|
97
|
-
jobType,
|
|
98
|
-
payload,
|
|
99
|
-
maxAttempts = 3,
|
|
100
|
-
priority = 0,
|
|
101
|
-
runAt = null,
|
|
102
|
-
timeoutMs = void 0,
|
|
103
|
-
forceKillOnTimeout = false,
|
|
104
|
-
tags = void 0,
|
|
105
|
-
idempotencyKey = void 0
|
|
106
|
-
}) {
|
|
107
|
-
const client = await this.pool.connect();
|
|
108
|
-
try {
|
|
109
|
-
let result;
|
|
110
|
-
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
111
|
-
if (runAt) {
|
|
112
|
-
result = await client.query(
|
|
113
|
-
`INSERT INTO job_queue
|
|
114
|
-
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
115
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
116
|
-
${onConflict}
|
|
117
|
-
RETURNING id`,
|
|
118
|
-
[
|
|
119
|
-
jobType,
|
|
120
|
-
payload,
|
|
121
|
-
maxAttempts,
|
|
122
|
-
priority,
|
|
123
|
-
runAt,
|
|
124
|
-
timeoutMs ?? null,
|
|
125
|
-
forceKillOnTimeout ?? false,
|
|
126
|
-
tags ?? null,
|
|
127
|
-
idempotencyKey ?? null
|
|
128
|
-
]
|
|
129
|
-
);
|
|
130
|
-
} else {
|
|
131
|
-
result = await client.query(
|
|
132
|
-
`INSERT INTO job_queue
|
|
133
|
-
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
134
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
135
|
-
${onConflict}
|
|
136
|
-
RETURNING id`,
|
|
137
|
-
[
|
|
138
|
-
jobType,
|
|
139
|
-
payload,
|
|
140
|
-
maxAttempts,
|
|
141
|
-
priority,
|
|
142
|
-
timeoutMs ?? null,
|
|
143
|
-
forceKillOnTimeout ?? false,
|
|
144
|
-
tags ?? null,
|
|
145
|
-
idempotencyKey ?? null
|
|
146
|
-
]
|
|
147
|
-
);
|
|
148
|
-
}
|
|
149
|
-
if (result.rows.length === 0 && idempotencyKey) {
|
|
150
|
-
const existing = await client.query(
|
|
151
|
-
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
152
|
-
[idempotencyKey]
|
|
153
|
-
);
|
|
154
|
-
if (existing.rows.length > 0) {
|
|
155
|
-
log(
|
|
156
|
-
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
157
|
-
);
|
|
158
|
-
return existing.rows[0].id;
|
|
159
|
-
}
|
|
160
|
-
throw new Error(
|
|
161
|
-
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
162
|
-
);
|
|
163
|
-
}
|
|
164
|
-
const jobId = result.rows[0].id;
|
|
165
|
-
log(
|
|
166
|
-
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
63
|
+
// src/processor.ts
|
|
64
|
+
function validateHandlerSerializable(handler, jobType) {
|
|
65
|
+
try {
|
|
66
|
+
const handlerString = handler.toString();
|
|
67
|
+
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
68
|
+
throw new Error(
|
|
69
|
+
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
167
70
|
);
|
|
168
|
-
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
169
|
-
jobType,
|
|
170
|
-
payload,
|
|
171
|
-
tags,
|
|
172
|
-
idempotencyKey
|
|
173
|
-
});
|
|
174
|
-
return jobId;
|
|
175
|
-
} catch (error) {
|
|
176
|
-
log(`Error adding job: ${error}`);
|
|
177
|
-
throw error;
|
|
178
|
-
} finally {
|
|
179
|
-
client.release();
|
|
180
71
|
}
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
try {
|
|
185
|
-
const result = await client.query(
|
|
186
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
187
|
-
[id]
|
|
72
|
+
if (handlerString.includes("[native code]")) {
|
|
73
|
+
throw new Error(
|
|
74
|
+
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
188
75
|
);
|
|
189
|
-
if (result.rows.length === 0) {
|
|
190
|
-
log(`Job ${id} not found`);
|
|
191
|
-
return null;
|
|
192
|
-
}
|
|
193
|
-
log(`Found job ${id}`);
|
|
194
|
-
const job = result.rows[0];
|
|
195
|
-
return {
|
|
196
|
-
...job,
|
|
197
|
-
payload: job.payload,
|
|
198
|
-
timeoutMs: job.timeoutMs,
|
|
199
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
200
|
-
failureReason: job.failureReason
|
|
201
|
-
};
|
|
202
|
-
} catch (error) {
|
|
203
|
-
log(`Error getting job ${id}: ${error}`);
|
|
204
|
-
throw error;
|
|
205
|
-
} finally {
|
|
206
|
-
client.release();
|
|
207
76
|
}
|
|
208
|
-
}
|
|
209
|
-
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
210
|
-
const client = await this.pool.connect();
|
|
211
77
|
try {
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
78
|
+
new Function("return " + handlerString);
|
|
79
|
+
} catch (parseError) {
|
|
80
|
+
throw new Error(
|
|
81
|
+
`Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
|
|
215
82
|
);
|
|
216
|
-
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
217
|
-
return result.rows.map((job) => ({
|
|
218
|
-
...job,
|
|
219
|
-
payload: job.payload,
|
|
220
|
-
timeoutMs: job.timeoutMs,
|
|
221
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
222
|
-
failureReason: job.failureReason
|
|
223
|
-
}));
|
|
224
|
-
} catch (error) {
|
|
225
|
-
log(`Error getting jobs by status ${status}: ${error}`);
|
|
226
|
-
throw error;
|
|
227
|
-
} finally {
|
|
228
|
-
client.release();
|
|
229
83
|
}
|
|
230
|
-
}
|
|
231
|
-
|
|
232
|
-
const client = await this.pool.connect();
|
|
233
|
-
try {
|
|
234
|
-
const result = await client.query(
|
|
235
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
236
|
-
[limit, offset]
|
|
237
|
-
);
|
|
238
|
-
log(`Found ${result.rows.length} jobs (all)`);
|
|
239
|
-
return result.rows.map((job) => ({
|
|
240
|
-
...job,
|
|
241
|
-
payload: job.payload,
|
|
242
|
-
timeoutMs: job.timeoutMs,
|
|
243
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
244
|
-
}));
|
|
245
|
-
} catch (error) {
|
|
246
|
-
log(`Error getting all jobs: ${error}`);
|
|
84
|
+
} catch (error) {
|
|
85
|
+
if (error instanceof Error) {
|
|
247
86
|
throw error;
|
|
248
|
-
} finally {
|
|
249
|
-
client.release();
|
|
250
87
|
}
|
|
88
|
+
throw new Error(
|
|
89
|
+
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
90
|
+
);
|
|
251
91
|
}
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
92
|
+
}
|
|
93
|
+
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
|
|
94
|
+
validateHandlerSerializable(handler, jobType);
|
|
95
|
+
return new Promise((resolve, reject) => {
|
|
96
|
+
const workerCode = `
|
|
97
|
+
(function() {
|
|
98
|
+
const { parentPort, workerData } = require('worker_threads');
|
|
99
|
+
const { handlerCode, payload, timeoutMs } = workerData;
|
|
100
|
+
|
|
101
|
+
// Create an AbortController for the handler
|
|
102
|
+
const controller = new AbortController();
|
|
103
|
+
const signal = controller.signal;
|
|
104
|
+
|
|
105
|
+
// Set up timeout
|
|
106
|
+
const timeoutId = setTimeout(() => {
|
|
107
|
+
controller.abort();
|
|
108
|
+
parentPort.postMessage({ type: 'timeout' });
|
|
109
|
+
}, timeoutMs);
|
|
110
|
+
|
|
111
|
+
try {
|
|
112
|
+
// Execute the handler
|
|
113
|
+
// Note: This uses Function constructor which requires the handler to be serializable.
|
|
114
|
+
// The handler should be validated before reaching this point.
|
|
115
|
+
let handlerFn;
|
|
116
|
+
try {
|
|
117
|
+
// Wrap handlerCode in parentheses to ensure it's treated as an expression
|
|
118
|
+
// This handles both arrow functions and regular functions
|
|
119
|
+
const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
|
|
120
|
+
? handlerCode
|
|
121
|
+
: '(' + handlerCode + ')';
|
|
122
|
+
handlerFn = new Function('return ' + wrappedCode)();
|
|
123
|
+
} catch (parseError) {
|
|
124
|
+
clearTimeout(timeoutId);
|
|
125
|
+
parentPort.postMessage({
|
|
126
|
+
type: 'error',
|
|
127
|
+
error: {
|
|
128
|
+
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
129
|
+
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
130
|
+
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
131
|
+
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
132
|
+
name: 'SerializationError',
|
|
133
|
+
},
|
|
134
|
+
});
|
|
135
|
+
return;
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
// Ensure handlerFn is actually a function
|
|
139
|
+
if (typeof handlerFn !== 'function') {
|
|
140
|
+
clearTimeout(timeoutId);
|
|
141
|
+
parentPort.postMessage({
|
|
142
|
+
type: 'error',
|
|
143
|
+
error: {
|
|
144
|
+
message: 'Handler deserialization did not produce a function. ' +
|
|
145
|
+
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
146
|
+
name: 'SerializationError',
|
|
147
|
+
},
|
|
148
|
+
});
|
|
149
|
+
return;
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
handlerFn(payload, signal)
|
|
153
|
+
.then(() => {
|
|
154
|
+
clearTimeout(timeoutId);
|
|
155
|
+
parentPort.postMessage({ type: 'success' });
|
|
156
|
+
})
|
|
157
|
+
.catch((error) => {
|
|
158
|
+
clearTimeout(timeoutId);
|
|
159
|
+
parentPort.postMessage({
|
|
160
|
+
type: 'error',
|
|
161
|
+
error: {
|
|
162
|
+
message: error.message,
|
|
163
|
+
stack: error.stack,
|
|
164
|
+
name: error.name,
|
|
165
|
+
},
|
|
166
|
+
});
|
|
167
|
+
});
|
|
168
|
+
} catch (error) {
|
|
169
|
+
clearTimeout(timeoutId);
|
|
170
|
+
parentPort.postMessage({
|
|
171
|
+
type: 'error',
|
|
172
|
+
error: {
|
|
173
|
+
message: error.message,
|
|
174
|
+
stack: error.stack,
|
|
175
|
+
name: error.name,
|
|
176
|
+
},
|
|
177
|
+
});
|
|
178
|
+
}
|
|
179
|
+
})();
|
|
180
|
+
`;
|
|
181
|
+
const worker = new worker_threads.Worker(workerCode, {
|
|
182
|
+
eval: true,
|
|
183
|
+
workerData: {
|
|
184
|
+
handlerCode: handler.toString(),
|
|
185
|
+
payload,
|
|
186
|
+
timeoutMs
|
|
187
|
+
}
|
|
188
|
+
});
|
|
189
|
+
let resolved = false;
|
|
190
|
+
worker.on("message", (message) => {
|
|
191
|
+
if (resolved) return;
|
|
192
|
+
resolved = true;
|
|
193
|
+
if (message.type === "success") {
|
|
194
|
+
resolve();
|
|
195
|
+
} else if (message.type === "timeout") {
|
|
196
|
+
const timeoutError = new Error(
|
|
197
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
198
|
+
);
|
|
199
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
200
|
+
reject(timeoutError);
|
|
201
|
+
} else if (message.type === "error") {
|
|
202
|
+
const error = new Error(message.error.message);
|
|
203
|
+
error.stack = message.error.stack;
|
|
204
|
+
error.name = message.error.name;
|
|
205
|
+
reject(error);
|
|
206
|
+
}
|
|
207
|
+
});
|
|
208
|
+
worker.on("error", (error) => {
|
|
209
|
+
if (resolved) return;
|
|
210
|
+
resolved = true;
|
|
211
|
+
reject(error);
|
|
212
|
+
});
|
|
213
|
+
worker.on("exit", (code) => {
|
|
214
|
+
if (resolved) return;
|
|
215
|
+
if (code !== 0) {
|
|
216
|
+
resolved = true;
|
|
217
|
+
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
218
|
+
}
|
|
219
|
+
});
|
|
220
|
+
setTimeout(() => {
|
|
221
|
+
if (!resolved) {
|
|
222
|
+
resolved = true;
|
|
223
|
+
worker.terminate().then(() => {
|
|
224
|
+
const timeoutError = new Error(
|
|
225
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
226
|
+
);
|
|
227
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
228
|
+
reject(timeoutError);
|
|
229
|
+
}).catch((err) => {
|
|
230
|
+
reject(err);
|
|
231
|
+
});
|
|
232
|
+
}
|
|
233
|
+
}, timeoutMs + 100);
|
|
234
|
+
});
|
|
235
|
+
}
|
|
236
|
+
function calculateWaitUntil(duration) {
|
|
237
|
+
const now = Date.now();
|
|
238
|
+
let ms = 0;
|
|
239
|
+
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
240
|
+
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
241
|
+
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
242
|
+
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
243
|
+
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
244
|
+
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
245
|
+
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
246
|
+
if (ms <= 0) {
|
|
247
|
+
throw new Error(
|
|
248
|
+
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
249
|
+
);
|
|
250
|
+
}
|
|
251
|
+
return new Date(now + ms);
|
|
252
|
+
}
|
|
253
|
+
async function resolveCompletedWaits(backend, stepData) {
|
|
254
|
+
for (const key of Object.keys(stepData)) {
|
|
255
|
+
if (!key.startsWith("__wait_")) continue;
|
|
256
|
+
const entry = stepData[key];
|
|
257
|
+
if (!entry || typeof entry !== "object" || entry.completed) continue;
|
|
258
|
+
if (entry.type === "duration" || entry.type === "date") {
|
|
259
|
+
stepData[key] = { ...entry, completed: true };
|
|
260
|
+
} else if (entry.type === "token" && entry.tokenId) {
|
|
261
|
+
const wp = await backend.getWaitpoint(entry.tokenId);
|
|
262
|
+
if (wp && wp.status === "completed") {
|
|
263
|
+
stepData[key] = {
|
|
264
|
+
...entry,
|
|
265
|
+
completed: true,
|
|
266
|
+
result: { ok: true, output: wp.output }
|
|
267
|
+
};
|
|
268
|
+
} else if (wp && wp.status === "timed_out") {
|
|
269
|
+
stepData[key] = {
|
|
270
|
+
...entry,
|
|
271
|
+
completed: true,
|
|
272
|
+
result: { ok: false, error: "Token timed out" }
|
|
273
|
+
};
|
|
274
|
+
}
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
function buildWaitContext(backend, jobId, stepData, baseCtx) {
|
|
279
|
+
let waitCounter = 0;
|
|
280
|
+
const ctx = {
|
|
281
|
+
prolong: baseCtx.prolong,
|
|
282
|
+
onTimeout: baseCtx.onTimeout,
|
|
283
|
+
run: async (stepName, fn) => {
|
|
284
|
+
const cached = stepData[stepName];
|
|
285
|
+
if (cached && typeof cached === "object" && cached.__completed) {
|
|
286
|
+
log(`Step "${stepName}" replayed from cache for job ${jobId}`);
|
|
287
|
+
return cached.result;
|
|
288
|
+
}
|
|
289
|
+
const result = await fn();
|
|
290
|
+
stepData[stepName] = { __completed: true, result };
|
|
291
|
+
await backend.updateStepData(jobId, stepData);
|
|
292
|
+
return result;
|
|
293
|
+
},
|
|
294
|
+
waitFor: async (duration) => {
|
|
295
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
296
|
+
const cached = stepData[waitKey];
|
|
297
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
298
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
299
|
+
return;
|
|
300
|
+
}
|
|
301
|
+
const waitUntilDate = calculateWaitUntil(duration);
|
|
302
|
+
stepData[waitKey] = { type: "duration", completed: false };
|
|
303
|
+
throw new WaitSignal("duration", waitUntilDate, void 0, stepData);
|
|
304
|
+
},
|
|
305
|
+
waitUntil: async (date) => {
|
|
306
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
307
|
+
const cached = stepData[waitKey];
|
|
308
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
309
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
310
|
+
return;
|
|
311
|
+
}
|
|
312
|
+
stepData[waitKey] = { type: "date", completed: false };
|
|
313
|
+
throw new WaitSignal("date", date, void 0, stepData);
|
|
314
|
+
},
|
|
315
|
+
createToken: async (options) => {
|
|
316
|
+
const token = await backend.createWaitpoint(jobId, options);
|
|
317
|
+
return token;
|
|
318
|
+
},
|
|
319
|
+
waitForToken: async (tokenId) => {
|
|
320
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
321
|
+
const cached = stepData[waitKey];
|
|
322
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
323
|
+
log(
|
|
324
|
+
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
325
|
+
);
|
|
326
|
+
return cached.result;
|
|
327
|
+
}
|
|
328
|
+
const wp = await backend.getWaitpoint(tokenId);
|
|
329
|
+
if (wp && wp.status === "completed") {
|
|
330
|
+
const result = {
|
|
331
|
+
ok: true,
|
|
332
|
+
output: wp.output
|
|
333
|
+
};
|
|
334
|
+
stepData[waitKey] = {
|
|
335
|
+
type: "token",
|
|
336
|
+
tokenId,
|
|
337
|
+
completed: true,
|
|
338
|
+
result
|
|
339
|
+
};
|
|
340
|
+
await backend.updateStepData(jobId, stepData);
|
|
341
|
+
return result;
|
|
342
|
+
}
|
|
343
|
+
if (wp && wp.status === "timed_out") {
|
|
344
|
+
const result = {
|
|
345
|
+
ok: false,
|
|
346
|
+
error: "Token timed out"
|
|
347
|
+
};
|
|
348
|
+
stepData[waitKey] = {
|
|
349
|
+
type: "token",
|
|
350
|
+
tokenId,
|
|
351
|
+
completed: true,
|
|
352
|
+
result
|
|
353
|
+
};
|
|
354
|
+
await backend.updateStepData(jobId, stepData);
|
|
355
|
+
return result;
|
|
356
|
+
}
|
|
357
|
+
stepData[waitKey] = { type: "token", tokenId, completed: false };
|
|
358
|
+
throw new WaitSignal("token", void 0, tokenId, stepData);
|
|
359
|
+
},
|
|
360
|
+
setProgress: async (percent) => {
|
|
361
|
+
if (percent < 0 || percent > 100)
|
|
362
|
+
throw new Error("Progress must be between 0 and 100");
|
|
363
|
+
await backend.updateProgress(jobId, Math.round(percent));
|
|
364
|
+
}
|
|
365
|
+
};
|
|
366
|
+
return ctx;
|
|
367
|
+
}
|
|
368
|
+
async function processJobWithHandlers(backend, job, jobHandlers) {
|
|
369
|
+
const handler = jobHandlers[job.jobType];
|
|
370
|
+
if (!handler) {
|
|
371
|
+
await backend.setPendingReasonForUnpickedJobs(
|
|
372
|
+
`No handler registered for job type: ${job.jobType}`,
|
|
373
|
+
job.jobType
|
|
374
|
+
);
|
|
375
|
+
await backend.failJob(
|
|
376
|
+
job.id,
|
|
377
|
+
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
378
|
+
"no_handler" /* NoHandler */
|
|
379
|
+
);
|
|
380
|
+
return;
|
|
381
|
+
}
|
|
382
|
+
const stepData = { ...job.stepData || {} };
|
|
383
|
+
const hasStepHistory = Object.keys(stepData).some(
|
|
384
|
+
(k) => k.startsWith("__wait_")
|
|
385
|
+
);
|
|
386
|
+
if (hasStepHistory) {
|
|
387
|
+
await resolveCompletedWaits(backend, stepData);
|
|
388
|
+
await backend.updateStepData(job.id, stepData);
|
|
389
|
+
}
|
|
390
|
+
const timeoutMs = job.timeoutMs ?? void 0;
|
|
391
|
+
const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
|
|
392
|
+
let timeoutId;
|
|
393
|
+
const controller = new AbortController();
|
|
394
|
+
try {
|
|
395
|
+
if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
|
|
396
|
+
await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
|
|
397
|
+
} else {
|
|
398
|
+
let onTimeoutCallback;
|
|
399
|
+
let timeoutReject;
|
|
400
|
+
const armTimeout = (ms) => {
|
|
401
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
402
|
+
timeoutId = setTimeout(() => {
|
|
403
|
+
if (onTimeoutCallback) {
|
|
404
|
+
try {
|
|
405
|
+
const extension = onTimeoutCallback();
|
|
406
|
+
if (typeof extension === "number" && extension > 0) {
|
|
407
|
+
backend.prolongJob(job.id).catch(() => {
|
|
408
|
+
});
|
|
409
|
+
armTimeout(extension);
|
|
410
|
+
return;
|
|
411
|
+
}
|
|
412
|
+
} catch (callbackError) {
|
|
413
|
+
log(
|
|
414
|
+
`onTimeout callback threw for job ${job.id}: ${callbackError}`
|
|
415
|
+
);
|
|
416
|
+
}
|
|
417
|
+
}
|
|
418
|
+
controller.abort();
|
|
419
|
+
const timeoutError = new Error(`Job timed out after ${ms} ms`);
|
|
420
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
421
|
+
if (timeoutReject) {
|
|
422
|
+
timeoutReject(timeoutError);
|
|
423
|
+
}
|
|
424
|
+
}, ms);
|
|
425
|
+
};
|
|
426
|
+
const hasTimeout = timeoutMs != null && timeoutMs > 0;
|
|
427
|
+
const baseCtx = hasTimeout ? {
|
|
428
|
+
prolong: (ms) => {
|
|
429
|
+
const duration = ms ?? timeoutMs;
|
|
430
|
+
if (duration != null && duration > 0) {
|
|
431
|
+
armTimeout(duration);
|
|
432
|
+
backend.prolongJob(job.id).catch(() => {
|
|
433
|
+
});
|
|
434
|
+
}
|
|
435
|
+
},
|
|
436
|
+
onTimeout: (callback) => {
|
|
437
|
+
onTimeoutCallback = callback;
|
|
438
|
+
}
|
|
439
|
+
} : {
|
|
440
|
+
prolong: () => {
|
|
441
|
+
log("prolong() called but ignored: job has no timeout set");
|
|
442
|
+
},
|
|
443
|
+
onTimeout: () => {
|
|
444
|
+
log("onTimeout() called but ignored: job has no timeout set");
|
|
445
|
+
}
|
|
446
|
+
};
|
|
447
|
+
const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
|
|
448
|
+
if (forceKillOnTimeout && !hasTimeout) {
|
|
449
|
+
log(
|
|
450
|
+
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
451
|
+
);
|
|
452
|
+
}
|
|
453
|
+
const jobPromise = handler(job.payload, controller.signal, ctx);
|
|
454
|
+
if (hasTimeout) {
|
|
455
|
+
await Promise.race([
|
|
456
|
+
jobPromise,
|
|
457
|
+
new Promise((_, reject) => {
|
|
458
|
+
timeoutReject = reject;
|
|
459
|
+
armTimeout(timeoutMs);
|
|
460
|
+
})
|
|
461
|
+
]);
|
|
462
|
+
} else {
|
|
463
|
+
await jobPromise;
|
|
464
|
+
}
|
|
465
|
+
}
|
|
466
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
467
|
+
await backend.completeJob(job.id);
|
|
468
|
+
} catch (error) {
|
|
469
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
470
|
+
if (error instanceof WaitSignal) {
|
|
471
|
+
log(
|
|
472
|
+
`Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? "none"}, tokenId=${error.tokenId ?? "none"}`
|
|
473
|
+
);
|
|
474
|
+
await backend.waitJob(job.id, {
|
|
475
|
+
waitUntil: error.waitUntil,
|
|
476
|
+
waitTokenId: error.tokenId,
|
|
477
|
+
stepData: error.stepData
|
|
478
|
+
});
|
|
479
|
+
return;
|
|
480
|
+
}
|
|
481
|
+
console.error(`Error processing job ${job.id}:`, error);
|
|
482
|
+
let failureReason = "handler_error" /* HandlerError */;
|
|
483
|
+
if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
|
|
484
|
+
failureReason = "timeout" /* Timeout */;
|
|
485
|
+
}
|
|
486
|
+
await backend.failJob(
|
|
487
|
+
job.id,
|
|
488
|
+
error instanceof Error ? error : new Error(String(error)),
|
|
489
|
+
failureReason
|
|
490
|
+
);
|
|
491
|
+
}
|
|
492
|
+
}
|
|
493
|
+
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
|
|
494
|
+
const jobs = await backend.getNextBatch(
|
|
495
|
+
workerId,
|
|
496
|
+
batchSize,
|
|
497
|
+
jobType
|
|
498
|
+
);
|
|
499
|
+
if (!concurrency || concurrency >= jobs.length) {
|
|
500
|
+
await Promise.all(
|
|
501
|
+
jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
|
|
502
|
+
);
|
|
503
|
+
return jobs.length;
|
|
504
|
+
}
|
|
505
|
+
let idx = 0;
|
|
506
|
+
let running = 0;
|
|
507
|
+
let finished = 0;
|
|
508
|
+
return new Promise((resolve, reject) => {
|
|
509
|
+
const next = () => {
|
|
510
|
+
if (finished === jobs.length) return resolve(jobs.length);
|
|
511
|
+
while (running < concurrency && idx < jobs.length) {
|
|
512
|
+
const job = jobs[idx++];
|
|
513
|
+
running++;
|
|
514
|
+
processJobWithHandlers(backend, job, jobHandlers).then(() => {
|
|
515
|
+
running--;
|
|
516
|
+
finished++;
|
|
517
|
+
next();
|
|
518
|
+
}).catch((err) => {
|
|
519
|
+
running--;
|
|
520
|
+
finished++;
|
|
521
|
+
if (onError) {
|
|
522
|
+
onError(err instanceof Error ? err : new Error(String(err)));
|
|
523
|
+
}
|
|
524
|
+
next();
|
|
525
|
+
});
|
|
526
|
+
}
|
|
527
|
+
};
|
|
528
|
+
next();
|
|
529
|
+
});
|
|
530
|
+
}
|
|
531
|
+
var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
|
|
532
|
+
const {
|
|
533
|
+
workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
|
|
534
|
+
batchSize = 10,
|
|
535
|
+
pollInterval = 5e3,
|
|
536
|
+
onError = (error) => console.error("Job processor error:", error),
|
|
537
|
+
jobType,
|
|
538
|
+
concurrency = 3
|
|
539
|
+
} = options;
|
|
540
|
+
let running = false;
|
|
541
|
+
let intervalId = null;
|
|
542
|
+
let currentBatchPromise = null;
|
|
543
|
+
setLogContext(options.verbose ?? false);
|
|
544
|
+
const processJobs = async () => {
|
|
545
|
+
if (!running) return 0;
|
|
546
|
+
if (onBeforeBatch) {
|
|
547
|
+
try {
|
|
548
|
+
await onBeforeBatch();
|
|
549
|
+
} catch (hookError) {
|
|
550
|
+
log(`onBeforeBatch hook error: ${hookError}`);
|
|
551
|
+
if (onError) {
|
|
552
|
+
onError(
|
|
553
|
+
hookError instanceof Error ? hookError : new Error(String(hookError))
|
|
554
|
+
);
|
|
555
|
+
}
|
|
556
|
+
}
|
|
557
|
+
}
|
|
558
|
+
log(
|
|
559
|
+
`Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
|
|
560
|
+
);
|
|
561
|
+
try {
|
|
562
|
+
const processed = await processBatchWithHandlers(
|
|
563
|
+
backend,
|
|
564
|
+
workerId,
|
|
565
|
+
batchSize,
|
|
566
|
+
jobType,
|
|
567
|
+
handlers,
|
|
568
|
+
concurrency,
|
|
569
|
+
onError
|
|
570
|
+
);
|
|
571
|
+
return processed;
|
|
572
|
+
} catch (error) {
|
|
573
|
+
onError(error instanceof Error ? error : new Error(String(error)));
|
|
574
|
+
}
|
|
575
|
+
return 0;
|
|
576
|
+
};
|
|
577
|
+
return {
|
|
578
|
+
/**
|
|
579
|
+
* Start the job processor in the background.
|
|
580
|
+
* - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
|
|
581
|
+
* - You have to call the stop method to stop the processor.
|
|
582
|
+
*/
|
|
583
|
+
startInBackground: () => {
|
|
584
|
+
if (running) return;
|
|
585
|
+
log(`Starting job processor with workerId: ${workerId}`);
|
|
586
|
+
running = true;
|
|
587
|
+
const scheduleNext = (immediate) => {
|
|
588
|
+
if (!running) return;
|
|
589
|
+
if (immediate) {
|
|
590
|
+
intervalId = setTimeout(loop, 0);
|
|
591
|
+
} else {
|
|
592
|
+
intervalId = setTimeout(loop, pollInterval);
|
|
593
|
+
}
|
|
594
|
+
};
|
|
595
|
+
const loop = async () => {
|
|
596
|
+
if (!running) return;
|
|
597
|
+
currentBatchPromise = processJobs();
|
|
598
|
+
const processed = await currentBatchPromise;
|
|
599
|
+
currentBatchPromise = null;
|
|
600
|
+
scheduleNext(processed === batchSize);
|
|
601
|
+
};
|
|
602
|
+
loop();
|
|
603
|
+
},
|
|
604
|
+
/**
|
|
605
|
+
* Stop the job processor that runs in the background.
|
|
606
|
+
* Does not wait for in-flight jobs.
|
|
607
|
+
*/
|
|
608
|
+
stop: () => {
|
|
609
|
+
log(`Stopping job processor with workerId: ${workerId}`);
|
|
610
|
+
running = false;
|
|
611
|
+
if (intervalId) {
|
|
612
|
+
clearTimeout(intervalId);
|
|
613
|
+
intervalId = null;
|
|
614
|
+
}
|
|
615
|
+
},
|
|
616
|
+
/**
|
|
617
|
+
* Stop the job processor and wait for all in-flight jobs to complete.
|
|
618
|
+
* Useful for graceful shutdown (e.g., SIGTERM handling).
|
|
619
|
+
*/
|
|
620
|
+
stopAndDrain: async (drainTimeoutMs = 3e4) => {
|
|
621
|
+
log(`Stopping and draining job processor with workerId: ${workerId}`);
|
|
622
|
+
running = false;
|
|
623
|
+
if (intervalId) {
|
|
624
|
+
clearTimeout(intervalId);
|
|
625
|
+
intervalId = null;
|
|
626
|
+
}
|
|
627
|
+
if (currentBatchPromise) {
|
|
628
|
+
await Promise.race([
|
|
629
|
+
currentBatchPromise.catch(() => {
|
|
630
|
+
}),
|
|
631
|
+
new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
|
|
632
|
+
]);
|
|
633
|
+
currentBatchPromise = null;
|
|
634
|
+
}
|
|
635
|
+
log(`Job processor ${workerId} drained`);
|
|
636
|
+
},
|
|
637
|
+
/**
|
|
638
|
+
* Start the job processor synchronously.
|
|
639
|
+
* - This will process all jobs immediately and then stop.
|
|
640
|
+
* - The pollInterval is ignored.
|
|
641
|
+
*/
|
|
642
|
+
start: async () => {
|
|
643
|
+
log(`Starting job processor with workerId: ${workerId}`);
|
|
644
|
+
running = true;
|
|
645
|
+
const processed = await processJobs();
|
|
646
|
+
running = false;
|
|
647
|
+
return processed;
|
|
648
|
+
},
|
|
649
|
+
isRunning: () => running
|
|
650
|
+
};
|
|
651
|
+
};
|
|
652
|
+
function loadPemOrFile(value) {
|
|
653
|
+
if (!value) return void 0;
|
|
654
|
+
if (value.startsWith("file://")) {
|
|
655
|
+
const filePath = value.slice(7);
|
|
656
|
+
return fs__default.default.readFileSync(filePath, "utf8");
|
|
657
|
+
}
|
|
658
|
+
return value;
|
|
659
|
+
}
|
|
660
|
+
var createPool = (config) => {
|
|
661
|
+
let searchPath;
|
|
662
|
+
let ssl = void 0;
|
|
663
|
+
let customCA;
|
|
664
|
+
let sslmode;
|
|
665
|
+
if (config.connectionString) {
|
|
666
|
+
try {
|
|
667
|
+
const url = new URL(config.connectionString);
|
|
668
|
+
searchPath = url.searchParams.get("search_path") || void 0;
|
|
669
|
+
sslmode = url.searchParams.get("sslmode") || void 0;
|
|
670
|
+
if (sslmode === "no-verify") {
|
|
671
|
+
ssl = { rejectUnauthorized: false };
|
|
672
|
+
}
|
|
673
|
+
} catch (e) {
|
|
674
|
+
const parsed = pgConnectionString.parse(config.connectionString);
|
|
675
|
+
if (parsed.options) {
|
|
676
|
+
const match = parsed.options.match(/search_path=([^\s]+)/);
|
|
677
|
+
if (match) {
|
|
678
|
+
searchPath = match[1];
|
|
679
|
+
}
|
|
680
|
+
}
|
|
681
|
+
sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
|
|
682
|
+
if (sslmode === "no-verify") {
|
|
683
|
+
ssl = { rejectUnauthorized: false };
|
|
684
|
+
}
|
|
685
|
+
}
|
|
686
|
+
}
|
|
687
|
+
if (config.ssl) {
|
|
688
|
+
if (typeof config.ssl.ca === "string") {
|
|
689
|
+
customCA = config.ssl.ca;
|
|
690
|
+
} else if (typeof process.env.PGSSLROOTCERT === "string") {
|
|
691
|
+
customCA = process.env.PGSSLROOTCERT;
|
|
692
|
+
} else {
|
|
693
|
+
customCA = void 0;
|
|
694
|
+
}
|
|
695
|
+
const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
|
|
696
|
+
ssl = {
|
|
697
|
+
...ssl,
|
|
698
|
+
...caValue ? { ca: caValue } : {},
|
|
699
|
+
cert: loadPemOrFile(
|
|
700
|
+
typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
|
|
701
|
+
),
|
|
702
|
+
key: loadPemOrFile(
|
|
703
|
+
typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
|
|
704
|
+
),
|
|
705
|
+
rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
|
|
706
|
+
};
|
|
707
|
+
}
|
|
708
|
+
if (sslmode && customCA) {
|
|
709
|
+
const warning = `
|
|
710
|
+
|
|
711
|
+
\x1B[33m**************************************************
|
|
712
|
+
\u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
|
|
713
|
+
**************************************************
|
|
714
|
+
Both sslmode ('${sslmode}') is set in the connection string
|
|
715
|
+
and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
|
|
716
|
+
This combination may cause connection failures or unexpected behavior.
|
|
717
|
+
|
|
718
|
+
Recommended: Remove sslmode from the connection string when using a custom CA.
|
|
719
|
+
**************************************************\x1B[0m
|
|
720
|
+
`;
|
|
721
|
+
console.warn(warning);
|
|
722
|
+
}
|
|
723
|
+
const pool = new pg.Pool({
|
|
724
|
+
...config,
|
|
725
|
+
...ssl ? { ssl } : {}
|
|
726
|
+
});
|
|
727
|
+
if (searchPath) {
|
|
728
|
+
pool.on("connect", (client) => {
|
|
729
|
+
client.query(`SET search_path TO ${searchPath}`);
|
|
730
|
+
});
|
|
731
|
+
}
|
|
732
|
+
return pool;
|
|
733
|
+
};
|
|
734
|
+
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
735
|
+
function parseTimeoutString(timeout) {
|
|
736
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
737
|
+
if (!match) {
|
|
738
|
+
throw new Error(
|
|
739
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
740
|
+
);
|
|
741
|
+
}
|
|
742
|
+
const value = parseInt(match[1], 10);
|
|
743
|
+
const unit = match[2];
|
|
744
|
+
let ms;
|
|
745
|
+
switch (unit) {
|
|
746
|
+
case "s":
|
|
747
|
+
ms = value * 1e3;
|
|
748
|
+
break;
|
|
749
|
+
case "m":
|
|
750
|
+
ms = value * 60 * 1e3;
|
|
751
|
+
break;
|
|
752
|
+
case "h":
|
|
753
|
+
ms = value * 60 * 60 * 1e3;
|
|
754
|
+
break;
|
|
755
|
+
case "d":
|
|
756
|
+
ms = value * 24 * 60 * 60 * 1e3;
|
|
757
|
+
break;
|
|
758
|
+
default:
|
|
759
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
760
|
+
}
|
|
761
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
762
|
+
throw new Error(
|
|
763
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
764
|
+
);
|
|
765
|
+
}
|
|
766
|
+
return ms;
|
|
767
|
+
}
|
|
768
|
+
var PostgresBackend = class {
|
|
769
|
+
constructor(pool) {
|
|
770
|
+
this.pool = pool;
|
|
771
|
+
}
|
|
772
|
+
/** Expose the raw pool for advanced usage. */
|
|
773
|
+
getPool() {
|
|
774
|
+
return this.pool;
|
|
775
|
+
}
|
|
776
|
+
// ── Events ──────────────────────────────────────────────────────────
|
|
777
|
+
async recordJobEvent(jobId, eventType, metadata) {
|
|
778
|
+
const client = await this.pool.connect();
|
|
779
|
+
try {
|
|
780
|
+
await client.query(
|
|
781
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
782
|
+
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
783
|
+
);
|
|
784
|
+
} catch (error) {
|
|
785
|
+
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
786
|
+
} finally {
|
|
787
|
+
client.release();
|
|
788
|
+
}
|
|
789
|
+
}
|
|
790
|
+
async getJobEvents(jobId) {
|
|
791
|
+
const client = await this.pool.connect();
|
|
792
|
+
try {
|
|
793
|
+
const res = await client.query(
|
|
794
|
+
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
795
|
+
[jobId]
|
|
796
|
+
);
|
|
797
|
+
return res.rows;
|
|
798
|
+
} finally {
|
|
799
|
+
client.release();
|
|
800
|
+
}
|
|
801
|
+
}
|
|
802
|
+
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
803
|
+
async addJob({
|
|
804
|
+
jobType,
|
|
805
|
+
payload,
|
|
806
|
+
maxAttempts = 3,
|
|
807
|
+
priority = 0,
|
|
808
|
+
runAt = null,
|
|
809
|
+
timeoutMs = void 0,
|
|
810
|
+
forceKillOnTimeout = false,
|
|
811
|
+
tags = void 0,
|
|
812
|
+
idempotencyKey = void 0
|
|
813
|
+
}) {
|
|
814
|
+
const client = await this.pool.connect();
|
|
815
|
+
try {
|
|
816
|
+
let result;
|
|
817
|
+
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
818
|
+
if (runAt) {
|
|
819
|
+
result = await client.query(
|
|
820
|
+
`INSERT INTO job_queue
|
|
821
|
+
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
822
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
823
|
+
${onConflict}
|
|
824
|
+
RETURNING id`,
|
|
825
|
+
[
|
|
826
|
+
jobType,
|
|
827
|
+
payload,
|
|
828
|
+
maxAttempts,
|
|
829
|
+
priority,
|
|
830
|
+
runAt,
|
|
831
|
+
timeoutMs ?? null,
|
|
832
|
+
forceKillOnTimeout ?? false,
|
|
833
|
+
tags ?? null,
|
|
834
|
+
idempotencyKey ?? null
|
|
835
|
+
]
|
|
836
|
+
);
|
|
837
|
+
} else {
|
|
838
|
+
result = await client.query(
|
|
839
|
+
`INSERT INTO job_queue
|
|
840
|
+
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
841
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
842
|
+
${onConflict}
|
|
843
|
+
RETURNING id`,
|
|
844
|
+
[
|
|
845
|
+
jobType,
|
|
846
|
+
payload,
|
|
847
|
+
maxAttempts,
|
|
848
|
+
priority,
|
|
849
|
+
timeoutMs ?? null,
|
|
850
|
+
forceKillOnTimeout ?? false,
|
|
851
|
+
tags ?? null,
|
|
852
|
+
idempotencyKey ?? null
|
|
853
|
+
]
|
|
854
|
+
);
|
|
855
|
+
}
|
|
856
|
+
if (result.rows.length === 0 && idempotencyKey) {
|
|
857
|
+
const existing = await client.query(
|
|
858
|
+
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
859
|
+
[idempotencyKey]
|
|
860
|
+
);
|
|
861
|
+
if (existing.rows.length > 0) {
|
|
862
|
+
log(
|
|
863
|
+
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
864
|
+
);
|
|
865
|
+
return existing.rows[0].id;
|
|
866
|
+
}
|
|
867
|
+
throw new Error(
|
|
868
|
+
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
869
|
+
);
|
|
870
|
+
}
|
|
871
|
+
const jobId = result.rows[0].id;
|
|
872
|
+
log(
|
|
873
|
+
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
874
|
+
);
|
|
875
|
+
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
876
|
+
jobType,
|
|
877
|
+
payload,
|
|
878
|
+
tags,
|
|
879
|
+
idempotencyKey
|
|
880
|
+
});
|
|
881
|
+
return jobId;
|
|
882
|
+
} catch (error) {
|
|
883
|
+
log(`Error adding job: ${error}`);
|
|
884
|
+
throw error;
|
|
885
|
+
} finally {
|
|
886
|
+
client.release();
|
|
887
|
+
}
|
|
888
|
+
}
|
|
889
|
+
async getJob(id) {
|
|
890
|
+
const client = await this.pool.connect();
|
|
891
|
+
try {
|
|
892
|
+
const result = await client.query(
|
|
893
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
894
|
+
[id]
|
|
895
|
+
);
|
|
896
|
+
if (result.rows.length === 0) {
|
|
897
|
+
log(`Job ${id} not found`);
|
|
898
|
+
return null;
|
|
899
|
+
}
|
|
900
|
+
log(`Found job ${id}`);
|
|
901
|
+
const job = result.rows[0];
|
|
902
|
+
return {
|
|
903
|
+
...job,
|
|
904
|
+
payload: job.payload,
|
|
905
|
+
timeoutMs: job.timeoutMs,
|
|
906
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
907
|
+
failureReason: job.failureReason
|
|
908
|
+
};
|
|
909
|
+
} catch (error) {
|
|
910
|
+
log(`Error getting job ${id}: ${error}`);
|
|
911
|
+
throw error;
|
|
912
|
+
} finally {
|
|
913
|
+
client.release();
|
|
914
|
+
}
|
|
915
|
+
}
|
|
916
|
+
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
917
|
+
const client = await this.pool.connect();
|
|
918
|
+
try {
|
|
919
|
+
const result = await client.query(
|
|
920
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
|
|
921
|
+
[status, limit, offset]
|
|
922
|
+
);
|
|
923
|
+
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
924
|
+
return result.rows.map((job) => ({
|
|
925
|
+
...job,
|
|
926
|
+
payload: job.payload,
|
|
927
|
+
timeoutMs: job.timeoutMs,
|
|
928
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
929
|
+
failureReason: job.failureReason
|
|
930
|
+
}));
|
|
931
|
+
} catch (error) {
|
|
932
|
+
log(`Error getting jobs by status ${status}: ${error}`);
|
|
933
|
+
throw error;
|
|
934
|
+
} finally {
|
|
935
|
+
client.release();
|
|
936
|
+
}
|
|
937
|
+
}
|
|
938
|
+
async getAllJobs(limit = 100, offset = 0) {
|
|
939
|
+
const client = await this.pool.connect();
|
|
940
|
+
try {
|
|
941
|
+
const result = await client.query(
|
|
942
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
943
|
+
[limit, offset]
|
|
944
|
+
);
|
|
945
|
+
log(`Found ${result.rows.length} jobs (all)`);
|
|
946
|
+
return result.rows.map((job) => ({
|
|
947
|
+
...job,
|
|
948
|
+
payload: job.payload,
|
|
949
|
+
timeoutMs: job.timeoutMs,
|
|
950
|
+
forceKillOnTimeout: job.forceKillOnTimeout
|
|
951
|
+
}));
|
|
952
|
+
} catch (error) {
|
|
953
|
+
log(`Error getting all jobs: ${error}`);
|
|
954
|
+
throw error;
|
|
955
|
+
} finally {
|
|
956
|
+
client.release();
|
|
957
|
+
}
|
|
958
|
+
}
|
|
959
|
+
async getJobs(filters, limit = 100, offset = 0) {
|
|
960
|
+
const client = await this.pool.connect();
|
|
961
|
+
try {
|
|
962
|
+
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
|
|
963
|
+
const params = [];
|
|
964
|
+
const where = [];
|
|
965
|
+
let paramIdx = 1;
|
|
966
|
+
if (filters) {
|
|
967
|
+
if (filters.jobType) {
|
|
968
|
+
where.push(`job_type = $${paramIdx++}`);
|
|
969
|
+
params.push(filters.jobType);
|
|
970
|
+
}
|
|
971
|
+
if (filters.priority !== void 0) {
|
|
972
|
+
where.push(`priority = $${paramIdx++}`);
|
|
973
|
+
params.push(filters.priority);
|
|
974
|
+
}
|
|
975
|
+
if (filters.runAt) {
|
|
976
|
+
if (filters.runAt instanceof Date) {
|
|
977
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
978
|
+
params.push(filters.runAt);
|
|
979
|
+
} else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
|
|
980
|
+
const ops = filters.runAt;
|
|
981
|
+
if (ops.gt) {
|
|
982
|
+
where.push(`run_at > $${paramIdx++}`);
|
|
983
|
+
params.push(ops.gt);
|
|
984
|
+
}
|
|
985
|
+
if (ops.gte) {
|
|
986
|
+
where.push(`run_at >= $${paramIdx++}`);
|
|
987
|
+
params.push(ops.gte);
|
|
988
|
+
}
|
|
989
|
+
if (ops.lt) {
|
|
990
|
+
where.push(`run_at < $${paramIdx++}`);
|
|
991
|
+
params.push(ops.lt);
|
|
992
|
+
}
|
|
993
|
+
if (ops.lte) {
|
|
994
|
+
where.push(`run_at <= $${paramIdx++}`);
|
|
995
|
+
params.push(ops.lte);
|
|
996
|
+
}
|
|
997
|
+
if (ops.eq) {
|
|
998
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
999
|
+
params.push(ops.eq);
|
|
1000
|
+
}
|
|
1001
|
+
}
|
|
1002
|
+
}
|
|
1003
|
+
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
1004
|
+
const mode = filters.tags.mode || "all";
|
|
1005
|
+
const tagValues = filters.tags.values;
|
|
1006
|
+
switch (mode) {
|
|
1007
|
+
case "exact":
|
|
1008
|
+
where.push(`tags = $${paramIdx++}`);
|
|
1009
|
+
params.push(tagValues);
|
|
1010
|
+
break;
|
|
304
1011
|
case "all":
|
|
305
1012
|
where.push(`tags @> $${paramIdx++}`);
|
|
306
1013
|
params.push(tagValues);
|
|
@@ -894,1037 +1601,668 @@ var PostgresBackend = class {
|
|
|
894
1601
|
metadata.maxAttempts = updates.maxAttempts;
|
|
895
1602
|
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
896
1603
|
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
897
|
-
if (updates.timeoutMs !== void 0)
|
|
898
|
-
metadata.timeoutMs = updates.timeoutMs;
|
|
899
|
-
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
900
|
-
for (const row of result.rows) {
|
|
901
|
-
await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
|
|
902
|
-
}
|
|
903
|
-
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
904
|
-
return editedCount;
|
|
905
|
-
} catch (error) {
|
|
906
|
-
log(`Error editing pending jobs: ${error}`);
|
|
907
|
-
throw error;
|
|
908
|
-
} finally {
|
|
909
|
-
client.release();
|
|
910
|
-
}
|
|
911
|
-
}
|
|
912
|
-
async cleanupOldJobs(daysToKeep = 30) {
|
|
913
|
-
const client = await this.pool.connect();
|
|
914
|
-
try {
|
|
915
|
-
const result = await client.query(
|
|
916
|
-
`
|
|
917
|
-
DELETE FROM job_queue
|
|
918
|
-
WHERE status = 'completed'
|
|
919
|
-
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
920
|
-
RETURNING id
|
|
921
|
-
`,
|
|
922
|
-
[daysToKeep]
|
|
923
|
-
);
|
|
924
|
-
log(`Deleted ${result.rowCount} old jobs`);
|
|
925
|
-
return result.rowCount || 0;
|
|
926
|
-
} catch (error) {
|
|
927
|
-
log(`Error cleaning up old jobs: ${error}`);
|
|
928
|
-
throw error;
|
|
929
|
-
} finally {
|
|
930
|
-
client.release();
|
|
931
|
-
}
|
|
932
|
-
}
|
|
933
|
-
async cleanupOldJobEvents(daysToKeep = 30) {
|
|
934
|
-
const client = await this.pool.connect();
|
|
935
|
-
try {
|
|
936
|
-
const result = await client.query(
|
|
937
|
-
`
|
|
938
|
-
DELETE FROM job_events
|
|
939
|
-
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
940
|
-
RETURNING id
|
|
941
|
-
`,
|
|
942
|
-
[daysToKeep]
|
|
943
|
-
);
|
|
944
|
-
log(`Deleted ${result.rowCount} old job events`);
|
|
945
|
-
return result.rowCount || 0;
|
|
946
|
-
} catch (error) {
|
|
947
|
-
log(`Error cleaning up old job events: ${error}`);
|
|
948
|
-
throw error;
|
|
949
|
-
} finally {
|
|
950
|
-
client.release();
|
|
951
|
-
}
|
|
952
|
-
}
|
|
953
|
-
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
954
|
-
const client = await this.pool.connect();
|
|
955
|
-
try {
|
|
956
|
-
const result = await client.query(
|
|
957
|
-
`
|
|
958
|
-
UPDATE job_queue
|
|
959
|
-
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
960
|
-
WHERE status = 'processing'
|
|
961
|
-
AND locked_at < NOW() - GREATEST(
|
|
962
|
-
INTERVAL '1 minute' * $1::int,
|
|
963
|
-
INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
|
|
964
|
-
)
|
|
965
|
-
RETURNING id
|
|
966
|
-
`,
|
|
967
|
-
[maxProcessingTimeMinutes]
|
|
968
|
-
);
|
|
969
|
-
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
970
|
-
return result.rowCount || 0;
|
|
971
|
-
} catch (error) {
|
|
972
|
-
log(`Error reclaiming stuck jobs: ${error}`);
|
|
973
|
-
throw error;
|
|
974
|
-
} finally {
|
|
975
|
-
client.release();
|
|
976
|
-
}
|
|
977
|
-
}
|
|
978
|
-
// ── Internal helpers ──────────────────────────────────────────────────
|
|
979
|
-
/**
|
|
980
|
-
* Batch-insert multiple job events in a single query.
|
|
981
|
-
* More efficient than individual recordJobEvent calls.
|
|
982
|
-
*/
|
|
983
|
-
async recordJobEventsBatch(events) {
|
|
984
|
-
if (events.length === 0) return;
|
|
985
|
-
const client = await this.pool.connect();
|
|
986
|
-
try {
|
|
987
|
-
const values = [];
|
|
988
|
-
const params = [];
|
|
989
|
-
let paramIdx = 1;
|
|
990
|
-
for (const event of events) {
|
|
991
|
-
values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
|
|
992
|
-
params.push(
|
|
993
|
-
event.jobId,
|
|
994
|
-
event.eventType,
|
|
995
|
-
event.metadata ? JSON.stringify(event.metadata) : null
|
|
996
|
-
);
|
|
997
|
-
}
|
|
998
|
-
await client.query(
|
|
999
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
|
|
1000
|
-
params
|
|
1001
|
-
);
|
|
1002
|
-
} catch (error) {
|
|
1003
|
-
log(`Error recording batch job events: ${error}`);
|
|
1004
|
-
} finally {
|
|
1005
|
-
client.release();
|
|
1006
|
-
}
|
|
1007
|
-
}
|
|
1008
|
-
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
1009
|
-
const client = await this.pool.connect();
|
|
1010
|
-
try {
|
|
1011
|
-
let jobTypeFilter = "";
|
|
1012
|
-
const params = [reason];
|
|
1013
|
-
if (jobType) {
|
|
1014
|
-
if (Array.isArray(jobType)) {
|
|
1015
|
-
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
1016
|
-
params.push(jobType);
|
|
1017
|
-
} else {
|
|
1018
|
-
jobTypeFilter = ` AND job_type = $2`;
|
|
1019
|
-
params.push(jobType);
|
|
1020
|
-
}
|
|
1021
|
-
}
|
|
1022
|
-
await client.query(
|
|
1023
|
-
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
1024
|
-
params
|
|
1025
|
-
);
|
|
1026
|
-
} finally {
|
|
1027
|
-
client.release();
|
|
1028
|
-
}
|
|
1029
|
-
}
|
|
1030
|
-
};
|
|
1031
|
-
var recordJobEvent = async (pool, jobId, eventType, metadata) => new PostgresBackend(pool).recordJobEvent(jobId, eventType, metadata);
|
|
1032
|
-
var waitJob = async (pool, jobId, options) => {
|
|
1033
|
-
const client = await pool.connect();
|
|
1034
|
-
try {
|
|
1035
|
-
const result = await client.query(
|
|
1036
|
-
`
|
|
1037
|
-
UPDATE job_queue
|
|
1038
|
-
SET status = 'waiting',
|
|
1039
|
-
wait_until = $2,
|
|
1040
|
-
wait_token_id = $3,
|
|
1041
|
-
step_data = $4,
|
|
1042
|
-
locked_at = NULL,
|
|
1043
|
-
locked_by = NULL,
|
|
1044
|
-
updated_at = NOW()
|
|
1045
|
-
WHERE id = $1 AND status = 'processing'
|
|
1046
|
-
`,
|
|
1047
|
-
[
|
|
1048
|
-
jobId,
|
|
1049
|
-
options.waitUntil ?? null,
|
|
1050
|
-
options.waitTokenId ?? null,
|
|
1051
|
-
JSON.stringify(options.stepData)
|
|
1052
|
-
]
|
|
1053
|
-
);
|
|
1054
|
-
if (result.rowCount === 0) {
|
|
1055
|
-
log(
|
|
1056
|
-
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
1057
|
-
);
|
|
1058
|
-
return;
|
|
1059
|
-
}
|
|
1060
|
-
await recordJobEvent(pool, jobId, "waiting" /* Waiting */, {
|
|
1061
|
-
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
1062
|
-
waitTokenId: options.waitTokenId ?? null
|
|
1063
|
-
});
|
|
1064
|
-
log(`Job ${jobId} set to waiting`);
|
|
1065
|
-
} catch (error) {
|
|
1066
|
-
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
1067
|
-
throw error;
|
|
1068
|
-
} finally {
|
|
1069
|
-
client.release();
|
|
1070
|
-
}
|
|
1071
|
-
};
|
|
1072
|
-
var updateStepData = async (pool, jobId, stepData) => {
|
|
1073
|
-
const client = await pool.connect();
|
|
1074
|
-
try {
|
|
1075
|
-
await client.query(
|
|
1076
|
-
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
1077
|
-
[jobId, JSON.stringify(stepData)]
|
|
1078
|
-
);
|
|
1079
|
-
} catch (error) {
|
|
1080
|
-
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
1081
|
-
} finally {
|
|
1082
|
-
client.release();
|
|
1083
|
-
}
|
|
1084
|
-
};
|
|
1085
|
-
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
1086
|
-
function parseTimeoutString(timeout) {
|
|
1087
|
-
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
1088
|
-
if (!match) {
|
|
1089
|
-
throw new Error(
|
|
1090
|
-
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
1091
|
-
);
|
|
1092
|
-
}
|
|
1093
|
-
const value = parseInt(match[1], 10);
|
|
1094
|
-
const unit = match[2];
|
|
1095
|
-
let ms;
|
|
1096
|
-
switch (unit) {
|
|
1097
|
-
case "s":
|
|
1098
|
-
ms = value * 1e3;
|
|
1099
|
-
break;
|
|
1100
|
-
case "m":
|
|
1101
|
-
ms = value * 60 * 1e3;
|
|
1102
|
-
break;
|
|
1103
|
-
case "h":
|
|
1104
|
-
ms = value * 60 * 60 * 1e3;
|
|
1105
|
-
break;
|
|
1106
|
-
case "d":
|
|
1107
|
-
ms = value * 24 * 60 * 60 * 1e3;
|
|
1108
|
-
break;
|
|
1109
|
-
default:
|
|
1110
|
-
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
1111
|
-
}
|
|
1112
|
-
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
1113
|
-
throw new Error(
|
|
1114
|
-
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
1115
|
-
);
|
|
1116
|
-
}
|
|
1117
|
-
return ms;
|
|
1118
|
-
}
|
|
1119
|
-
var createWaitpoint = async (pool, jobId, options) => {
|
|
1120
|
-
const client = await pool.connect();
|
|
1121
|
-
try {
|
|
1122
|
-
const id = `wp_${crypto.randomUUID()}`;
|
|
1123
|
-
let timeoutAt = null;
|
|
1124
|
-
if (options?.timeout) {
|
|
1125
|
-
const ms = parseTimeoutString(options.timeout);
|
|
1126
|
-
timeoutAt = new Date(Date.now() + ms);
|
|
1127
|
-
}
|
|
1128
|
-
await client.query(
|
|
1129
|
-
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
1130
|
-
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
1131
|
-
);
|
|
1132
|
-
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
1133
|
-
return { id };
|
|
1134
|
-
} catch (error) {
|
|
1135
|
-
log(`Error creating waitpoint: ${error}`);
|
|
1136
|
-
throw error;
|
|
1137
|
-
} finally {
|
|
1138
|
-
client.release();
|
|
1139
|
-
}
|
|
1140
|
-
};
|
|
1141
|
-
var completeWaitpoint = async (pool, tokenId, data) => {
|
|
1142
|
-
const client = await pool.connect();
|
|
1143
|
-
try {
|
|
1144
|
-
await client.query("BEGIN");
|
|
1145
|
-
const wpResult = await client.query(
|
|
1146
|
-
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
1147
|
-
WHERE id = $1 AND status = 'waiting'
|
|
1148
|
-
RETURNING job_id`,
|
|
1149
|
-
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
1150
|
-
);
|
|
1151
|
-
if (wpResult.rows.length === 0) {
|
|
1152
|
-
await client.query("ROLLBACK");
|
|
1153
|
-
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
1154
|
-
return;
|
|
1155
|
-
}
|
|
1156
|
-
const jobId = wpResult.rows[0].job_id;
|
|
1157
|
-
if (jobId != null) {
|
|
1158
|
-
await client.query(
|
|
1159
|
-
`UPDATE job_queue
|
|
1160
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1161
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1162
|
-
[jobId]
|
|
1163
|
-
);
|
|
1164
|
-
}
|
|
1165
|
-
await client.query("COMMIT");
|
|
1166
|
-
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
1167
|
-
} catch (error) {
|
|
1168
|
-
await client.query("ROLLBACK");
|
|
1169
|
-
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
1170
|
-
throw error;
|
|
1171
|
-
} finally {
|
|
1172
|
-
client.release();
|
|
1173
|
-
}
|
|
1174
|
-
};
|
|
1175
|
-
var getWaitpoint = async (pool, tokenId) => {
|
|
1176
|
-
const client = await pool.connect();
|
|
1177
|
-
try {
|
|
1178
|
-
const result = await client.query(
|
|
1179
|
-
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
1180
|
-
[tokenId]
|
|
1181
|
-
);
|
|
1182
|
-
if (result.rows.length === 0) return null;
|
|
1183
|
-
return result.rows[0];
|
|
1184
|
-
} catch (error) {
|
|
1185
|
-
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
1186
|
-
throw error;
|
|
1187
|
-
} finally {
|
|
1188
|
-
client.release();
|
|
1189
|
-
}
|
|
1190
|
-
};
|
|
1191
|
-
var expireTimedOutWaitpoints = async (pool) => {
|
|
1192
|
-
const client = await pool.connect();
|
|
1193
|
-
try {
|
|
1194
|
-
await client.query("BEGIN");
|
|
1195
|
-
const result = await client.query(
|
|
1196
|
-
`UPDATE waitpoints
|
|
1197
|
-
SET status = 'timed_out'
|
|
1198
|
-
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
1199
|
-
RETURNING id, job_id`
|
|
1200
|
-
);
|
|
1201
|
-
for (const row of result.rows) {
|
|
1202
|
-
if (row.job_id != null) {
|
|
1203
|
-
await client.query(
|
|
1204
|
-
`UPDATE job_queue
|
|
1205
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1206
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1207
|
-
[row.job_id]
|
|
1208
|
-
);
|
|
1209
|
-
}
|
|
1210
|
-
}
|
|
1211
|
-
await client.query("COMMIT");
|
|
1212
|
-
const count = result.rowCount || 0;
|
|
1213
|
-
if (count > 0) {
|
|
1214
|
-
log(`Expired ${count} timed-out waitpoints`);
|
|
1215
|
-
}
|
|
1216
|
-
return count;
|
|
1217
|
-
} catch (error) {
|
|
1218
|
-
await client.query("ROLLBACK");
|
|
1219
|
-
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
1220
|
-
throw error;
|
|
1221
|
-
} finally {
|
|
1222
|
-
client.release();
|
|
1223
|
-
}
|
|
1224
|
-
};
|
|
1225
|
-
function tryExtractPool(backend) {
|
|
1226
|
-
if (backend instanceof PostgresBackend) {
|
|
1227
|
-
return backend.getPool();
|
|
1228
|
-
}
|
|
1229
|
-
return null;
|
|
1230
|
-
}
|
|
1231
|
-
function buildBasicContext(backend, jobId, baseCtx) {
|
|
1232
|
-
const waitError = () => new Error(
|
|
1233
|
-
"Wait features (waitFor, waitUntil, createToken, waitForToken, ctx.run) are currently only supported with the PostgreSQL backend."
|
|
1234
|
-
);
|
|
1235
|
-
return {
|
|
1236
|
-
prolong: baseCtx.prolong,
|
|
1237
|
-
onTimeout: baseCtx.onTimeout,
|
|
1238
|
-
run: async (_stepName, fn) => {
|
|
1239
|
-
return fn();
|
|
1240
|
-
},
|
|
1241
|
-
waitFor: async () => {
|
|
1242
|
-
throw waitError();
|
|
1243
|
-
},
|
|
1244
|
-
waitUntil: async () => {
|
|
1245
|
-
throw waitError();
|
|
1246
|
-
},
|
|
1247
|
-
createToken: async () => {
|
|
1248
|
-
throw waitError();
|
|
1249
|
-
},
|
|
1250
|
-
waitForToken: async () => {
|
|
1251
|
-
throw waitError();
|
|
1252
|
-
},
|
|
1253
|
-
setProgress: async (percent) => {
|
|
1254
|
-
if (percent < 0 || percent > 100)
|
|
1255
|
-
throw new Error("Progress must be between 0 and 100");
|
|
1256
|
-
await backend.updateProgress(jobId, Math.round(percent));
|
|
1257
|
-
}
|
|
1258
|
-
};
|
|
1259
|
-
}
|
|
1260
|
-
function validateHandlerSerializable(handler, jobType) {
|
|
1261
|
-
try {
|
|
1262
|
-
const handlerString = handler.toString();
|
|
1263
|
-
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
1264
|
-
throw new Error(
|
|
1265
|
-
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
1266
|
-
);
|
|
1267
|
-
}
|
|
1268
|
-
if (handlerString.includes("[native code]")) {
|
|
1269
|
-
throw new Error(
|
|
1270
|
-
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
1271
|
-
);
|
|
1604
|
+
if (updates.timeoutMs !== void 0)
|
|
1605
|
+
metadata.timeoutMs = updates.timeoutMs;
|
|
1606
|
+
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
1607
|
+
for (const row of result.rows) {
|
|
1608
|
+
await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
|
|
1609
|
+
}
|
|
1610
|
+
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
1611
|
+
return editedCount;
|
|
1612
|
+
} catch (error) {
|
|
1613
|
+
log(`Error editing pending jobs: ${error}`);
|
|
1614
|
+
throw error;
|
|
1615
|
+
} finally {
|
|
1616
|
+
client.release();
|
|
1272
1617
|
}
|
|
1618
|
+
}
|
|
1619
|
+
/**
|
|
1620
|
+
* Delete completed jobs older than the given number of days.
|
|
1621
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
1622
|
+
* and excessive WAL bloat at scale.
|
|
1623
|
+
*
|
|
1624
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
1625
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
1626
|
+
* @returns Total number of deleted jobs.
|
|
1627
|
+
*/
|
|
1628
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 1e3) {
|
|
1629
|
+
let totalDeleted = 0;
|
|
1273
1630
|
try {
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1631
|
+
let deletedInBatch;
|
|
1632
|
+
do {
|
|
1633
|
+
const client = await this.pool.connect();
|
|
1634
|
+
try {
|
|
1635
|
+
const result = await client.query(
|
|
1636
|
+
`
|
|
1637
|
+
DELETE FROM job_queue
|
|
1638
|
+
WHERE id IN (
|
|
1639
|
+
SELECT id FROM job_queue
|
|
1640
|
+
WHERE status = 'completed'
|
|
1641
|
+
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
1642
|
+
LIMIT $2
|
|
1643
|
+
)
|
|
1644
|
+
`,
|
|
1645
|
+
[daysToKeep, batchSize]
|
|
1646
|
+
);
|
|
1647
|
+
deletedInBatch = result.rowCount || 0;
|
|
1648
|
+
totalDeleted += deletedInBatch;
|
|
1649
|
+
} finally {
|
|
1650
|
+
client.release();
|
|
1651
|
+
}
|
|
1652
|
+
} while (deletedInBatch === batchSize);
|
|
1653
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
1654
|
+
return totalDeleted;
|
|
1655
|
+
} catch (error) {
|
|
1656
|
+
log(`Error cleaning up old jobs: ${error}`);
|
|
1282
1657
|
throw error;
|
|
1283
1658
|
}
|
|
1284
|
-
throw new Error(
|
|
1285
|
-
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
1286
|
-
);
|
|
1287
1659
|
}
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
const
|
|
1303
|
-
controller.abort();
|
|
1304
|
-
parentPort.postMessage({ type: 'timeout' });
|
|
1305
|
-
}, timeoutMs);
|
|
1306
|
-
|
|
1660
|
+
/**
|
|
1661
|
+
* Delete job events older than the given number of days.
|
|
1662
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
1663
|
+
* and excessive WAL bloat at scale.
|
|
1664
|
+
*
|
|
1665
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
1666
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
1667
|
+
* @returns Total number of deleted events.
|
|
1668
|
+
*/
|
|
1669
|
+
async cleanupOldJobEvents(daysToKeep = 30, batchSize = 1e3) {
|
|
1670
|
+
let totalDeleted = 0;
|
|
1671
|
+
try {
|
|
1672
|
+
let deletedInBatch;
|
|
1673
|
+
do {
|
|
1674
|
+
const client = await this.pool.connect();
|
|
1307
1675
|
try {
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
handlerFn = new Function('return ' + wrappedCode)();
|
|
1319
|
-
} catch (parseError) {
|
|
1320
|
-
clearTimeout(timeoutId);
|
|
1321
|
-
parentPort.postMessage({
|
|
1322
|
-
type: 'error',
|
|
1323
|
-
error: {
|
|
1324
|
-
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
1325
|
-
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
1326
|
-
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
1327
|
-
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
1328
|
-
name: 'SerializationError',
|
|
1329
|
-
},
|
|
1330
|
-
});
|
|
1331
|
-
return;
|
|
1332
|
-
}
|
|
1333
|
-
|
|
1334
|
-
// Ensure handlerFn is actually a function
|
|
1335
|
-
if (typeof handlerFn !== 'function') {
|
|
1336
|
-
clearTimeout(timeoutId);
|
|
1337
|
-
parentPort.postMessage({
|
|
1338
|
-
type: 'error',
|
|
1339
|
-
error: {
|
|
1340
|
-
message: 'Handler deserialization did not produce a function. ' +
|
|
1341
|
-
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
1342
|
-
name: 'SerializationError',
|
|
1343
|
-
},
|
|
1344
|
-
});
|
|
1345
|
-
return;
|
|
1346
|
-
}
|
|
1347
|
-
|
|
1348
|
-
handlerFn(payload, signal)
|
|
1349
|
-
.then(() => {
|
|
1350
|
-
clearTimeout(timeoutId);
|
|
1351
|
-
parentPort.postMessage({ type: 'success' });
|
|
1352
|
-
})
|
|
1353
|
-
.catch((error) => {
|
|
1354
|
-
clearTimeout(timeoutId);
|
|
1355
|
-
parentPort.postMessage({
|
|
1356
|
-
type: 'error',
|
|
1357
|
-
error: {
|
|
1358
|
-
message: error.message,
|
|
1359
|
-
stack: error.stack,
|
|
1360
|
-
name: error.name,
|
|
1361
|
-
},
|
|
1362
|
-
});
|
|
1363
|
-
});
|
|
1364
|
-
} catch (error) {
|
|
1365
|
-
clearTimeout(timeoutId);
|
|
1366
|
-
parentPort.postMessage({
|
|
1367
|
-
type: 'error',
|
|
1368
|
-
error: {
|
|
1369
|
-
message: error.message,
|
|
1370
|
-
stack: error.stack,
|
|
1371
|
-
name: error.name,
|
|
1372
|
-
},
|
|
1373
|
-
});
|
|
1374
|
-
}
|
|
1375
|
-
})();
|
|
1376
|
-
`;
|
|
1377
|
-
const worker = new worker_threads.Worker(workerCode, {
|
|
1378
|
-
eval: true,
|
|
1379
|
-
workerData: {
|
|
1380
|
-
handlerCode: handler.toString(),
|
|
1381
|
-
payload,
|
|
1382
|
-
timeoutMs
|
|
1383
|
-
}
|
|
1384
|
-
});
|
|
1385
|
-
let resolved = false;
|
|
1386
|
-
worker.on("message", (message) => {
|
|
1387
|
-
if (resolved) return;
|
|
1388
|
-
resolved = true;
|
|
1389
|
-
if (message.type === "success") {
|
|
1390
|
-
resolve();
|
|
1391
|
-
} else if (message.type === "timeout") {
|
|
1392
|
-
const timeoutError = new Error(
|
|
1393
|
-
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1394
|
-
);
|
|
1395
|
-
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1396
|
-
reject(timeoutError);
|
|
1397
|
-
} else if (message.type === "error") {
|
|
1398
|
-
const error = new Error(message.error.message);
|
|
1399
|
-
error.stack = message.error.stack;
|
|
1400
|
-
error.name = message.error.name;
|
|
1401
|
-
reject(error);
|
|
1402
|
-
}
|
|
1403
|
-
});
|
|
1404
|
-
worker.on("error", (error) => {
|
|
1405
|
-
if (resolved) return;
|
|
1406
|
-
resolved = true;
|
|
1407
|
-
reject(error);
|
|
1408
|
-
});
|
|
1409
|
-
worker.on("exit", (code) => {
|
|
1410
|
-
if (resolved) return;
|
|
1411
|
-
if (code !== 0) {
|
|
1412
|
-
resolved = true;
|
|
1413
|
-
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
1414
|
-
}
|
|
1415
|
-
});
|
|
1416
|
-
setTimeout(() => {
|
|
1417
|
-
if (!resolved) {
|
|
1418
|
-
resolved = true;
|
|
1419
|
-
worker.terminate().then(() => {
|
|
1420
|
-
const timeoutError = new Error(
|
|
1421
|
-
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1676
|
+
const result = await client.query(
|
|
1677
|
+
`
|
|
1678
|
+
DELETE FROM job_events
|
|
1679
|
+
WHERE id IN (
|
|
1680
|
+
SELECT id FROM job_events
|
|
1681
|
+
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
1682
|
+
LIMIT $2
|
|
1683
|
+
)
|
|
1684
|
+
`,
|
|
1685
|
+
[daysToKeep, batchSize]
|
|
1422
1686
|
);
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
}
|
|
1426
|
-
|
|
1427
|
-
}
|
|
1428
|
-
}
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
}
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
1436
|
-
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
1437
|
-
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
1438
|
-
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
1439
|
-
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
1440
|
-
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
1441
|
-
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
1442
|
-
if (ms <= 0) {
|
|
1443
|
-
throw new Error(
|
|
1444
|
-
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
1445
|
-
);
|
|
1687
|
+
deletedInBatch = result.rowCount || 0;
|
|
1688
|
+
totalDeleted += deletedInBatch;
|
|
1689
|
+
} finally {
|
|
1690
|
+
client.release();
|
|
1691
|
+
}
|
|
1692
|
+
} while (deletedInBatch === batchSize);
|
|
1693
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
1694
|
+
return totalDeleted;
|
|
1695
|
+
} catch (error) {
|
|
1696
|
+
log(`Error cleaning up old job events: ${error}`);
|
|
1697
|
+
throw error;
|
|
1698
|
+
}
|
|
1446
1699
|
}
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1700
|
+
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
1701
|
+
const client = await this.pool.connect();
|
|
1702
|
+
try {
|
|
1703
|
+
const result = await client.query(
|
|
1704
|
+
`
|
|
1705
|
+
UPDATE job_queue
|
|
1706
|
+
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
1707
|
+
WHERE status = 'processing'
|
|
1708
|
+
AND locked_at < NOW() - GREATEST(
|
|
1709
|
+
INTERVAL '1 minute' * $1::int,
|
|
1710
|
+
INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
|
|
1711
|
+
)
|
|
1712
|
+
RETURNING id
|
|
1713
|
+
`,
|
|
1714
|
+
[maxProcessingTimeMinutes]
|
|
1715
|
+
);
|
|
1716
|
+
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
1717
|
+
return result.rowCount || 0;
|
|
1718
|
+
} catch (error) {
|
|
1719
|
+
log(`Error reclaiming stuck jobs: ${error}`);
|
|
1720
|
+
throw error;
|
|
1721
|
+
} finally {
|
|
1722
|
+
client.release();
|
|
1723
|
+
}
|
|
1724
|
+
}
|
|
1725
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
1726
|
+
/**
|
|
1727
|
+
* Batch-insert multiple job events in a single query.
|
|
1728
|
+
* More efficient than individual recordJobEvent calls.
|
|
1729
|
+
*/
|
|
1730
|
+
async recordJobEventsBatch(events) {
|
|
1731
|
+
if (events.length === 0) return;
|
|
1732
|
+
const client = await this.pool.connect();
|
|
1733
|
+
try {
|
|
1734
|
+
const values = [];
|
|
1735
|
+
const params = [];
|
|
1736
|
+
let paramIdx = 1;
|
|
1737
|
+
for (const event of events) {
|
|
1738
|
+
values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
|
|
1739
|
+
params.push(
|
|
1740
|
+
event.jobId,
|
|
1741
|
+
event.eventType,
|
|
1742
|
+
event.metadata ? JSON.stringify(event.metadata) : null
|
|
1743
|
+
);
|
|
1470
1744
|
}
|
|
1745
|
+
await client.query(
|
|
1746
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
|
|
1747
|
+
params
|
|
1748
|
+
);
|
|
1749
|
+
} catch (error) {
|
|
1750
|
+
log(`Error recording batch job events: ${error}`);
|
|
1751
|
+
} finally {
|
|
1752
|
+
client.release();
|
|
1471
1753
|
}
|
|
1472
1754
|
}
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
if (
|
|
1505
|
-
|
|
1506
|
-
|
|
1507
|
-
}
|
|
1508
|
-
stepData[waitKey] = { type: "date", completed: false };
|
|
1509
|
-
throw new WaitSignal("date", date, void 0, stepData);
|
|
1510
|
-
},
|
|
1511
|
-
createToken: async (options) => {
|
|
1512
|
-
const token = await createWaitpoint(pool, jobId, options);
|
|
1513
|
-
return token;
|
|
1514
|
-
},
|
|
1515
|
-
waitForToken: async (tokenId) => {
|
|
1516
|
-
const waitKey = `__wait_${waitCounter++}`;
|
|
1517
|
-
const cached = stepData[waitKey];
|
|
1518
|
-
if (cached && typeof cached === "object" && cached.completed) {
|
|
1519
|
-
log(
|
|
1520
|
-
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
1755
|
+
// ── Cron schedules ──────────────────────────────────────────────────
|
|
1756
|
+
/** Create a cron schedule and return its ID. */
|
|
1757
|
+
async addCronSchedule(input) {
|
|
1758
|
+
const client = await this.pool.connect();
|
|
1759
|
+
try {
|
|
1760
|
+
const result = await client.query(
|
|
1761
|
+
`INSERT INTO cron_schedules
|
|
1762
|
+
(schedule_name, cron_expression, job_type, payload, max_attempts,
|
|
1763
|
+
priority, timeout_ms, force_kill_on_timeout, tags, timezone,
|
|
1764
|
+
allow_overlap, next_run_at)
|
|
1765
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
|
|
1766
|
+
RETURNING id`,
|
|
1767
|
+
[
|
|
1768
|
+
input.scheduleName,
|
|
1769
|
+
input.cronExpression,
|
|
1770
|
+
input.jobType,
|
|
1771
|
+
input.payload,
|
|
1772
|
+
input.maxAttempts,
|
|
1773
|
+
input.priority,
|
|
1774
|
+
input.timeoutMs,
|
|
1775
|
+
input.forceKillOnTimeout,
|
|
1776
|
+
input.tags ?? null,
|
|
1777
|
+
input.timezone,
|
|
1778
|
+
input.allowOverlap,
|
|
1779
|
+
input.nextRunAt
|
|
1780
|
+
]
|
|
1781
|
+
);
|
|
1782
|
+
const id = result.rows[0].id;
|
|
1783
|
+
log(`Added cron schedule ${id}: "${input.scheduleName}"`);
|
|
1784
|
+
return id;
|
|
1785
|
+
} catch (error) {
|
|
1786
|
+
if (error?.code === "23505") {
|
|
1787
|
+
throw new Error(
|
|
1788
|
+
`Cron schedule with name "${input.scheduleName}" already exists`
|
|
1521
1789
|
);
|
|
1522
|
-
return cached.result;
|
|
1523
|
-
}
|
|
1524
|
-
const wp = await getWaitpoint(pool, tokenId);
|
|
1525
|
-
if (wp && wp.status === "completed") {
|
|
1526
|
-
const result = {
|
|
1527
|
-
ok: true,
|
|
1528
|
-
output: wp.output
|
|
1529
|
-
};
|
|
1530
|
-
stepData[waitKey] = {
|
|
1531
|
-
type: "token",
|
|
1532
|
-
tokenId,
|
|
1533
|
-
completed: true,
|
|
1534
|
-
result
|
|
1535
|
-
};
|
|
1536
|
-
await updateStepData(pool, jobId, stepData);
|
|
1537
|
-
return result;
|
|
1538
1790
|
}
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1791
|
+
log(`Error adding cron schedule: ${error}`);
|
|
1792
|
+
throw error;
|
|
1793
|
+
} finally {
|
|
1794
|
+
client.release();
|
|
1795
|
+
}
|
|
1796
|
+
}
|
|
1797
|
+
/** Get a cron schedule by ID. */
|
|
1798
|
+
async getCronSchedule(id) {
|
|
1799
|
+
const client = await this.pool.connect();
|
|
1800
|
+
try {
|
|
1801
|
+
const result = await client.query(
|
|
1802
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1803
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1804
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1805
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1806
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1807
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1808
|
+
next_run_at AS "nextRunAt",
|
|
1809
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1810
|
+
FROM cron_schedules WHERE id = $1`,
|
|
1811
|
+
[id]
|
|
1812
|
+
);
|
|
1813
|
+
if (result.rows.length === 0) return null;
|
|
1814
|
+
return result.rows[0];
|
|
1815
|
+
} catch (error) {
|
|
1816
|
+
log(`Error getting cron schedule ${id}: ${error}`);
|
|
1817
|
+
throw error;
|
|
1818
|
+
} finally {
|
|
1819
|
+
client.release();
|
|
1820
|
+
}
|
|
1821
|
+
}
|
|
1822
|
+
/** Get a cron schedule by its unique name. */
|
|
1823
|
+
async getCronScheduleByName(name) {
|
|
1824
|
+
const client = await this.pool.connect();
|
|
1825
|
+
try {
|
|
1826
|
+
const result = await client.query(
|
|
1827
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1828
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1829
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1830
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1831
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1832
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1833
|
+
next_run_at AS "nextRunAt",
|
|
1834
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1835
|
+
FROM cron_schedules WHERE schedule_name = $1`,
|
|
1836
|
+
[name]
|
|
1837
|
+
);
|
|
1838
|
+
if (result.rows.length === 0) return null;
|
|
1839
|
+
return result.rows[0];
|
|
1840
|
+
} catch (error) {
|
|
1841
|
+
log(`Error getting cron schedule by name "${name}": ${error}`);
|
|
1842
|
+
throw error;
|
|
1843
|
+
} finally {
|
|
1844
|
+
client.release();
|
|
1845
|
+
}
|
|
1846
|
+
}
|
|
1847
|
+
/** List cron schedules, optionally filtered by status. */
|
|
1848
|
+
async listCronSchedules(status) {
|
|
1849
|
+
const client = await this.pool.connect();
|
|
1850
|
+
try {
|
|
1851
|
+
let query = `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1852
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1853
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1854
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1855
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1856
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1857
|
+
next_run_at AS "nextRunAt",
|
|
1858
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1859
|
+
FROM cron_schedules`;
|
|
1860
|
+
const params = [];
|
|
1861
|
+
if (status) {
|
|
1862
|
+
query += ` WHERE status = $1`;
|
|
1863
|
+
params.push(status);
|
|
1552
1864
|
}
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1865
|
+
query += ` ORDER BY created_at ASC`;
|
|
1866
|
+
const result = await client.query(query, params);
|
|
1867
|
+
return result.rows;
|
|
1868
|
+
} catch (error) {
|
|
1869
|
+
log(`Error listing cron schedules: ${error}`);
|
|
1870
|
+
throw error;
|
|
1871
|
+
} finally {
|
|
1872
|
+
client.release();
|
|
1560
1873
|
}
|
|
1561
|
-
};
|
|
1562
|
-
return ctx;
|
|
1563
|
-
}
|
|
1564
|
-
async function processJobWithHandlers(backend, job, jobHandlers) {
|
|
1565
|
-
const handler = jobHandlers[job.jobType];
|
|
1566
|
-
if (!handler) {
|
|
1567
|
-
await backend.setPendingReasonForUnpickedJobs(
|
|
1568
|
-
`No handler registered for job type: ${job.jobType}`,
|
|
1569
|
-
job.jobType
|
|
1570
|
-
);
|
|
1571
|
-
await backend.failJob(
|
|
1572
|
-
job.id,
|
|
1573
|
-
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
1574
|
-
"no_handler" /* NoHandler */
|
|
1575
|
-
);
|
|
1576
|
-
return;
|
|
1577
1874
|
}
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1875
|
+
/** Delete a cron schedule by ID. */
|
|
1876
|
+
async removeCronSchedule(id) {
|
|
1877
|
+
const client = await this.pool.connect();
|
|
1878
|
+
try {
|
|
1879
|
+
await client.query(`DELETE FROM cron_schedules WHERE id = $1`, [id]);
|
|
1880
|
+
log(`Removed cron schedule ${id}`);
|
|
1881
|
+
} catch (error) {
|
|
1882
|
+
log(`Error removing cron schedule ${id}: ${error}`);
|
|
1883
|
+
throw error;
|
|
1884
|
+
} finally {
|
|
1885
|
+
client.release();
|
|
1886
|
+
}
|
|
1586
1887
|
}
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
const
|
|
1624
|
-
const
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
|
|
1628
|
-
|
|
1629
|
-
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
}
|
|
1636
|
-
|
|
1637
|
-
prolong: () => {
|
|
1638
|
-
log("prolong() called but ignored: job has no timeout set");
|
|
1639
|
-
},
|
|
1640
|
-
onTimeout: () => {
|
|
1641
|
-
log("onTimeout() called but ignored: job has no timeout set");
|
|
1642
|
-
}
|
|
1643
|
-
};
|
|
1644
|
-
const ctx = pool ? buildWaitContext(backend, pool, job.id, stepData, baseCtx) : buildBasicContext(backend, job.id, baseCtx);
|
|
1645
|
-
if (forceKillOnTimeout && !hasTimeout) {
|
|
1646
|
-
log(
|
|
1647
|
-
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
1648
|
-
);
|
|
1888
|
+
/** Pause a cron schedule. */
|
|
1889
|
+
async pauseCronSchedule(id) {
|
|
1890
|
+
const client = await this.pool.connect();
|
|
1891
|
+
try {
|
|
1892
|
+
await client.query(
|
|
1893
|
+
`UPDATE cron_schedules SET status = 'paused', updated_at = NOW() WHERE id = $1`,
|
|
1894
|
+
[id]
|
|
1895
|
+
);
|
|
1896
|
+
log(`Paused cron schedule ${id}`);
|
|
1897
|
+
} catch (error) {
|
|
1898
|
+
log(`Error pausing cron schedule ${id}: ${error}`);
|
|
1899
|
+
throw error;
|
|
1900
|
+
} finally {
|
|
1901
|
+
client.release();
|
|
1902
|
+
}
|
|
1903
|
+
}
|
|
1904
|
+
/** Resume a paused cron schedule. */
|
|
1905
|
+
async resumeCronSchedule(id) {
|
|
1906
|
+
const client = await this.pool.connect();
|
|
1907
|
+
try {
|
|
1908
|
+
await client.query(
|
|
1909
|
+
`UPDATE cron_schedules SET status = 'active', updated_at = NOW() WHERE id = $1`,
|
|
1910
|
+
[id]
|
|
1911
|
+
);
|
|
1912
|
+
log(`Resumed cron schedule ${id}`);
|
|
1913
|
+
} catch (error) {
|
|
1914
|
+
log(`Error resuming cron schedule ${id}: ${error}`);
|
|
1915
|
+
throw error;
|
|
1916
|
+
} finally {
|
|
1917
|
+
client.release();
|
|
1918
|
+
}
|
|
1919
|
+
}
|
|
1920
|
+
/** Edit a cron schedule. */
|
|
1921
|
+
async editCronSchedule(id, updates, nextRunAt) {
|
|
1922
|
+
const client = await this.pool.connect();
|
|
1923
|
+
try {
|
|
1924
|
+
const updateFields = [];
|
|
1925
|
+
const params = [];
|
|
1926
|
+
let paramIdx = 1;
|
|
1927
|
+
if (updates.cronExpression !== void 0) {
|
|
1928
|
+
updateFields.push(`cron_expression = $${paramIdx++}`);
|
|
1929
|
+
params.push(updates.cronExpression);
|
|
1930
|
+
}
|
|
1931
|
+
if (updates.payload !== void 0) {
|
|
1932
|
+
updateFields.push(`payload = $${paramIdx++}`);
|
|
1933
|
+
params.push(updates.payload);
|
|
1934
|
+
}
|
|
1935
|
+
if (updates.maxAttempts !== void 0) {
|
|
1936
|
+
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
1937
|
+
params.push(updates.maxAttempts);
|
|
1649
1938
|
}
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
jobPromise,
|
|
1654
|
-
new Promise((_, reject) => {
|
|
1655
|
-
timeoutReject = reject;
|
|
1656
|
-
armTimeout(timeoutMs);
|
|
1657
|
-
})
|
|
1658
|
-
]);
|
|
1659
|
-
} else {
|
|
1660
|
-
await jobPromise;
|
|
1939
|
+
if (updates.priority !== void 0) {
|
|
1940
|
+
updateFields.push(`priority = $${paramIdx++}`);
|
|
1941
|
+
params.push(updates.priority);
|
|
1661
1942
|
}
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
|
|
1674
|
-
|
|
1675
|
-
);
|
|
1943
|
+
if (updates.timeoutMs !== void 0) {
|
|
1944
|
+
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
1945
|
+
params.push(updates.timeoutMs);
|
|
1946
|
+
}
|
|
1947
|
+
if (updates.forceKillOnTimeout !== void 0) {
|
|
1948
|
+
updateFields.push(`force_kill_on_timeout = $${paramIdx++}`);
|
|
1949
|
+
params.push(updates.forceKillOnTimeout);
|
|
1950
|
+
}
|
|
1951
|
+
if (updates.tags !== void 0) {
|
|
1952
|
+
updateFields.push(`tags = $${paramIdx++}`);
|
|
1953
|
+
params.push(updates.tags);
|
|
1954
|
+
}
|
|
1955
|
+
if (updates.timezone !== void 0) {
|
|
1956
|
+
updateFields.push(`timezone = $${paramIdx++}`);
|
|
1957
|
+
params.push(updates.timezone);
|
|
1958
|
+
}
|
|
1959
|
+
if (updates.allowOverlap !== void 0) {
|
|
1960
|
+
updateFields.push(`allow_overlap = $${paramIdx++}`);
|
|
1961
|
+
params.push(updates.allowOverlap);
|
|
1962
|
+
}
|
|
1963
|
+
if (nextRunAt !== void 0) {
|
|
1964
|
+
updateFields.push(`next_run_at = $${paramIdx++}`);
|
|
1965
|
+
params.push(nextRunAt);
|
|
1966
|
+
}
|
|
1967
|
+
if (updateFields.length === 0) {
|
|
1968
|
+
log(`No fields to update for cron schedule ${id}`);
|
|
1676
1969
|
return;
|
|
1677
1970
|
}
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
)
|
|
1681
|
-
await
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
1687
|
-
|
|
1688
|
-
console.error(`Error processing job ${job.id}:`, error);
|
|
1689
|
-
let failureReason = "handler_error" /* HandlerError */;
|
|
1690
|
-
if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
|
|
1691
|
-
failureReason = "timeout" /* Timeout */;
|
|
1971
|
+
updateFields.push(`updated_at = NOW()`);
|
|
1972
|
+
params.push(id);
|
|
1973
|
+
const query = `UPDATE cron_schedules SET ${updateFields.join(", ")} WHERE id = $${paramIdx}`;
|
|
1974
|
+
await client.query(query, params);
|
|
1975
|
+
log(`Edited cron schedule ${id}`);
|
|
1976
|
+
} catch (error) {
|
|
1977
|
+
log(`Error editing cron schedule ${id}: ${error}`);
|
|
1978
|
+
throw error;
|
|
1979
|
+
} finally {
|
|
1980
|
+
client.release();
|
|
1692
1981
|
}
|
|
1693
|
-
await backend.failJob(
|
|
1694
|
-
job.id,
|
|
1695
|
-
error instanceof Error ? error : new Error(String(error)),
|
|
1696
|
-
failureReason
|
|
1697
|
-
);
|
|
1698
|
-
}
|
|
1699
|
-
}
|
|
1700
|
-
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
|
|
1701
|
-
const jobs = await backend.getNextBatch(
|
|
1702
|
-
workerId,
|
|
1703
|
-
batchSize,
|
|
1704
|
-
jobType
|
|
1705
|
-
);
|
|
1706
|
-
if (!concurrency || concurrency >= jobs.length) {
|
|
1707
|
-
await Promise.all(
|
|
1708
|
-
jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
|
|
1709
|
-
);
|
|
1710
|
-
return jobs.length;
|
|
1711
1982
|
}
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1983
|
+
/**
|
|
1984
|
+
* Atomically fetch all active cron schedules whose nextRunAt <= NOW().
|
|
1985
|
+
* Uses FOR UPDATE SKIP LOCKED to prevent duplicate enqueuing across workers.
|
|
1986
|
+
*/
|
|
1987
|
+
async getDueCronSchedules() {
|
|
1988
|
+
const client = await this.pool.connect();
|
|
1989
|
+
try {
|
|
1990
|
+
const result = await client.query(
|
|
1991
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1992
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1993
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1994
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1995
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1996
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1997
|
+
next_run_at AS "nextRunAt",
|
|
1998
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1999
|
+
FROM cron_schedules
|
|
2000
|
+
WHERE status = 'active'
|
|
2001
|
+
AND next_run_at IS NOT NULL
|
|
2002
|
+
AND next_run_at <= NOW()
|
|
2003
|
+
ORDER BY next_run_at ASC
|
|
2004
|
+
FOR UPDATE SKIP LOCKED`
|
|
2005
|
+
);
|
|
2006
|
+
log(`Found ${result.rows.length} due cron schedules`);
|
|
2007
|
+
return result.rows;
|
|
2008
|
+
} catch (error) {
|
|
2009
|
+
if (error?.code === "42P01") {
|
|
2010
|
+
log("cron_schedules table does not exist, skipping cron enqueue");
|
|
2011
|
+
return [];
|
|
1733
2012
|
}
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
|
|
1739
|
-
|
|
1740
|
-
|
|
1741
|
-
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
} = options;
|
|
1747
|
-
let running = false;
|
|
1748
|
-
let intervalId = null;
|
|
1749
|
-
let currentBatchPromise = null;
|
|
1750
|
-
setLogContext(options.verbose ?? false);
|
|
1751
|
-
const processJobs = async () => {
|
|
1752
|
-
if (!running) return 0;
|
|
1753
|
-
log(
|
|
1754
|
-
`Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
|
|
1755
|
-
);
|
|
2013
|
+
log(`Error getting due cron schedules: ${error}`);
|
|
2014
|
+
throw error;
|
|
2015
|
+
} finally {
|
|
2016
|
+
client.release();
|
|
2017
|
+
}
|
|
2018
|
+
}
|
|
2019
|
+
/**
|
|
2020
|
+
* Update a cron schedule after a job has been enqueued.
|
|
2021
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
2022
|
+
*/
|
|
2023
|
+
async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
|
|
2024
|
+
const client = await this.pool.connect();
|
|
1756
2025
|
try {
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1760
|
-
|
|
1761
|
-
|
|
1762
|
-
|
|
1763
|
-
|
|
1764
|
-
|
|
2026
|
+
await client.query(
|
|
2027
|
+
`UPDATE cron_schedules
|
|
2028
|
+
SET last_enqueued_at = $2,
|
|
2029
|
+
last_job_id = $3,
|
|
2030
|
+
next_run_at = $4,
|
|
2031
|
+
updated_at = NOW()
|
|
2032
|
+
WHERE id = $1`,
|
|
2033
|
+
[id, lastEnqueuedAt, lastJobId, nextRunAt]
|
|
2034
|
+
);
|
|
2035
|
+
log(
|
|
2036
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
|
|
1765
2037
|
);
|
|
1766
|
-
return processed;
|
|
1767
2038
|
} catch (error) {
|
|
1768
|
-
|
|
2039
|
+
log(`Error updating cron schedule ${id} after enqueue: ${error}`);
|
|
2040
|
+
throw error;
|
|
2041
|
+
} finally {
|
|
2042
|
+
client.release();
|
|
1769
2043
|
}
|
|
1770
|
-
|
|
1771
|
-
|
|
1772
|
-
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
|
|
1776
|
-
|
|
1777
|
-
|
|
1778
|
-
|
|
1779
|
-
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
const
|
|
1783
|
-
|
|
1784
|
-
|
|
1785
|
-
|
|
1786
|
-
|
|
1787
|
-
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
|
|
1799
|
-
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
|
|
1805
|
-
|
|
1806
|
-
if (intervalId) {
|
|
1807
|
-
clearTimeout(intervalId);
|
|
1808
|
-
intervalId = null;
|
|
1809
|
-
}
|
|
1810
|
-
},
|
|
1811
|
-
/**
|
|
1812
|
-
* Stop the job processor and wait for all in-flight jobs to complete.
|
|
1813
|
-
* Useful for graceful shutdown (e.g., SIGTERM handling).
|
|
1814
|
-
*/
|
|
1815
|
-
stopAndDrain: async (drainTimeoutMs = 3e4) => {
|
|
1816
|
-
log(`Stopping and draining job processor with workerId: ${workerId}`);
|
|
1817
|
-
running = false;
|
|
1818
|
-
if (intervalId) {
|
|
1819
|
-
clearTimeout(intervalId);
|
|
1820
|
-
intervalId = null;
|
|
1821
|
-
}
|
|
1822
|
-
if (currentBatchPromise) {
|
|
1823
|
-
await Promise.race([
|
|
1824
|
-
currentBatchPromise.catch(() => {
|
|
1825
|
-
}),
|
|
1826
|
-
new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
|
|
1827
|
-
]);
|
|
1828
|
-
currentBatchPromise = null;
|
|
2044
|
+
}
|
|
2045
|
+
// ── Wait / step-data support ────────────────────────────────────────
|
|
2046
|
+
/**
|
|
2047
|
+
* Transition a job from 'processing' to 'waiting' status.
|
|
2048
|
+
* Persists step data so the handler can resume from where it left off.
|
|
2049
|
+
*
|
|
2050
|
+
* @param jobId - The job to pause.
|
|
2051
|
+
* @param options - Wait configuration including optional waitUntil date, token ID, and step data.
|
|
2052
|
+
*/
|
|
2053
|
+
async waitJob(jobId, options) {
|
|
2054
|
+
const client = await this.pool.connect();
|
|
2055
|
+
try {
|
|
2056
|
+
const result = await client.query(
|
|
2057
|
+
`
|
|
2058
|
+
UPDATE job_queue
|
|
2059
|
+
SET status = 'waiting',
|
|
2060
|
+
wait_until = $2,
|
|
2061
|
+
wait_token_id = $3,
|
|
2062
|
+
step_data = $4,
|
|
2063
|
+
locked_at = NULL,
|
|
2064
|
+
locked_by = NULL,
|
|
2065
|
+
updated_at = NOW()
|
|
2066
|
+
WHERE id = $1 AND status = 'processing'
|
|
2067
|
+
`,
|
|
2068
|
+
[
|
|
2069
|
+
jobId,
|
|
2070
|
+
options.waitUntil ?? null,
|
|
2071
|
+
options.waitTokenId ?? null,
|
|
2072
|
+
JSON.stringify(options.stepData)
|
|
2073
|
+
]
|
|
2074
|
+
);
|
|
2075
|
+
if (result.rowCount === 0) {
|
|
2076
|
+
log(
|
|
2077
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
2078
|
+
);
|
|
2079
|
+
return;
|
|
1829
2080
|
}
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1840
|
-
|
|
1841
|
-
|
|
1842
|
-
|
|
1843
|
-
|
|
1844
|
-
|
|
1845
|
-
|
|
1846
|
-
|
|
1847
|
-
|
|
1848
|
-
|
|
1849
|
-
|
|
1850
|
-
const
|
|
1851
|
-
|
|
2081
|
+
await this.recordJobEvent(jobId, "waiting" /* Waiting */, {
|
|
2082
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
2083
|
+
waitTokenId: options.waitTokenId ?? null
|
|
2084
|
+
});
|
|
2085
|
+
log(`Job ${jobId} set to waiting`);
|
|
2086
|
+
} catch (error) {
|
|
2087
|
+
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
2088
|
+
throw error;
|
|
2089
|
+
} finally {
|
|
2090
|
+
client.release();
|
|
2091
|
+
}
|
|
2092
|
+
}
|
|
2093
|
+
/**
|
|
2094
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
2095
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
2096
|
+
*
|
|
2097
|
+
* @param jobId - The job to update.
|
|
2098
|
+
* @param stepData - The step data to persist.
|
|
2099
|
+
*/
|
|
2100
|
+
async updateStepData(jobId, stepData) {
|
|
2101
|
+
const client = await this.pool.connect();
|
|
2102
|
+
try {
|
|
2103
|
+
await client.query(
|
|
2104
|
+
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
2105
|
+
[jobId, JSON.stringify(stepData)]
|
|
2106
|
+
);
|
|
2107
|
+
} catch (error) {
|
|
2108
|
+
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
2109
|
+
} finally {
|
|
2110
|
+
client.release();
|
|
2111
|
+
}
|
|
1852
2112
|
}
|
|
1853
|
-
|
|
1854
|
-
|
|
1855
|
-
|
|
1856
|
-
|
|
1857
|
-
|
|
1858
|
-
|
|
1859
|
-
|
|
1860
|
-
|
|
2113
|
+
/**
|
|
2114
|
+
* Create a waitpoint token in the database.
|
|
2115
|
+
*
|
|
2116
|
+
* @param jobId - The job ID to associate with the token (null if created outside a handler).
|
|
2117
|
+
* @param options - Optional timeout string (e.g. '10m', '1h') and tags.
|
|
2118
|
+
* @returns The created waitpoint with its unique ID.
|
|
2119
|
+
*/
|
|
2120
|
+
async createWaitpoint(jobId, options) {
|
|
2121
|
+
const client = await this.pool.connect();
|
|
1861
2122
|
try {
|
|
1862
|
-
const
|
|
1863
|
-
|
|
1864
|
-
|
|
1865
|
-
|
|
1866
|
-
|
|
2123
|
+
const id = `wp_${crypto.randomUUID()}`;
|
|
2124
|
+
let timeoutAt = null;
|
|
2125
|
+
if (options?.timeout) {
|
|
2126
|
+
const ms = parseTimeoutString(options.timeout);
|
|
2127
|
+
timeoutAt = new Date(Date.now() + ms);
|
|
1867
2128
|
}
|
|
1868
|
-
|
|
1869
|
-
|
|
1870
|
-
|
|
1871
|
-
|
|
1872
|
-
|
|
1873
|
-
|
|
1874
|
-
|
|
2129
|
+
await client.query(
|
|
2130
|
+
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
2131
|
+
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
2132
|
+
);
|
|
2133
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
2134
|
+
return { id };
|
|
2135
|
+
} catch (error) {
|
|
2136
|
+
log(`Error creating waitpoint: ${error}`);
|
|
2137
|
+
throw error;
|
|
2138
|
+
} finally {
|
|
2139
|
+
client.release();
|
|
2140
|
+
}
|
|
2141
|
+
}
|
|
2142
|
+
/**
|
|
2143
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
2144
|
+
*
|
|
2145
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
2146
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
2147
|
+
*/
|
|
2148
|
+
async completeWaitpoint(tokenId, data) {
|
|
2149
|
+
const client = await this.pool.connect();
|
|
2150
|
+
try {
|
|
2151
|
+
await client.query("BEGIN");
|
|
2152
|
+
const wpResult = await client.query(
|
|
2153
|
+
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
2154
|
+
WHERE id = $1 AND status = 'waiting'
|
|
2155
|
+
RETURNING job_id`,
|
|
2156
|
+
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
2157
|
+
);
|
|
2158
|
+
if (wpResult.rows.length === 0) {
|
|
2159
|
+
await client.query("ROLLBACK");
|
|
2160
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
2161
|
+
return;
|
|
1875
2162
|
}
|
|
1876
|
-
|
|
1877
|
-
if (
|
|
1878
|
-
|
|
2163
|
+
const jobId = wpResult.rows[0].job_id;
|
|
2164
|
+
if (jobId != null) {
|
|
2165
|
+
await client.query(
|
|
2166
|
+
`UPDATE job_queue
|
|
2167
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
2168
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
2169
|
+
[jobId]
|
|
2170
|
+
);
|
|
1879
2171
|
}
|
|
2172
|
+
await client.query("COMMIT");
|
|
2173
|
+
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
2174
|
+
} catch (error) {
|
|
2175
|
+
await client.query("ROLLBACK");
|
|
2176
|
+
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
2177
|
+
throw error;
|
|
2178
|
+
} finally {
|
|
2179
|
+
client.release();
|
|
1880
2180
|
}
|
|
1881
2181
|
}
|
|
1882
|
-
|
|
1883
|
-
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
|
|
1888
|
-
|
|
2182
|
+
/**
|
|
2183
|
+
* Retrieve a waitpoint token by its ID.
|
|
2184
|
+
*
|
|
2185
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
2186
|
+
* @returns The waitpoint record, or null if not found.
|
|
2187
|
+
*/
|
|
2188
|
+
async getWaitpoint(tokenId) {
|
|
2189
|
+
const client = await this.pool.connect();
|
|
2190
|
+
try {
|
|
2191
|
+
const result = await client.query(
|
|
2192
|
+
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
2193
|
+
[tokenId]
|
|
2194
|
+
);
|
|
2195
|
+
if (result.rows.length === 0) return null;
|
|
2196
|
+
return result.rows[0];
|
|
2197
|
+
} catch (error) {
|
|
2198
|
+
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
2199
|
+
throw error;
|
|
2200
|
+
} finally {
|
|
2201
|
+
client.release();
|
|
1889
2202
|
}
|
|
1890
|
-
const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
|
|
1891
|
-
ssl = {
|
|
1892
|
-
...ssl,
|
|
1893
|
-
...caValue ? { ca: caValue } : {},
|
|
1894
|
-
cert: loadPemOrFile(
|
|
1895
|
-
typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
|
|
1896
|
-
),
|
|
1897
|
-
key: loadPemOrFile(
|
|
1898
|
-
typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
|
|
1899
|
-
),
|
|
1900
|
-
rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
|
|
1901
|
-
};
|
|
1902
2203
|
}
|
|
1903
|
-
|
|
1904
|
-
|
|
1905
|
-
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
|
|
1911
|
-
|
|
1912
|
-
|
|
1913
|
-
|
|
1914
|
-
|
|
1915
|
-
|
|
1916
|
-
|
|
2204
|
+
/**
|
|
2205
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
2206
|
+
*
|
|
2207
|
+
* @returns The number of tokens that were expired.
|
|
2208
|
+
*/
|
|
2209
|
+
async expireTimedOutWaitpoints() {
|
|
2210
|
+
const client = await this.pool.connect();
|
|
2211
|
+
try {
|
|
2212
|
+
await client.query("BEGIN");
|
|
2213
|
+
const result = await client.query(
|
|
2214
|
+
`UPDATE waitpoints
|
|
2215
|
+
SET status = 'timed_out'
|
|
2216
|
+
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
2217
|
+
RETURNING id, job_id`
|
|
2218
|
+
);
|
|
2219
|
+
for (const row of result.rows) {
|
|
2220
|
+
if (row.job_id != null) {
|
|
2221
|
+
await client.query(
|
|
2222
|
+
`UPDATE job_queue
|
|
2223
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
2224
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
2225
|
+
[row.job_id]
|
|
2226
|
+
);
|
|
2227
|
+
}
|
|
2228
|
+
}
|
|
2229
|
+
await client.query("COMMIT");
|
|
2230
|
+
const count = result.rowCount || 0;
|
|
2231
|
+
if (count > 0) {
|
|
2232
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
2233
|
+
}
|
|
2234
|
+
return count;
|
|
2235
|
+
} catch (error) {
|
|
2236
|
+
await client.query("ROLLBACK");
|
|
2237
|
+
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
2238
|
+
throw error;
|
|
2239
|
+
} finally {
|
|
2240
|
+
client.release();
|
|
2241
|
+
}
|
|
1917
2242
|
}
|
|
1918
|
-
|
|
1919
|
-
|
|
1920
|
-
|
|
1921
|
-
|
|
1922
|
-
|
|
1923
|
-
|
|
1924
|
-
|
|
1925
|
-
|
|
2243
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2244
|
+
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
2245
|
+
const client = await this.pool.connect();
|
|
2246
|
+
try {
|
|
2247
|
+
let jobTypeFilter = "";
|
|
2248
|
+
const params = [reason];
|
|
2249
|
+
if (jobType) {
|
|
2250
|
+
if (Array.isArray(jobType)) {
|
|
2251
|
+
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
2252
|
+
params.push(jobType);
|
|
2253
|
+
} else {
|
|
2254
|
+
jobTypeFilter = ` AND job_type = $2`;
|
|
2255
|
+
params.push(jobType);
|
|
2256
|
+
}
|
|
2257
|
+
}
|
|
2258
|
+
await client.query(
|
|
2259
|
+
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
2260
|
+
params
|
|
2261
|
+
);
|
|
2262
|
+
} finally {
|
|
2263
|
+
client.release();
|
|
2264
|
+
}
|
|
1926
2265
|
}
|
|
1927
|
-
return pool;
|
|
1928
2266
|
};
|
|
1929
2267
|
|
|
1930
2268
|
// src/backends/redis-scripts.ts
|
|
@@ -1981,7 +2319,10 @@ redis.call('HMSET', jobKey,
|
|
|
1981
2319
|
'lastFailedAt', 'null',
|
|
1982
2320
|
'lastCancelledAt', 'null',
|
|
1983
2321
|
'tags', tagsJson,
|
|
1984
|
-
'idempotencyKey', idempotencyKey
|
|
2322
|
+
'idempotencyKey', idempotencyKey,
|
|
2323
|
+
'waitUntil', 'null',
|
|
2324
|
+
'waitTokenId', 'null',
|
|
2325
|
+
'stepData', 'null'
|
|
1985
2326
|
)
|
|
1986
2327
|
|
|
1987
2328
|
-- Status index
|
|
@@ -2064,7 +2405,25 @@ for _, jobId in ipairs(retries) do
|
|
|
2064
2405
|
redis.call('ZREM', prefix .. 'retry', jobId)
|
|
2065
2406
|
end
|
|
2066
2407
|
|
|
2067
|
-
-- 3.
|
|
2408
|
+
-- 3. Move ready waiting jobs (time-based, no token) into queue
|
|
2409
|
+
local waitingJobs = redis.call('ZRANGEBYSCORE', prefix .. 'waiting', '-inf', nowMs, 'LIMIT', 0, 200)
|
|
2410
|
+
for _, jobId in ipairs(waitingJobs) do
|
|
2411
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2412
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2413
|
+
local waitTokenId = redis.call('HGET', jk, 'waitTokenId')
|
|
2414
|
+
if status == 'waiting' and (waitTokenId == false or waitTokenId == 'null') then
|
|
2415
|
+
local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2416
|
+
local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2417
|
+
local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
|
|
2418
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2419
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2420
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2421
|
+
redis.call('HMSET', jk, 'status', 'pending', 'waitUntil', 'null')
|
|
2422
|
+
end
|
|
2423
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2424
|
+
end
|
|
2425
|
+
|
|
2426
|
+
-- 4. Parse job type filter
|
|
2068
2427
|
local filterTypes = nil
|
|
2069
2428
|
if jobTypeFilter ~= "null" then
|
|
2070
2429
|
-- Could be a JSON array or a plain string
|
|
@@ -2077,7 +2436,7 @@ if jobTypeFilter ~= "null" then
|
|
|
2077
2436
|
end
|
|
2078
2437
|
end
|
|
2079
2438
|
|
|
2080
|
-
--
|
|
2439
|
+
-- 5. Pop candidates from queue (highest score first)
|
|
2081
2440
|
-- We pop more than batchSize because some may be filtered out
|
|
2082
2441
|
local popCount = batchSize * 3
|
|
2083
2442
|
local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
|
|
@@ -2161,7 +2520,10 @@ local jk = prefix .. 'job:' .. jobId
|
|
|
2161
2520
|
redis.call('HMSET', jk,
|
|
2162
2521
|
'status', 'completed',
|
|
2163
2522
|
'updatedAt', nowMs,
|
|
2164
|
-
'completedAt', nowMs
|
|
2523
|
+
'completedAt', nowMs,
|
|
2524
|
+
'stepData', 'null',
|
|
2525
|
+
'waitUntil', 'null',
|
|
2526
|
+
'waitTokenId', 'null'
|
|
2165
2527
|
)
|
|
2166
2528
|
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2167
2529
|
redis.call('SADD', prefix .. 'status:completed', jobId)
|
|
@@ -2220,6 +2582,7 @@ local nowMs = tonumber(ARGV[2])
|
|
|
2220
2582
|
local jk = prefix .. 'job:' .. jobId
|
|
2221
2583
|
|
|
2222
2584
|
local oldStatus = redis.call('HGET', jk, 'status')
|
|
2585
|
+
if oldStatus ~= 'failed' and oldStatus ~= 'processing' then return 0 end
|
|
2223
2586
|
|
|
2224
2587
|
redis.call('HMSET', jk,
|
|
2225
2588
|
'status', 'pending',
|
|
@@ -2231,9 +2594,7 @@ redis.call('HMSET', jk,
|
|
|
2231
2594
|
)
|
|
2232
2595
|
|
|
2233
2596
|
-- Remove from old status, add to pending
|
|
2234
|
-
|
|
2235
|
-
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2236
|
-
end
|
|
2597
|
+
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2237
2598
|
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2238
2599
|
|
|
2239
2600
|
-- Remove from retry sorted set if present
|
|
@@ -2254,18 +2615,21 @@ local nowMs = ARGV[2]
|
|
|
2254
2615
|
local jk = prefix .. 'job:' .. jobId
|
|
2255
2616
|
|
|
2256
2617
|
local status = redis.call('HGET', jk, 'status')
|
|
2257
|
-
if status ~= 'pending' then return 0 end
|
|
2618
|
+
if status ~= 'pending' and status ~= 'waiting' then return 0 end
|
|
2258
2619
|
|
|
2259
2620
|
redis.call('HMSET', jk,
|
|
2260
2621
|
'status', 'cancelled',
|
|
2261
2622
|
'updatedAt', nowMs,
|
|
2262
|
-
'lastCancelledAt', nowMs
|
|
2623
|
+
'lastCancelledAt', nowMs,
|
|
2624
|
+
'waitUntil', 'null',
|
|
2625
|
+
'waitTokenId', 'null'
|
|
2263
2626
|
)
|
|
2264
|
-
redis.call('SREM', prefix .. 'status:
|
|
2627
|
+
redis.call('SREM', prefix .. 'status:' .. status, jobId)
|
|
2265
2628
|
redis.call('SADD', prefix .. 'status:cancelled', jobId)
|
|
2266
|
-
-- Remove from queue / delayed
|
|
2629
|
+
-- Remove from queue / delayed / waiting
|
|
2267
2630
|
redis.call('ZREM', prefix .. 'queue', jobId)
|
|
2268
2631
|
redis.call('ZREM', prefix .. 'delayed', jobId)
|
|
2632
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2269
2633
|
|
|
2270
2634
|
return 1
|
|
2271
2635
|
`;
|
|
@@ -2333,18 +2697,16 @@ end
|
|
|
2333
2697
|
|
|
2334
2698
|
return count
|
|
2335
2699
|
`;
|
|
2336
|
-
var
|
|
2700
|
+
var CLEANUP_OLD_JOBS_BATCH_SCRIPT = `
|
|
2337
2701
|
local prefix = KEYS[1]
|
|
2338
2702
|
local cutoffMs = tonumber(ARGV[1])
|
|
2339
|
-
|
|
2340
|
-
local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
|
|
2341
2703
|
local count = 0
|
|
2342
2704
|
|
|
2343
|
-
for
|
|
2705
|
+
for i = 2, #ARGV do
|
|
2706
|
+
local jobId = ARGV[i]
|
|
2344
2707
|
local jk = prefix .. 'job:' .. jobId
|
|
2345
2708
|
local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
|
|
2346
2709
|
if updatedAt and updatedAt < cutoffMs then
|
|
2347
|
-
-- Remove all indexes
|
|
2348
2710
|
local jobType = redis.call('HGET', jk, 'jobType')
|
|
2349
2711
|
local tagsJson = redis.call('HGET', jk, 'tags')
|
|
2350
2712
|
local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
|
|
@@ -2367,7 +2729,6 @@ for _, jobId in ipairs(completed) do
|
|
|
2367
2729
|
if idempotencyKey and idempotencyKey ~= 'null' then
|
|
2368
2730
|
redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
|
|
2369
2731
|
end
|
|
2370
|
-
-- Delete events
|
|
2371
2732
|
redis.call('DEL', prefix .. 'events:' .. jobId)
|
|
2372
2733
|
|
|
2373
2734
|
count = count + 1
|
|
@@ -2376,8 +2737,158 @@ end
|
|
|
2376
2737
|
|
|
2377
2738
|
return count
|
|
2378
2739
|
`;
|
|
2740
|
+
var WAIT_JOB_SCRIPT = `
|
|
2741
|
+
local prefix = KEYS[1]
|
|
2742
|
+
local jobId = ARGV[1]
|
|
2743
|
+
local waitUntilMs = ARGV[2]
|
|
2744
|
+
local waitTokenId = ARGV[3]
|
|
2745
|
+
local stepDataJson = ARGV[4]
|
|
2746
|
+
local nowMs = ARGV[5]
|
|
2747
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2748
|
+
|
|
2749
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2750
|
+
if status ~= 'processing' then return 0 end
|
|
2751
|
+
|
|
2752
|
+
redis.call('HMSET', jk,
|
|
2753
|
+
'status', 'waiting',
|
|
2754
|
+
'waitUntil', waitUntilMs,
|
|
2755
|
+
'waitTokenId', waitTokenId,
|
|
2756
|
+
'stepData', stepDataJson,
|
|
2757
|
+
'lockedAt', 'null',
|
|
2758
|
+
'lockedBy', 'null',
|
|
2759
|
+
'updatedAt', nowMs
|
|
2760
|
+
)
|
|
2761
|
+
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2762
|
+
redis.call('SADD', prefix .. 'status:waiting', jobId)
|
|
2763
|
+
|
|
2764
|
+
-- Add to waiting sorted set if time-based wait
|
|
2765
|
+
if waitUntilMs ~= 'null' then
|
|
2766
|
+
redis.call('ZADD', prefix .. 'waiting', tonumber(waitUntilMs), jobId)
|
|
2767
|
+
end
|
|
2768
|
+
|
|
2769
|
+
return 1
|
|
2770
|
+
`;
|
|
2771
|
+
var COMPLETE_WAITPOINT_SCRIPT = `
|
|
2772
|
+
local prefix = KEYS[1]
|
|
2773
|
+
local tokenId = ARGV[1]
|
|
2774
|
+
local outputJson = ARGV[2]
|
|
2775
|
+
local nowMs = ARGV[3]
|
|
2776
|
+
local wpk = prefix .. 'waitpoint:' .. tokenId
|
|
2777
|
+
|
|
2778
|
+
local wpStatus = redis.call('HGET', wpk, 'status')
|
|
2779
|
+
if not wpStatus or wpStatus ~= 'waiting' then return 0 end
|
|
2780
|
+
|
|
2781
|
+
redis.call('HMSET', wpk,
|
|
2782
|
+
'status', 'completed',
|
|
2783
|
+
'output', outputJson,
|
|
2784
|
+
'completedAt', nowMs
|
|
2785
|
+
)
|
|
2786
|
+
|
|
2787
|
+
-- Move associated job back to pending
|
|
2788
|
+
local jobId = redis.call('HGET', wpk, 'jobId')
|
|
2789
|
+
if jobId and jobId ~= 'null' then
|
|
2790
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2791
|
+
local jobStatus = redis.call('HGET', jk, 'status')
|
|
2792
|
+
if jobStatus == 'waiting' then
|
|
2793
|
+
redis.call('HMSET', jk,
|
|
2794
|
+
'status', 'pending',
|
|
2795
|
+
'waitTokenId', 'null',
|
|
2796
|
+
'waitUntil', 'null',
|
|
2797
|
+
'updatedAt', nowMs
|
|
2798
|
+
)
|
|
2799
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2800
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2801
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2802
|
+
|
|
2803
|
+
-- Re-add to queue
|
|
2804
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2805
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2806
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
2807
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2808
|
+
end
|
|
2809
|
+
end
|
|
2810
|
+
|
|
2811
|
+
return 1
|
|
2812
|
+
`;
|
|
2813
|
+
var EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT = `
|
|
2814
|
+
local prefix = KEYS[1]
|
|
2815
|
+
local nowMs = tonumber(ARGV[1])
|
|
2816
|
+
|
|
2817
|
+
local expiredIds = redis.call('ZRANGEBYSCORE', prefix .. 'waitpoint_timeout', '-inf', nowMs)
|
|
2818
|
+
local count = 0
|
|
2819
|
+
|
|
2820
|
+
for _, tokenId in ipairs(expiredIds) do
|
|
2821
|
+
local wpk = prefix .. 'waitpoint:' .. tokenId
|
|
2822
|
+
local wpStatus = redis.call('HGET', wpk, 'status')
|
|
2823
|
+
if wpStatus == 'waiting' then
|
|
2824
|
+
redis.call('HMSET', wpk,
|
|
2825
|
+
'status', 'timed_out'
|
|
2826
|
+
)
|
|
2827
|
+
|
|
2828
|
+
-- Move associated job back to pending
|
|
2829
|
+
local jobId = redis.call('HGET', wpk, 'jobId')
|
|
2830
|
+
if jobId and jobId ~= 'null' then
|
|
2831
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2832
|
+
local jobStatus = redis.call('HGET', jk, 'status')
|
|
2833
|
+
if jobStatus == 'waiting' then
|
|
2834
|
+
redis.call('HMSET', jk,
|
|
2835
|
+
'status', 'pending',
|
|
2836
|
+
'waitTokenId', 'null',
|
|
2837
|
+
'waitUntil', 'null',
|
|
2838
|
+
'updatedAt', nowMs
|
|
2839
|
+
)
|
|
2840
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2841
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2842
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2379
2843
|
|
|
2380
|
-
|
|
2844
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2845
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2846
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
2847
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2848
|
+
end
|
|
2849
|
+
end
|
|
2850
|
+
|
|
2851
|
+
count = count + 1
|
|
2852
|
+
end
|
|
2853
|
+
redis.call('ZREM', prefix .. 'waitpoint_timeout', tokenId)
|
|
2854
|
+
end
|
|
2855
|
+
|
|
2856
|
+
return count
|
|
2857
|
+
`;
|
|
2858
|
+
var MAX_TIMEOUT_MS2 = 365 * 24 * 60 * 60 * 1e3;
|
|
2859
|
+
function parseTimeoutString2(timeout) {
|
|
2860
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
2861
|
+
if (!match) {
|
|
2862
|
+
throw new Error(
|
|
2863
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
2864
|
+
);
|
|
2865
|
+
}
|
|
2866
|
+
const value = parseInt(match[1], 10);
|
|
2867
|
+
const unit = match[2];
|
|
2868
|
+
let ms;
|
|
2869
|
+
switch (unit) {
|
|
2870
|
+
case "s":
|
|
2871
|
+
ms = value * 1e3;
|
|
2872
|
+
break;
|
|
2873
|
+
case "m":
|
|
2874
|
+
ms = value * 60 * 1e3;
|
|
2875
|
+
break;
|
|
2876
|
+
case "h":
|
|
2877
|
+
ms = value * 60 * 60 * 1e3;
|
|
2878
|
+
break;
|
|
2879
|
+
case "d":
|
|
2880
|
+
ms = value * 24 * 60 * 60 * 1e3;
|
|
2881
|
+
break;
|
|
2882
|
+
default:
|
|
2883
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
2884
|
+
}
|
|
2885
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS2) {
|
|
2886
|
+
throw new Error(
|
|
2887
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
2888
|
+
);
|
|
2889
|
+
}
|
|
2890
|
+
return ms;
|
|
2891
|
+
}
|
|
2381
2892
|
function hashToObject(arr) {
|
|
2382
2893
|
const obj = {};
|
|
2383
2894
|
for (let i = 0; i < arr.length; i += 2) {
|
|
@@ -2443,9 +2954,20 @@ function deserializeJob(h) {
|
|
|
2443
2954
|
lastCancelledAt: dateOrNull(h.lastCancelledAt),
|
|
2444
2955
|
tags,
|
|
2445
2956
|
idempotencyKey: nullish(h.idempotencyKey),
|
|
2446
|
-
progress: numOrNull(h.progress)
|
|
2957
|
+
progress: numOrNull(h.progress),
|
|
2958
|
+
waitUntil: dateOrNull(h.waitUntil),
|
|
2959
|
+
waitTokenId: nullish(h.waitTokenId),
|
|
2960
|
+
stepData: parseStepData(h.stepData)
|
|
2447
2961
|
};
|
|
2448
2962
|
}
|
|
2963
|
+
function parseStepData(raw) {
|
|
2964
|
+
if (!raw || raw === "null") return void 0;
|
|
2965
|
+
try {
|
|
2966
|
+
return JSON.parse(raw);
|
|
2967
|
+
} catch {
|
|
2968
|
+
return void 0;
|
|
2969
|
+
}
|
|
2970
|
+
}
|
|
2449
2971
|
var RedisBackend = class {
|
|
2450
2972
|
constructor(redisConfig) {
|
|
2451
2973
|
let IORedis;
|
|
@@ -2601,8 +3123,14 @@ var RedisBackend = class {
|
|
|
2601
3123
|
if (filters.runAt) {
|
|
2602
3124
|
jobs = this.filterByRunAt(jobs, filters.runAt);
|
|
2603
3125
|
}
|
|
3126
|
+
if (filters.cursor !== void 0) {
|
|
3127
|
+
jobs = jobs.filter((j) => j.id < filters.cursor);
|
|
3128
|
+
}
|
|
3129
|
+
}
|
|
3130
|
+
jobs.sort((a, b) => b.id - a.id);
|
|
3131
|
+
if (filters?.cursor !== void 0) {
|
|
3132
|
+
return jobs.slice(0, limit);
|
|
2604
3133
|
}
|
|
2605
|
-
jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
|
|
2606
3134
|
return jobs.slice(offset, offset + limit);
|
|
2607
3135
|
}
|
|
2608
3136
|
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
@@ -2834,22 +3362,104 @@ var RedisBackend = class {
|
|
|
2834
3362
|
log(`Edited ${count} pending jobs`);
|
|
2835
3363
|
return count;
|
|
2836
3364
|
}
|
|
2837
|
-
|
|
3365
|
+
/**
|
|
3366
|
+
* Delete completed jobs older than the given number of days.
|
|
3367
|
+
* Uses SSCAN to iterate the completed set in batches, avoiding
|
|
3368
|
+
* loading all IDs into memory and preventing long Redis blocks.
|
|
3369
|
+
*
|
|
3370
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
3371
|
+
* @param batchSize - Number of IDs to scan per SSCAN iteration (default 200).
|
|
3372
|
+
* @returns Total number of deleted jobs.
|
|
3373
|
+
*/
|
|
3374
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 200) {
|
|
2838
3375
|
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
2839
|
-
const
|
|
2840
|
-
|
|
2841
|
-
|
|
2842
|
-
|
|
2843
|
-
|
|
2844
|
-
|
|
2845
|
-
|
|
2846
|
-
|
|
3376
|
+
const setKey = `${this.prefix}status:completed`;
|
|
3377
|
+
let totalDeleted = 0;
|
|
3378
|
+
let cursor = "0";
|
|
3379
|
+
do {
|
|
3380
|
+
const [nextCursor, ids] = await this.client.sscan(
|
|
3381
|
+
setKey,
|
|
3382
|
+
cursor,
|
|
3383
|
+
"COUNT",
|
|
3384
|
+
batchSize
|
|
3385
|
+
);
|
|
3386
|
+
cursor = nextCursor;
|
|
3387
|
+
if (ids.length > 0) {
|
|
3388
|
+
const result = await this.client.eval(
|
|
3389
|
+
CLEANUP_OLD_JOBS_BATCH_SCRIPT,
|
|
3390
|
+
1,
|
|
3391
|
+
this.prefix,
|
|
3392
|
+
cutoffMs,
|
|
3393
|
+
...ids
|
|
3394
|
+
);
|
|
3395
|
+
totalDeleted += Number(result);
|
|
3396
|
+
}
|
|
3397
|
+
} while (cursor !== "0");
|
|
3398
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
3399
|
+
return totalDeleted;
|
|
2847
3400
|
}
|
|
2848
|
-
|
|
2849
|
-
|
|
2850
|
-
|
|
2851
|
-
|
|
2852
|
-
|
|
3401
|
+
/**
|
|
3402
|
+
* Delete job events older than the given number of days.
|
|
3403
|
+
* Iterates all event lists and removes events whose createdAt is before the cutoff.
|
|
3404
|
+
* Also removes orphaned event lists (where the job no longer exists).
|
|
3405
|
+
*
|
|
3406
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
3407
|
+
* @param batchSize - Number of event keys to scan per SCAN iteration (default 200).
|
|
3408
|
+
* @returns Total number of deleted events.
|
|
3409
|
+
*/
|
|
3410
|
+
async cleanupOldJobEvents(daysToKeep = 30, batchSize = 200) {
|
|
3411
|
+
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
3412
|
+
const pattern = `${this.prefix}events:*`;
|
|
3413
|
+
let totalDeleted = 0;
|
|
3414
|
+
let cursor = "0";
|
|
3415
|
+
do {
|
|
3416
|
+
const [nextCursor, keys] = await this.client.scan(
|
|
3417
|
+
cursor,
|
|
3418
|
+
"MATCH",
|
|
3419
|
+
pattern,
|
|
3420
|
+
"COUNT",
|
|
3421
|
+
batchSize
|
|
3422
|
+
);
|
|
3423
|
+
cursor = nextCursor;
|
|
3424
|
+
for (const key of keys) {
|
|
3425
|
+
const jobIdStr = key.slice(`${this.prefix}events:`.length);
|
|
3426
|
+
const jobExists = await this.client.exists(
|
|
3427
|
+
`${this.prefix}job:${jobIdStr}`
|
|
3428
|
+
);
|
|
3429
|
+
if (!jobExists) {
|
|
3430
|
+
const len = await this.client.llen(key);
|
|
3431
|
+
await this.client.del(key);
|
|
3432
|
+
totalDeleted += len;
|
|
3433
|
+
continue;
|
|
3434
|
+
}
|
|
3435
|
+
const events = await this.client.lrange(key, 0, -1);
|
|
3436
|
+
const kept = [];
|
|
3437
|
+
for (const raw of events) {
|
|
3438
|
+
try {
|
|
3439
|
+
const e = JSON.parse(raw);
|
|
3440
|
+
if (e.createdAt >= cutoffMs) {
|
|
3441
|
+
kept.push(raw);
|
|
3442
|
+
} else {
|
|
3443
|
+
totalDeleted++;
|
|
3444
|
+
}
|
|
3445
|
+
} catch {
|
|
3446
|
+
totalDeleted++;
|
|
3447
|
+
}
|
|
3448
|
+
}
|
|
3449
|
+
if (kept.length === 0) {
|
|
3450
|
+
await this.client.del(key);
|
|
3451
|
+
} else if (kept.length < events.length) {
|
|
3452
|
+
const pipeline = this.client.pipeline();
|
|
3453
|
+
pipeline.del(key);
|
|
3454
|
+
for (const raw of kept) {
|
|
3455
|
+
pipeline.rpush(key, raw);
|
|
3456
|
+
}
|
|
3457
|
+
await pipeline.exec();
|
|
3458
|
+
}
|
|
3459
|
+
}
|
|
3460
|
+
} while (cursor !== "0");
|
|
3461
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
3462
|
+
return totalDeleted;
|
|
2853
3463
|
}
|
|
2854
3464
|
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
2855
3465
|
const maxAgeMs = maxProcessingTimeMinutes * 60 * 1e3;
|
|
@@ -2864,6 +3474,191 @@ var RedisBackend = class {
|
|
|
2864
3474
|
log(`Reclaimed ${result} stuck jobs`);
|
|
2865
3475
|
return Number(result);
|
|
2866
3476
|
}
|
|
3477
|
+
// ── Wait / step-data support ────────────────────────────────────────
|
|
3478
|
+
/**
|
|
3479
|
+
* Transition a job from 'processing' to 'waiting' status.
|
|
3480
|
+
* Persists step data so the handler can resume from where it left off.
|
|
3481
|
+
*
|
|
3482
|
+
* @param jobId - The job to pause.
|
|
3483
|
+
* @param options - Wait configuration including optional waitUntil date, token ID, and step data.
|
|
3484
|
+
*/
|
|
3485
|
+
async waitJob(jobId, options) {
|
|
3486
|
+
const now = this.nowMs();
|
|
3487
|
+
const waitUntilMs = options.waitUntil ? options.waitUntil.getTime().toString() : "null";
|
|
3488
|
+
const waitTokenId = options.waitTokenId ?? "null";
|
|
3489
|
+
const stepDataJson = JSON.stringify(options.stepData);
|
|
3490
|
+
const result = await this.client.eval(
|
|
3491
|
+
WAIT_JOB_SCRIPT,
|
|
3492
|
+
1,
|
|
3493
|
+
this.prefix,
|
|
3494
|
+
jobId,
|
|
3495
|
+
waitUntilMs,
|
|
3496
|
+
waitTokenId,
|
|
3497
|
+
stepDataJson,
|
|
3498
|
+
now
|
|
3499
|
+
);
|
|
3500
|
+
if (Number(result) === 0) {
|
|
3501
|
+
log(
|
|
3502
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
3503
|
+
);
|
|
3504
|
+
return;
|
|
3505
|
+
}
|
|
3506
|
+
await this.recordJobEvent(jobId, "waiting" /* Waiting */, {
|
|
3507
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
3508
|
+
waitTokenId: options.waitTokenId ?? null
|
|
3509
|
+
});
|
|
3510
|
+
log(`Job ${jobId} set to waiting`);
|
|
3511
|
+
}
|
|
3512
|
+
/**
|
|
3513
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
3514
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
3515
|
+
*
|
|
3516
|
+
* @param jobId - The job to update.
|
|
3517
|
+
* @param stepData - The step data to persist.
|
|
3518
|
+
*/
|
|
3519
|
+
async updateStepData(jobId, stepData) {
|
|
3520
|
+
try {
|
|
3521
|
+
const now = this.nowMs();
|
|
3522
|
+
await this.client.hset(
|
|
3523
|
+
`${this.prefix}job:${jobId}`,
|
|
3524
|
+
"stepData",
|
|
3525
|
+
JSON.stringify(stepData),
|
|
3526
|
+
"updatedAt",
|
|
3527
|
+
now.toString()
|
|
3528
|
+
);
|
|
3529
|
+
} catch (error) {
|
|
3530
|
+
log(`Error updating stepData for job ${jobId}: ${error}`);
|
|
3531
|
+
}
|
|
3532
|
+
}
|
|
3533
|
+
/**
|
|
3534
|
+
* Create a waitpoint token.
|
|
3535
|
+
*
|
|
3536
|
+
* @param jobId - The job ID to associate with the token (null if created outside a handler).
|
|
3537
|
+
* @param options - Optional timeout string (e.g. '10m', '1h') and tags.
|
|
3538
|
+
* @returns The created waitpoint with its unique ID.
|
|
3539
|
+
*/
|
|
3540
|
+
async createWaitpoint(jobId, options) {
|
|
3541
|
+
const id = `wp_${crypto.randomUUID()}`;
|
|
3542
|
+
const now = this.nowMs();
|
|
3543
|
+
let timeoutAt = null;
|
|
3544
|
+
if (options?.timeout) {
|
|
3545
|
+
const ms = parseTimeoutString2(options.timeout);
|
|
3546
|
+
timeoutAt = now + ms;
|
|
3547
|
+
}
|
|
3548
|
+
const key = `${this.prefix}waitpoint:${id}`;
|
|
3549
|
+
const fields = [
|
|
3550
|
+
"id",
|
|
3551
|
+
id,
|
|
3552
|
+
"jobId",
|
|
3553
|
+
jobId !== null ? jobId.toString() : "null",
|
|
3554
|
+
"status",
|
|
3555
|
+
"waiting",
|
|
3556
|
+
"output",
|
|
3557
|
+
"null",
|
|
3558
|
+
"timeoutAt",
|
|
3559
|
+
timeoutAt !== null ? timeoutAt.toString() : "null",
|
|
3560
|
+
"createdAt",
|
|
3561
|
+
now.toString(),
|
|
3562
|
+
"completedAt",
|
|
3563
|
+
"null",
|
|
3564
|
+
"tags",
|
|
3565
|
+
options?.tags ? JSON.stringify(options.tags) : "null"
|
|
3566
|
+
];
|
|
3567
|
+
await this.client.hmset(key, ...fields);
|
|
3568
|
+
if (timeoutAt !== null) {
|
|
3569
|
+
await this.client.zadd(`${this.prefix}waitpoint_timeout`, timeoutAt, id);
|
|
3570
|
+
}
|
|
3571
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
3572
|
+
return { id };
|
|
3573
|
+
}
|
|
3574
|
+
/**
|
|
3575
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
3576
|
+
*
|
|
3577
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
3578
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
3579
|
+
*/
|
|
3580
|
+
async completeWaitpoint(tokenId, data) {
|
|
3581
|
+
const now = this.nowMs();
|
|
3582
|
+
const outputJson = data != null ? JSON.stringify(data) : "null";
|
|
3583
|
+
const result = await this.client.eval(
|
|
3584
|
+
COMPLETE_WAITPOINT_SCRIPT,
|
|
3585
|
+
1,
|
|
3586
|
+
this.prefix,
|
|
3587
|
+
tokenId,
|
|
3588
|
+
outputJson,
|
|
3589
|
+
now
|
|
3590
|
+
);
|
|
3591
|
+
if (Number(result) === 0) {
|
|
3592
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
3593
|
+
return;
|
|
3594
|
+
}
|
|
3595
|
+
log(`Completed waitpoint ${tokenId}`);
|
|
3596
|
+
}
|
|
3597
|
+
/**
|
|
3598
|
+
* Retrieve a waitpoint token by its ID.
|
|
3599
|
+
*
|
|
3600
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
3601
|
+
* @returns The waitpoint record, or null if not found.
|
|
3602
|
+
*/
|
|
3603
|
+
async getWaitpoint(tokenId) {
|
|
3604
|
+
const data = await this.client.hgetall(
|
|
3605
|
+
`${this.prefix}waitpoint:${tokenId}`
|
|
3606
|
+
);
|
|
3607
|
+
if (!data || Object.keys(data).length === 0) return null;
|
|
3608
|
+
const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
|
|
3609
|
+
const numOrNull = (v) => {
|
|
3610
|
+
const n = nullish(v);
|
|
3611
|
+
return n === null ? null : Number(n);
|
|
3612
|
+
};
|
|
3613
|
+
const dateOrNull = (v) => {
|
|
3614
|
+
const n = numOrNull(v);
|
|
3615
|
+
return n === null ? null : new Date(n);
|
|
3616
|
+
};
|
|
3617
|
+
let output = null;
|
|
3618
|
+
if (data.output && data.output !== "null") {
|
|
3619
|
+
try {
|
|
3620
|
+
output = JSON.parse(data.output);
|
|
3621
|
+
} catch {
|
|
3622
|
+
output = data.output;
|
|
3623
|
+
}
|
|
3624
|
+
}
|
|
3625
|
+
let tags = null;
|
|
3626
|
+
if (data.tags && data.tags !== "null") {
|
|
3627
|
+
try {
|
|
3628
|
+
tags = JSON.parse(data.tags);
|
|
3629
|
+
} catch {
|
|
3630
|
+
}
|
|
3631
|
+
}
|
|
3632
|
+
return {
|
|
3633
|
+
id: data.id,
|
|
3634
|
+
jobId: numOrNull(data.jobId),
|
|
3635
|
+
status: data.status,
|
|
3636
|
+
output,
|
|
3637
|
+
timeoutAt: dateOrNull(data.timeoutAt),
|
|
3638
|
+
createdAt: new Date(Number(data.createdAt)),
|
|
3639
|
+
completedAt: dateOrNull(data.completedAt),
|
|
3640
|
+
tags
|
|
3641
|
+
};
|
|
3642
|
+
}
|
|
3643
|
+
/**
|
|
3644
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
3645
|
+
*
|
|
3646
|
+
* @returns The number of tokens that were expired.
|
|
3647
|
+
*/
|
|
3648
|
+
async expireTimedOutWaitpoints() {
|
|
3649
|
+
const now = this.nowMs();
|
|
3650
|
+
const result = await this.client.eval(
|
|
3651
|
+
EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT,
|
|
3652
|
+
1,
|
|
3653
|
+
this.prefix,
|
|
3654
|
+
now
|
|
3655
|
+
);
|
|
3656
|
+
const count = Number(result);
|
|
3657
|
+
if (count > 0) {
|
|
3658
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
3659
|
+
}
|
|
3660
|
+
return count;
|
|
3661
|
+
}
|
|
2867
3662
|
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2868
3663
|
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
2869
3664
|
let ids = await this.client.smembers(`${this.prefix}status:pending`);
|
|
@@ -2968,6 +3763,332 @@ var RedisBackend = class {
|
|
|
2968
3763
|
return true;
|
|
2969
3764
|
});
|
|
2970
3765
|
}
|
|
3766
|
+
// ── Cron schedules ──────────────────────────────────────────────────
|
|
3767
|
+
/** Create a cron schedule and return its ID. */
|
|
3768
|
+
async addCronSchedule(input) {
|
|
3769
|
+
const existingId = await this.client.get(
|
|
3770
|
+
`${this.prefix}cron_name:${input.scheduleName}`
|
|
3771
|
+
);
|
|
3772
|
+
if (existingId !== null) {
|
|
3773
|
+
throw new Error(
|
|
3774
|
+
`Cron schedule with name "${input.scheduleName}" already exists`
|
|
3775
|
+
);
|
|
3776
|
+
}
|
|
3777
|
+
const id = await this.client.incr(`${this.prefix}cron_id_seq`);
|
|
3778
|
+
const now = this.nowMs();
|
|
3779
|
+
const key = `${this.prefix}cron:${id}`;
|
|
3780
|
+
const fields = [
|
|
3781
|
+
"id",
|
|
3782
|
+
id.toString(),
|
|
3783
|
+
"scheduleName",
|
|
3784
|
+
input.scheduleName,
|
|
3785
|
+
"cronExpression",
|
|
3786
|
+
input.cronExpression,
|
|
3787
|
+
"jobType",
|
|
3788
|
+
input.jobType,
|
|
3789
|
+
"payload",
|
|
3790
|
+
JSON.stringify(input.payload),
|
|
3791
|
+
"maxAttempts",
|
|
3792
|
+
input.maxAttempts.toString(),
|
|
3793
|
+
"priority",
|
|
3794
|
+
input.priority.toString(),
|
|
3795
|
+
"timeoutMs",
|
|
3796
|
+
input.timeoutMs !== null ? input.timeoutMs.toString() : "null",
|
|
3797
|
+
"forceKillOnTimeout",
|
|
3798
|
+
input.forceKillOnTimeout ? "true" : "false",
|
|
3799
|
+
"tags",
|
|
3800
|
+
input.tags ? JSON.stringify(input.tags) : "null",
|
|
3801
|
+
"timezone",
|
|
3802
|
+
input.timezone,
|
|
3803
|
+
"allowOverlap",
|
|
3804
|
+
input.allowOverlap ? "true" : "false",
|
|
3805
|
+
"status",
|
|
3806
|
+
"active",
|
|
3807
|
+
"lastEnqueuedAt",
|
|
3808
|
+
"null",
|
|
3809
|
+
"lastJobId",
|
|
3810
|
+
"null",
|
|
3811
|
+
"nextRunAt",
|
|
3812
|
+
input.nextRunAt ? input.nextRunAt.getTime().toString() : "null",
|
|
3813
|
+
"createdAt",
|
|
3814
|
+
now.toString(),
|
|
3815
|
+
"updatedAt",
|
|
3816
|
+
now.toString()
|
|
3817
|
+
];
|
|
3818
|
+
await this.client.hmset(key, ...fields);
|
|
3819
|
+
await this.client.set(
|
|
3820
|
+
`${this.prefix}cron_name:${input.scheduleName}`,
|
|
3821
|
+
id.toString()
|
|
3822
|
+
);
|
|
3823
|
+
await this.client.sadd(`${this.prefix}crons`, id.toString());
|
|
3824
|
+
await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
|
|
3825
|
+
if (input.nextRunAt) {
|
|
3826
|
+
await this.client.zadd(
|
|
3827
|
+
`${this.prefix}cron_due`,
|
|
3828
|
+
input.nextRunAt.getTime(),
|
|
3829
|
+
id.toString()
|
|
3830
|
+
);
|
|
3831
|
+
}
|
|
3832
|
+
log(`Added cron schedule ${id}: "${input.scheduleName}"`);
|
|
3833
|
+
return id;
|
|
3834
|
+
}
|
|
3835
|
+
/** Get a cron schedule by ID. */
|
|
3836
|
+
async getCronSchedule(id) {
|
|
3837
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
3838
|
+
if (!data || Object.keys(data).length === 0) return null;
|
|
3839
|
+
return this.deserializeCronSchedule(data);
|
|
3840
|
+
}
|
|
3841
|
+
/** Get a cron schedule by its unique name. */
|
|
3842
|
+
async getCronScheduleByName(name) {
|
|
3843
|
+
const id = await this.client.get(`${this.prefix}cron_name:${name}`);
|
|
3844
|
+
if (id === null) return null;
|
|
3845
|
+
return this.getCronSchedule(Number(id));
|
|
3846
|
+
}
|
|
3847
|
+
/** List cron schedules, optionally filtered by status. */
|
|
3848
|
+
async listCronSchedules(status) {
|
|
3849
|
+
let ids;
|
|
3850
|
+
if (status) {
|
|
3851
|
+
ids = await this.client.smembers(`${this.prefix}cron_status:${status}`);
|
|
3852
|
+
} else {
|
|
3853
|
+
ids = await this.client.smembers(`${this.prefix}crons`);
|
|
3854
|
+
}
|
|
3855
|
+
if (ids.length === 0) return [];
|
|
3856
|
+
const pipeline = this.client.pipeline();
|
|
3857
|
+
for (const id of ids) {
|
|
3858
|
+
pipeline.hgetall(`${this.prefix}cron:${id}`);
|
|
3859
|
+
}
|
|
3860
|
+
const results = await pipeline.exec();
|
|
3861
|
+
const schedules = [];
|
|
3862
|
+
if (results) {
|
|
3863
|
+
for (const [err, data] of results) {
|
|
3864
|
+
if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
|
|
3865
|
+
schedules.push(
|
|
3866
|
+
this.deserializeCronSchedule(data)
|
|
3867
|
+
);
|
|
3868
|
+
}
|
|
3869
|
+
}
|
|
3870
|
+
}
|
|
3871
|
+
schedules.sort((a, b) => a.createdAt.getTime() - b.createdAt.getTime());
|
|
3872
|
+
return schedules;
|
|
3873
|
+
}
|
|
3874
|
+
/** Delete a cron schedule by ID. */
|
|
3875
|
+
async removeCronSchedule(id) {
|
|
3876
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
3877
|
+
if (!data || Object.keys(data).length === 0) return;
|
|
3878
|
+
const name = data.scheduleName;
|
|
3879
|
+
const status = data.status;
|
|
3880
|
+
await this.client.del(`${this.prefix}cron:${id}`);
|
|
3881
|
+
await this.client.del(`${this.prefix}cron_name:${name}`);
|
|
3882
|
+
await this.client.srem(`${this.prefix}crons`, id.toString());
|
|
3883
|
+
await this.client.srem(
|
|
3884
|
+
`${this.prefix}cron_status:${status}`,
|
|
3885
|
+
id.toString()
|
|
3886
|
+
);
|
|
3887
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
3888
|
+
log(`Removed cron schedule ${id}`);
|
|
3889
|
+
}
|
|
3890
|
+
/** Pause a cron schedule. */
|
|
3891
|
+
async pauseCronSchedule(id) {
|
|
3892
|
+
const now = this.nowMs();
|
|
3893
|
+
await this.client.hset(
|
|
3894
|
+
`${this.prefix}cron:${id}`,
|
|
3895
|
+
"status",
|
|
3896
|
+
"paused",
|
|
3897
|
+
"updatedAt",
|
|
3898
|
+
now.toString()
|
|
3899
|
+
);
|
|
3900
|
+
await this.client.srem(`${this.prefix}cron_status:active`, id.toString());
|
|
3901
|
+
await this.client.sadd(`${this.prefix}cron_status:paused`, id.toString());
|
|
3902
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
3903
|
+
log(`Paused cron schedule ${id}`);
|
|
3904
|
+
}
|
|
3905
|
+
/** Resume a paused cron schedule. */
|
|
3906
|
+
async resumeCronSchedule(id) {
|
|
3907
|
+
const now = this.nowMs();
|
|
3908
|
+
await this.client.hset(
|
|
3909
|
+
`${this.prefix}cron:${id}`,
|
|
3910
|
+
"status",
|
|
3911
|
+
"active",
|
|
3912
|
+
"updatedAt",
|
|
3913
|
+
now.toString()
|
|
3914
|
+
);
|
|
3915
|
+
await this.client.srem(`${this.prefix}cron_status:paused`, id.toString());
|
|
3916
|
+
await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
|
|
3917
|
+
const nextRunAt = await this.client.hget(
|
|
3918
|
+
`${this.prefix}cron:${id}`,
|
|
3919
|
+
"nextRunAt"
|
|
3920
|
+
);
|
|
3921
|
+
if (nextRunAt && nextRunAt !== "null") {
|
|
3922
|
+
await this.client.zadd(
|
|
3923
|
+
`${this.prefix}cron_due`,
|
|
3924
|
+
Number(nextRunAt),
|
|
3925
|
+
id.toString()
|
|
3926
|
+
);
|
|
3927
|
+
}
|
|
3928
|
+
log(`Resumed cron schedule ${id}`);
|
|
3929
|
+
}
|
|
3930
|
+
/** Edit a cron schedule. */
|
|
3931
|
+
async editCronSchedule(id, updates, nextRunAt) {
|
|
3932
|
+
const now = this.nowMs();
|
|
3933
|
+
const fields = [];
|
|
3934
|
+
if (updates.cronExpression !== void 0) {
|
|
3935
|
+
fields.push("cronExpression", updates.cronExpression);
|
|
3936
|
+
}
|
|
3937
|
+
if (updates.payload !== void 0) {
|
|
3938
|
+
fields.push("payload", JSON.stringify(updates.payload));
|
|
3939
|
+
}
|
|
3940
|
+
if (updates.maxAttempts !== void 0) {
|
|
3941
|
+
fields.push("maxAttempts", updates.maxAttempts.toString());
|
|
3942
|
+
}
|
|
3943
|
+
if (updates.priority !== void 0) {
|
|
3944
|
+
fields.push("priority", updates.priority.toString());
|
|
3945
|
+
}
|
|
3946
|
+
if (updates.timeoutMs !== void 0) {
|
|
3947
|
+
fields.push(
|
|
3948
|
+
"timeoutMs",
|
|
3949
|
+
updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
|
|
3950
|
+
);
|
|
3951
|
+
}
|
|
3952
|
+
if (updates.forceKillOnTimeout !== void 0) {
|
|
3953
|
+
fields.push(
|
|
3954
|
+
"forceKillOnTimeout",
|
|
3955
|
+
updates.forceKillOnTimeout ? "true" : "false"
|
|
3956
|
+
);
|
|
3957
|
+
}
|
|
3958
|
+
if (updates.tags !== void 0) {
|
|
3959
|
+
fields.push(
|
|
3960
|
+
"tags",
|
|
3961
|
+
updates.tags !== null ? JSON.stringify(updates.tags) : "null"
|
|
3962
|
+
);
|
|
3963
|
+
}
|
|
3964
|
+
if (updates.timezone !== void 0) {
|
|
3965
|
+
fields.push("timezone", updates.timezone);
|
|
3966
|
+
}
|
|
3967
|
+
if (updates.allowOverlap !== void 0) {
|
|
3968
|
+
fields.push("allowOverlap", updates.allowOverlap ? "true" : "false");
|
|
3969
|
+
}
|
|
3970
|
+
if (nextRunAt !== void 0) {
|
|
3971
|
+
const val = nextRunAt !== null ? nextRunAt.getTime().toString() : "null";
|
|
3972
|
+
fields.push("nextRunAt", val);
|
|
3973
|
+
if (nextRunAt !== null) {
|
|
3974
|
+
await this.client.zadd(
|
|
3975
|
+
`${this.prefix}cron_due`,
|
|
3976
|
+
nextRunAt.getTime(),
|
|
3977
|
+
id.toString()
|
|
3978
|
+
);
|
|
3979
|
+
} else {
|
|
3980
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
3981
|
+
}
|
|
3982
|
+
}
|
|
3983
|
+
if (fields.length === 0) {
|
|
3984
|
+
log(`No fields to update for cron schedule ${id}`);
|
|
3985
|
+
return;
|
|
3986
|
+
}
|
|
3987
|
+
fields.push("updatedAt", now.toString());
|
|
3988
|
+
await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
|
|
3989
|
+
log(`Edited cron schedule ${id}`);
|
|
3990
|
+
}
|
|
3991
|
+
/**
|
|
3992
|
+
* Fetch all active cron schedules whose nextRunAt <= now.
|
|
3993
|
+
* Uses a sorted set (cron_due) for efficient range query.
|
|
3994
|
+
*/
|
|
3995
|
+
async getDueCronSchedules() {
|
|
3996
|
+
const now = this.nowMs();
|
|
3997
|
+
const ids = await this.client.zrangebyscore(
|
|
3998
|
+
`${this.prefix}cron_due`,
|
|
3999
|
+
0,
|
|
4000
|
+
now
|
|
4001
|
+
);
|
|
4002
|
+
if (ids.length === 0) {
|
|
4003
|
+
log("Found 0 due cron schedules");
|
|
4004
|
+
return [];
|
|
4005
|
+
}
|
|
4006
|
+
const schedules = [];
|
|
4007
|
+
for (const id of ids) {
|
|
4008
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
4009
|
+
if (data && Object.keys(data).length > 0 && data.status === "active") {
|
|
4010
|
+
schedules.push(this.deserializeCronSchedule(data));
|
|
4011
|
+
}
|
|
4012
|
+
}
|
|
4013
|
+
log(`Found ${schedules.length} due cron schedules`);
|
|
4014
|
+
return schedules;
|
|
4015
|
+
}
|
|
4016
|
+
/**
|
|
4017
|
+
* Update a cron schedule after a job has been enqueued.
|
|
4018
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
4019
|
+
*/
|
|
4020
|
+
async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
|
|
4021
|
+
const fields = [
|
|
4022
|
+
"lastEnqueuedAt",
|
|
4023
|
+
lastEnqueuedAt.getTime().toString(),
|
|
4024
|
+
"lastJobId",
|
|
4025
|
+
lastJobId.toString(),
|
|
4026
|
+
"nextRunAt",
|
|
4027
|
+
nextRunAt ? nextRunAt.getTime().toString() : "null",
|
|
4028
|
+
"updatedAt",
|
|
4029
|
+
this.nowMs().toString()
|
|
4030
|
+
];
|
|
4031
|
+
await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
|
|
4032
|
+
if (nextRunAt) {
|
|
4033
|
+
await this.client.zadd(
|
|
4034
|
+
`${this.prefix}cron_due`,
|
|
4035
|
+
nextRunAt.getTime(),
|
|
4036
|
+
id.toString()
|
|
4037
|
+
);
|
|
4038
|
+
} else {
|
|
4039
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
4040
|
+
}
|
|
4041
|
+
log(
|
|
4042
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
|
|
4043
|
+
);
|
|
4044
|
+
}
|
|
4045
|
+
/** Deserialize a Redis hash into a CronScheduleRecord. */
|
|
4046
|
+
deserializeCronSchedule(h) {
|
|
4047
|
+
const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
|
|
4048
|
+
const numOrNull = (v) => {
|
|
4049
|
+
const n = nullish(v);
|
|
4050
|
+
return n === null ? null : Number(n);
|
|
4051
|
+
};
|
|
4052
|
+
const dateOrNull = (v) => {
|
|
4053
|
+
const n = numOrNull(v);
|
|
4054
|
+
return n === null ? null : new Date(n);
|
|
4055
|
+
};
|
|
4056
|
+
let payload;
|
|
4057
|
+
try {
|
|
4058
|
+
payload = JSON.parse(h.payload);
|
|
4059
|
+
} catch {
|
|
4060
|
+
payload = h.payload;
|
|
4061
|
+
}
|
|
4062
|
+
let tags;
|
|
4063
|
+
try {
|
|
4064
|
+
const raw = h.tags;
|
|
4065
|
+
if (raw && raw !== "null") {
|
|
4066
|
+
tags = JSON.parse(raw);
|
|
4067
|
+
}
|
|
4068
|
+
} catch {
|
|
4069
|
+
}
|
|
4070
|
+
return {
|
|
4071
|
+
id: Number(h.id),
|
|
4072
|
+
scheduleName: h.scheduleName,
|
|
4073
|
+
cronExpression: h.cronExpression,
|
|
4074
|
+
jobType: h.jobType,
|
|
4075
|
+
payload,
|
|
4076
|
+
maxAttempts: Number(h.maxAttempts),
|
|
4077
|
+
priority: Number(h.priority),
|
|
4078
|
+
timeoutMs: numOrNull(h.timeoutMs),
|
|
4079
|
+
forceKillOnTimeout: h.forceKillOnTimeout === "true",
|
|
4080
|
+
tags,
|
|
4081
|
+
timezone: h.timezone,
|
|
4082
|
+
allowOverlap: h.allowOverlap === "true",
|
|
4083
|
+
status: h.status,
|
|
4084
|
+
lastEnqueuedAt: dateOrNull(h.lastEnqueuedAt),
|
|
4085
|
+
lastJobId: numOrNull(h.lastJobId),
|
|
4086
|
+
nextRunAt: dateOrNull(h.nextRunAt),
|
|
4087
|
+
createdAt: new Date(Number(h.createdAt)),
|
|
4088
|
+
updatedAt: new Date(Number(h.updatedAt))
|
|
4089
|
+
};
|
|
4090
|
+
}
|
|
4091
|
+
// ── Private helpers (filters) ─────────────────────────────────────────
|
|
2971
4092
|
async applyFilters(ids, filters) {
|
|
2972
4093
|
let result = ids;
|
|
2973
4094
|
if (filters.jobType) {
|
|
@@ -2997,6 +4118,19 @@ var RedisBackend = class {
|
|
|
2997
4118
|
return result;
|
|
2998
4119
|
}
|
|
2999
4120
|
};
|
|
4121
|
+
/**
 * Compute the next occurrence of a cron expression after a reference time.
 *
 * @param cronExpression - Cron expression to evaluate.
 * @param timezone - IANA timezone name (defaults to "UTC").
 * @param after - Reference time; defaults to now.
 * @param CronImpl - Injectable Cron constructor (defaults to croner's Cron).
 * @returns The next run Date, or null when there is none.
 */
function getNextCronOccurrence(cronExpression, timezone = "UTC", after, CronImpl = croner.Cron) {
  const schedule = new CronImpl(cronExpression, { timezone });
  const upcoming = schedule.nextRun(after ?? new Date());
  return upcoming ?? null;
}
|
|
4126
|
+
/**
 * Check whether a cron expression can be parsed.
 *
 * @param cronExpression - Expression to validate.
 * @param CronImpl - Injectable Cron constructor (defaults to croner's Cron).
 * @returns true when the constructor accepts the expression, false otherwise.
 */
function validateCronExpression(cronExpression, CronImpl = croner.Cron) {
  try {
    new CronImpl(cronExpression);
  } catch {
    return false;
  }
  return true;
}
|
|
3000
4134
|
|
|
3001
4135
|
// src/handler-validation.ts
|
|
3002
4136
|
function validateHandlerSerializable2(handler, jobType) {
|
|
@@ -3072,10 +4206,9 @@ var initJobQueue = (config) => {
|
|
|
3072
4206
|
const backendType = config.backend ?? "postgres";
|
|
3073
4207
|
setLogContext(config.verbose ?? false);
|
|
3074
4208
|
let backend;
|
|
3075
|
-
let pool;
|
|
3076
4209
|
if (backendType === "postgres") {
|
|
3077
4210
|
const pgConfig = config;
|
|
3078
|
-
pool = createPool(pgConfig.databaseConfig);
|
|
4211
|
+
const pool = createPool(pgConfig.databaseConfig);
|
|
3079
4212
|
backend = new PostgresBackend(pool);
|
|
3080
4213
|
} else if (backendType === "redis") {
|
|
3081
4214
|
const redisConfig = config.redisConfig;
|
|
@@ -3083,13 +4216,48 @@ var initJobQueue = (config) => {
|
|
|
3083
4216
|
} else {
|
|
3084
4217
|
throw new Error(`Unknown backend: ${backendType}`);
|
|
3085
4218
|
}
|
|
3086
|
-
const
|
|
3087
|
-
|
|
3088
|
-
|
|
3089
|
-
|
|
4219
|
+
// Enqueue a job for every due cron schedule; returns how many were enqueued.
const enqueueDueCronJobsImpl = async () => {
  const due = await backend.getDueCronSchedules();
  let enqueued = 0;
  for (const schedule of due) {
    // When overlap is disallowed and the previous run is still live,
    // skip this occurrence but still advance nextRunAt.
    if (!schedule.allowOverlap && schedule.lastJobId !== null) {
      const previous = await backend.getJob(schedule.lastJobId);
      const stillLive = previous && (previous.status === "pending" || previous.status === "processing" || previous.status === "waiting");
      if (stillLive) {
        await backend.updateCronScheduleAfterEnqueue(
          schedule.id,
          new Date(),
          schedule.lastJobId,
          getNextCronOccurrence(schedule.cronExpression, schedule.timezone)
        );
        continue;
      }
    }
    const jobId = await backend.addJob({
      jobType: schedule.jobType,
      payload: schedule.payload,
      maxAttempts: schedule.maxAttempts,
      priority: schedule.priority,
      timeoutMs: schedule.timeoutMs ?? void 0,
      forceKillOnTimeout: schedule.forceKillOnTimeout,
      tags: schedule.tags
    });
    await backend.updateCronScheduleAfterEnqueue(
      schedule.id,
      new Date(),
      jobId,
      getNextCronOccurrence(schedule.cronExpression, schedule.timezone)
    );
    enqueued++;
  }
  return enqueued;
};
|
|
3094
4262
|
return {
|
|
3095
4263
|
// Job queue operations
|
|
@@ -3114,8 +4282,8 @@ var initJobQueue = (config) => {
|
|
|
3114
4282
|
config.verbose ?? false
|
|
3115
4283
|
),
|
|
3116
4284
|
retryJob: (jobId) => backend.retryJob(jobId),
|
|
3117
|
-
cleanupOldJobs: (daysToKeep) => backend.cleanupOldJobs(daysToKeep),
|
|
3118
|
-
cleanupOldJobEvents: (daysToKeep) => backend.cleanupOldJobEvents(daysToKeep),
|
|
4285
|
+
cleanupOldJobs: (daysToKeep, batchSize) => backend.cleanupOldJobs(daysToKeep, batchSize),
|
|
4286
|
+
cleanupOldJobEvents: (daysToKeep, batchSize) => backend.cleanupOldJobEvents(daysToKeep, batchSize),
|
|
3119
4287
|
cancelJob: withLogContext(
|
|
3120
4288
|
(jobId) => backend.cancelJob(jobId),
|
|
3121
4289
|
config.verbose ?? false
|
|
@@ -3143,33 +4311,111 @@ var initJobQueue = (config) => {
|
|
|
3143
4311
|
(tags, mode = "all", limit, offset) => backend.getJobsByTags(tags, mode, limit, offset),
|
|
3144
4312
|
config.verbose ?? false
|
|
3145
4313
|
),
|
|
3146
|
-
// Job processing
|
|
3147
|
-
createProcessor: (handlers, options) => createProcessor(backend, handlers, options)
|
|
4314
|
+
// Job processing — automatically enqueues due cron jobs before each batch
|
|
4315
|
+
createProcessor: (handlers, options) => createProcessor(backend, handlers, options, async () => {
|
|
4316
|
+
await enqueueDueCronJobsImpl();
|
|
4317
|
+
}),
|
|
3148
4318
|
// Job events
|
|
3149
4319
|
getJobEvents: withLogContext(
|
|
3150
4320
|
(jobId) => backend.getJobEvents(jobId),
|
|
3151
4321
|
config.verbose ?? false
|
|
3152
4322
|
),
|
|
3153
|
-
// Wait / Token support (
|
|
4323
|
+
// Wait / Token support (works with all backends)
|
|
3154
4324
|
createToken: withLogContext(
|
|
3155
|
-
(options) => createWaitpoint(
|
|
4325
|
+
(options) => backend.createWaitpoint(null, options),
|
|
3156
4326
|
config.verbose ?? false
|
|
3157
4327
|
),
|
|
3158
4328
|
completeToken: withLogContext(
|
|
3159
|
-
(tokenId, data) => completeWaitpoint(
|
|
4329
|
+
(tokenId, data) => backend.completeWaitpoint(tokenId, data),
|
|
3160
4330
|
config.verbose ?? false
|
|
3161
4331
|
),
|
|
3162
4332
|
getToken: withLogContext(
|
|
3163
|
-
(tokenId) => getWaitpoint(
|
|
4333
|
+
(tokenId) => backend.getWaitpoint(tokenId),
|
|
3164
4334
|
config.verbose ?? false
|
|
3165
4335
|
),
|
|
3166
4336
|
expireTimedOutTokens: withLogContext(
|
|
3167
|
-
() => expireTimedOutWaitpoints(
|
|
4337
|
+
() => backend.expireTimedOutWaitpoints(),
|
|
4338
|
+
config.verbose ?? false
|
|
4339
|
+
),
|
|
4340
|
+
// Cron schedule operations
|
|
4341
|
+
addCronJob: withLogContext(
|
|
4342
|
+
(options) => {
|
|
4343
|
+
if (!validateCronExpression(options.cronExpression)) {
|
|
4344
|
+
return Promise.reject(
|
|
4345
|
+
new Error(`Invalid cron expression: "${options.cronExpression}"`)
|
|
4346
|
+
);
|
|
4347
|
+
}
|
|
4348
|
+
const nextRunAt = getNextCronOccurrence(
|
|
4349
|
+
options.cronExpression,
|
|
4350
|
+
options.timezone ?? "UTC"
|
|
4351
|
+
);
|
|
4352
|
+
const input = {
|
|
4353
|
+
scheduleName: options.scheduleName,
|
|
4354
|
+
cronExpression: options.cronExpression,
|
|
4355
|
+
jobType: options.jobType,
|
|
4356
|
+
payload: options.payload,
|
|
4357
|
+
maxAttempts: options.maxAttempts ?? 3,
|
|
4358
|
+
priority: options.priority ?? 0,
|
|
4359
|
+
timeoutMs: options.timeoutMs ?? null,
|
|
4360
|
+
forceKillOnTimeout: options.forceKillOnTimeout ?? false,
|
|
4361
|
+
tags: options.tags,
|
|
4362
|
+
timezone: options.timezone ?? "UTC",
|
|
4363
|
+
allowOverlap: options.allowOverlap ?? false,
|
|
4364
|
+
nextRunAt
|
|
4365
|
+
};
|
|
4366
|
+
return backend.addCronSchedule(input);
|
|
4367
|
+
},
|
|
4368
|
+
config.verbose ?? false
|
|
4369
|
+
),
|
|
4370
|
+
getCronJob: withLogContext(
|
|
4371
|
+
(id) => backend.getCronSchedule(id),
|
|
4372
|
+
config.verbose ?? false
|
|
4373
|
+
),
|
|
4374
|
+
getCronJobByName: withLogContext(
|
|
4375
|
+
(name) => backend.getCronScheduleByName(name),
|
|
4376
|
+
config.verbose ?? false
|
|
4377
|
+
),
|
|
4378
|
+
listCronJobs: withLogContext(
|
|
4379
|
+
(status) => backend.listCronSchedules(status),
|
|
4380
|
+
config.verbose ?? false
|
|
4381
|
+
),
|
|
4382
|
+
removeCronJob: withLogContext(
|
|
4383
|
+
(id) => backend.removeCronSchedule(id),
|
|
4384
|
+
config.verbose ?? false
|
|
4385
|
+
),
|
|
4386
|
+
pauseCronJob: withLogContext(
|
|
4387
|
+
(id) => backend.pauseCronSchedule(id),
|
|
4388
|
+
config.verbose ?? false
|
|
4389
|
+
),
|
|
4390
|
+
resumeCronJob: withLogContext(
|
|
4391
|
+
(id) => backend.resumeCronSchedule(id),
|
|
4392
|
+
config.verbose ?? false
|
|
4393
|
+
),
|
|
4394
|
+
editCronJob: withLogContext(
|
|
4395
|
+
async (id, updates) => {
|
|
4396
|
+
if (updates.cronExpression !== void 0 && !validateCronExpression(updates.cronExpression)) {
|
|
4397
|
+
throw new Error(
|
|
4398
|
+
`Invalid cron expression: "${updates.cronExpression}"`
|
|
4399
|
+
);
|
|
4400
|
+
}
|
|
4401
|
+
let nextRunAt;
|
|
4402
|
+
if (updates.cronExpression !== void 0 || updates.timezone !== void 0) {
|
|
4403
|
+
const existing = await backend.getCronSchedule(id);
|
|
4404
|
+
const expr = updates.cronExpression ?? existing?.cronExpression ?? "";
|
|
4405
|
+
const tz = updates.timezone ?? existing?.timezone ?? "UTC";
|
|
4406
|
+
nextRunAt = getNextCronOccurrence(expr, tz);
|
|
4407
|
+
}
|
|
4408
|
+
await backend.editCronSchedule(id, updates, nextRunAt);
|
|
4409
|
+
},
|
|
4410
|
+
config.verbose ?? false
|
|
4411
|
+
),
|
|
4412
|
+
enqueueDueCronJobs: withLogContext(
|
|
4413
|
+
() => enqueueDueCronJobsImpl(),
|
|
3168
4414
|
config.verbose ?? false
|
|
3169
4415
|
),
|
|
3170
4416
|
// Advanced access
|
|
3171
4417
|
getPool: () => {
|
|
3172
|
-
if (
|
|
4418
|
+
if (!(backend instanceof PostgresBackend)) {
|
|
3173
4419
|
throw new Error(
|
|
3174
4420
|
"getPool() is only available with the PostgreSQL backend."
|
|
3175
4421
|
);
|
|
@@ -3195,8 +4441,10 @@ exports.FailureReason = FailureReason;
|
|
|
3195
4441
|
exports.JobEventType = JobEventType;
|
|
3196
4442
|
exports.PostgresBackend = PostgresBackend;
|
|
3197
4443
|
exports.WaitSignal = WaitSignal;
|
|
4444
|
+
exports.getNextCronOccurrence = getNextCronOccurrence;
|
|
3198
4445
|
exports.initJobQueue = initJobQueue;
|
|
3199
4446
|
exports.testHandlerSerialization = testHandlerSerialization;
|
|
4447
|
+
exports.validateCronExpression = validateCronExpression;
|
|
3200
4448
|
exports.validateHandlerSerializable = validateHandlerSerializable2;
|
|
3201
4449
|
//# sourceMappingURL=index.cjs.map
|
|
3202
4450
|
//# sourceMappingURL=index.cjs.map
|