@nicnocquee/dataqueue 1.30.0 → 1.32.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2531 -1283
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +367 -17
- package/dist/index.d.ts +367 -17
- package/dist/index.js +2530 -1284
- package/dist/index.js.map +1 -1
- package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
- package/package.json +3 -2
- package/src/backend.ts +139 -4
- package/src/backends/postgres.ts +676 -30
- package/src/backends/redis-scripts.ts +197 -22
- package/src/backends/redis.test.ts +971 -0
- package/src/backends/redis.ts +789 -22
- package/src/cron.test.ts +126 -0
- package/src/cron.ts +40 -0
- package/src/index.test.ts +361 -0
- package/src/index.ts +165 -29
- package/src/processor.ts +36 -97
- package/src/queue.test.ts +29 -0
- package/src/queue.ts +19 -251
- package/src/types.ts +177 -10
package/dist/index.js
CHANGED
|
@@ -1,10 +1,13 @@
|
|
|
1
|
-
import { AsyncLocalStorage } from 'async_hooks';
|
|
2
|
-
import { randomUUID } from 'crypto';
|
|
3
1
|
import { Worker } from 'worker_threads';
|
|
2
|
+
import { AsyncLocalStorage } from 'async_hooks';
|
|
4
3
|
import { Pool } from 'pg';
|
|
5
4
|
import { parse } from 'pg-connection-string';
|
|
6
5
|
import fs from 'fs';
|
|
6
|
+
import { randomUUID } from 'crypto';
|
|
7
7
|
import { createRequire } from 'module';
|
|
8
|
+
import { Cron } from 'croner';
|
|
9
|
+
|
|
10
|
+
// src/processor.ts
|
|
8
11
|
|
|
9
12
|
// src/types.ts
|
|
10
13
|
var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
@@ -19,11 +22,11 @@ var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
|
19
22
|
JobEventType2["Waiting"] = "waiting";
|
|
20
23
|
return JobEventType2;
|
|
21
24
|
})(JobEventType || {});
|
|
22
|
-
var FailureReason = /* @__PURE__ */ ((
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
return
|
|
25
|
+
var FailureReason = /* @__PURE__ */ ((FailureReason4) => {
|
|
26
|
+
FailureReason4["Timeout"] = "timeout";
|
|
27
|
+
FailureReason4["HandlerError"] = "handler_error";
|
|
28
|
+
FailureReason4["NoHandler"] = "no_handler";
|
|
29
|
+
return FailureReason4;
|
|
27
30
|
})(FailureReason || {});
|
|
28
31
|
var WaitSignal = class extends Error {
|
|
29
32
|
constructor(type, waitUntil, tokenId, stepData) {
|
|
@@ -50,250 +53,954 @@ var log = (message) => {
|
|
|
50
53
|
}
|
|
51
54
|
};
|
|
52
55
|
|
|
53
|
-
// src/
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
return this.pool;
|
|
61
|
-
}
|
|
62
|
-
// ── Events ──────────────────────────────────────────────────────────
|
|
63
|
-
async recordJobEvent(jobId, eventType, metadata) {
|
|
64
|
-
const client = await this.pool.connect();
|
|
65
|
-
try {
|
|
66
|
-
await client.query(
|
|
67
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
68
|
-
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
69
|
-
);
|
|
70
|
-
} catch (error) {
|
|
71
|
-
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
72
|
-
} finally {
|
|
73
|
-
client.release();
|
|
74
|
-
}
|
|
75
|
-
}
|
|
76
|
-
async getJobEvents(jobId) {
|
|
77
|
-
const client = await this.pool.connect();
|
|
78
|
-
try {
|
|
79
|
-
const res = await client.query(
|
|
80
|
-
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
81
|
-
[jobId]
|
|
82
|
-
);
|
|
83
|
-
return res.rows;
|
|
84
|
-
} finally {
|
|
85
|
-
client.release();
|
|
86
|
-
}
|
|
87
|
-
}
|
|
88
|
-
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
89
|
-
async addJob({
|
|
90
|
-
jobType,
|
|
91
|
-
payload,
|
|
92
|
-
maxAttempts = 3,
|
|
93
|
-
priority = 0,
|
|
94
|
-
runAt = null,
|
|
95
|
-
timeoutMs = void 0,
|
|
96
|
-
forceKillOnTimeout = false,
|
|
97
|
-
tags = void 0,
|
|
98
|
-
idempotencyKey = void 0
|
|
99
|
-
}) {
|
|
100
|
-
const client = await this.pool.connect();
|
|
101
|
-
try {
|
|
102
|
-
let result;
|
|
103
|
-
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
104
|
-
if (runAt) {
|
|
105
|
-
result = await client.query(
|
|
106
|
-
`INSERT INTO job_queue
|
|
107
|
-
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
108
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
109
|
-
${onConflict}
|
|
110
|
-
RETURNING id`,
|
|
111
|
-
[
|
|
112
|
-
jobType,
|
|
113
|
-
payload,
|
|
114
|
-
maxAttempts,
|
|
115
|
-
priority,
|
|
116
|
-
runAt,
|
|
117
|
-
timeoutMs ?? null,
|
|
118
|
-
forceKillOnTimeout ?? false,
|
|
119
|
-
tags ?? null,
|
|
120
|
-
idempotencyKey ?? null
|
|
121
|
-
]
|
|
122
|
-
);
|
|
123
|
-
} else {
|
|
124
|
-
result = await client.query(
|
|
125
|
-
`INSERT INTO job_queue
|
|
126
|
-
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
127
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
128
|
-
${onConflict}
|
|
129
|
-
RETURNING id`,
|
|
130
|
-
[
|
|
131
|
-
jobType,
|
|
132
|
-
payload,
|
|
133
|
-
maxAttempts,
|
|
134
|
-
priority,
|
|
135
|
-
timeoutMs ?? null,
|
|
136
|
-
forceKillOnTimeout ?? false,
|
|
137
|
-
tags ?? null,
|
|
138
|
-
idempotencyKey ?? null
|
|
139
|
-
]
|
|
140
|
-
);
|
|
141
|
-
}
|
|
142
|
-
if (result.rows.length === 0 && idempotencyKey) {
|
|
143
|
-
const existing = await client.query(
|
|
144
|
-
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
145
|
-
[idempotencyKey]
|
|
146
|
-
);
|
|
147
|
-
if (existing.rows.length > 0) {
|
|
148
|
-
log(
|
|
149
|
-
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
150
|
-
);
|
|
151
|
-
return existing.rows[0].id;
|
|
152
|
-
}
|
|
153
|
-
throw new Error(
|
|
154
|
-
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
155
|
-
);
|
|
156
|
-
}
|
|
157
|
-
const jobId = result.rows[0].id;
|
|
158
|
-
log(
|
|
159
|
-
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
56
|
+
// src/processor.ts
|
|
57
|
+
function validateHandlerSerializable(handler, jobType) {
|
|
58
|
+
try {
|
|
59
|
+
const handlerString = handler.toString();
|
|
60
|
+
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
61
|
+
throw new Error(
|
|
62
|
+
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
160
63
|
);
|
|
161
|
-
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
162
|
-
jobType,
|
|
163
|
-
payload,
|
|
164
|
-
tags,
|
|
165
|
-
idempotencyKey
|
|
166
|
-
});
|
|
167
|
-
return jobId;
|
|
168
|
-
} catch (error) {
|
|
169
|
-
log(`Error adding job: ${error}`);
|
|
170
|
-
throw error;
|
|
171
|
-
} finally {
|
|
172
|
-
client.release();
|
|
173
64
|
}
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
try {
|
|
178
|
-
const result = await client.query(
|
|
179
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
180
|
-
[id]
|
|
65
|
+
if (handlerString.includes("[native code]")) {
|
|
66
|
+
throw new Error(
|
|
67
|
+
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
181
68
|
);
|
|
182
|
-
if (result.rows.length === 0) {
|
|
183
|
-
log(`Job ${id} not found`);
|
|
184
|
-
return null;
|
|
185
|
-
}
|
|
186
|
-
log(`Found job ${id}`);
|
|
187
|
-
const job = result.rows[0];
|
|
188
|
-
return {
|
|
189
|
-
...job,
|
|
190
|
-
payload: job.payload,
|
|
191
|
-
timeoutMs: job.timeoutMs,
|
|
192
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
193
|
-
failureReason: job.failureReason
|
|
194
|
-
};
|
|
195
|
-
} catch (error) {
|
|
196
|
-
log(`Error getting job ${id}: ${error}`);
|
|
197
|
-
throw error;
|
|
198
|
-
} finally {
|
|
199
|
-
client.release();
|
|
200
69
|
}
|
|
201
|
-
}
|
|
202
|
-
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
203
|
-
const client = await this.pool.connect();
|
|
204
70
|
try {
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
71
|
+
new Function("return " + handlerString);
|
|
72
|
+
} catch (parseError) {
|
|
73
|
+
throw new Error(
|
|
74
|
+
`Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
|
|
208
75
|
);
|
|
209
|
-
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
210
|
-
return result.rows.map((job) => ({
|
|
211
|
-
...job,
|
|
212
|
-
payload: job.payload,
|
|
213
|
-
timeoutMs: job.timeoutMs,
|
|
214
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
215
|
-
failureReason: job.failureReason
|
|
216
|
-
}));
|
|
217
|
-
} catch (error) {
|
|
218
|
-
log(`Error getting jobs by status ${status}: ${error}`);
|
|
219
|
-
throw error;
|
|
220
|
-
} finally {
|
|
221
|
-
client.release();
|
|
222
76
|
}
|
|
223
|
-
}
|
|
224
|
-
|
|
225
|
-
const client = await this.pool.connect();
|
|
226
|
-
try {
|
|
227
|
-
const result = await client.query(
|
|
228
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
229
|
-
[limit, offset]
|
|
230
|
-
);
|
|
231
|
-
log(`Found ${result.rows.length} jobs (all)`);
|
|
232
|
-
return result.rows.map((job) => ({
|
|
233
|
-
...job,
|
|
234
|
-
payload: job.payload,
|
|
235
|
-
timeoutMs: job.timeoutMs,
|
|
236
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
237
|
-
}));
|
|
238
|
-
} catch (error) {
|
|
239
|
-
log(`Error getting all jobs: ${error}`);
|
|
77
|
+
} catch (error) {
|
|
78
|
+
if (error instanceof Error) {
|
|
240
79
|
throw error;
|
|
241
|
-
} finally {
|
|
242
|
-
client.release();
|
|
243
80
|
}
|
|
81
|
+
throw new Error(
|
|
82
|
+
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
83
|
+
);
|
|
244
84
|
}
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
85
|
+
}
|
|
86
|
+
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
|
|
87
|
+
validateHandlerSerializable(handler, jobType);
|
|
88
|
+
return new Promise((resolve, reject) => {
|
|
89
|
+
const workerCode = `
|
|
90
|
+
(function() {
|
|
91
|
+
const { parentPort, workerData } = require('worker_threads');
|
|
92
|
+
const { handlerCode, payload, timeoutMs } = workerData;
|
|
93
|
+
|
|
94
|
+
// Create an AbortController for the handler
|
|
95
|
+
const controller = new AbortController();
|
|
96
|
+
const signal = controller.signal;
|
|
97
|
+
|
|
98
|
+
// Set up timeout
|
|
99
|
+
const timeoutId = setTimeout(() => {
|
|
100
|
+
controller.abort();
|
|
101
|
+
parentPort.postMessage({ type: 'timeout' });
|
|
102
|
+
}, timeoutMs);
|
|
103
|
+
|
|
104
|
+
try {
|
|
105
|
+
// Execute the handler
|
|
106
|
+
// Note: This uses Function constructor which requires the handler to be serializable.
|
|
107
|
+
// The handler should be validated before reaching this point.
|
|
108
|
+
let handlerFn;
|
|
109
|
+
try {
|
|
110
|
+
// Wrap handlerCode in parentheses to ensure it's treated as an expression
|
|
111
|
+
// This handles both arrow functions and regular functions
|
|
112
|
+
const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
|
|
113
|
+
? handlerCode
|
|
114
|
+
: '(' + handlerCode + ')';
|
|
115
|
+
handlerFn = new Function('return ' + wrappedCode)();
|
|
116
|
+
} catch (parseError) {
|
|
117
|
+
clearTimeout(timeoutId);
|
|
118
|
+
parentPort.postMessage({
|
|
119
|
+
type: 'error',
|
|
120
|
+
error: {
|
|
121
|
+
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
122
|
+
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
123
|
+
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
124
|
+
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
125
|
+
name: 'SerializationError',
|
|
126
|
+
},
|
|
127
|
+
});
|
|
128
|
+
return;
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
// Ensure handlerFn is actually a function
|
|
132
|
+
if (typeof handlerFn !== 'function') {
|
|
133
|
+
clearTimeout(timeoutId);
|
|
134
|
+
parentPort.postMessage({
|
|
135
|
+
type: 'error',
|
|
136
|
+
error: {
|
|
137
|
+
message: 'Handler deserialization did not produce a function. ' +
|
|
138
|
+
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
139
|
+
name: 'SerializationError',
|
|
140
|
+
},
|
|
141
|
+
});
|
|
142
|
+
return;
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
handlerFn(payload, signal)
|
|
146
|
+
.then(() => {
|
|
147
|
+
clearTimeout(timeoutId);
|
|
148
|
+
parentPort.postMessage({ type: 'success' });
|
|
149
|
+
})
|
|
150
|
+
.catch((error) => {
|
|
151
|
+
clearTimeout(timeoutId);
|
|
152
|
+
parentPort.postMessage({
|
|
153
|
+
type: 'error',
|
|
154
|
+
error: {
|
|
155
|
+
message: error.message,
|
|
156
|
+
stack: error.stack,
|
|
157
|
+
name: error.name,
|
|
158
|
+
},
|
|
159
|
+
});
|
|
160
|
+
});
|
|
161
|
+
} catch (error) {
|
|
162
|
+
clearTimeout(timeoutId);
|
|
163
|
+
parentPort.postMessage({
|
|
164
|
+
type: 'error',
|
|
165
|
+
error: {
|
|
166
|
+
message: error.message,
|
|
167
|
+
stack: error.stack,
|
|
168
|
+
name: error.name,
|
|
169
|
+
},
|
|
170
|
+
});
|
|
171
|
+
}
|
|
172
|
+
})();
|
|
173
|
+
`;
|
|
174
|
+
const worker = new Worker(workerCode, {
|
|
175
|
+
eval: true,
|
|
176
|
+
workerData: {
|
|
177
|
+
handlerCode: handler.toString(),
|
|
178
|
+
payload,
|
|
179
|
+
timeoutMs
|
|
180
|
+
}
|
|
181
|
+
});
|
|
182
|
+
let resolved = false;
|
|
183
|
+
worker.on("message", (message) => {
|
|
184
|
+
if (resolved) return;
|
|
185
|
+
resolved = true;
|
|
186
|
+
if (message.type === "success") {
|
|
187
|
+
resolve();
|
|
188
|
+
} else if (message.type === "timeout") {
|
|
189
|
+
const timeoutError = new Error(
|
|
190
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
191
|
+
);
|
|
192
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
193
|
+
reject(timeoutError);
|
|
194
|
+
} else if (message.type === "error") {
|
|
195
|
+
const error = new Error(message.error.message);
|
|
196
|
+
error.stack = message.error.stack;
|
|
197
|
+
error.name = message.error.name;
|
|
198
|
+
reject(error);
|
|
199
|
+
}
|
|
200
|
+
});
|
|
201
|
+
worker.on("error", (error) => {
|
|
202
|
+
if (resolved) return;
|
|
203
|
+
resolved = true;
|
|
204
|
+
reject(error);
|
|
205
|
+
});
|
|
206
|
+
worker.on("exit", (code) => {
|
|
207
|
+
if (resolved) return;
|
|
208
|
+
if (code !== 0) {
|
|
209
|
+
resolved = true;
|
|
210
|
+
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
211
|
+
}
|
|
212
|
+
});
|
|
213
|
+
setTimeout(() => {
|
|
214
|
+
if (!resolved) {
|
|
215
|
+
resolved = true;
|
|
216
|
+
worker.terminate().then(() => {
|
|
217
|
+
const timeoutError = new Error(
|
|
218
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
219
|
+
);
|
|
220
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
221
|
+
reject(timeoutError);
|
|
222
|
+
}).catch((err) => {
|
|
223
|
+
reject(err);
|
|
224
|
+
});
|
|
225
|
+
}
|
|
226
|
+
}, timeoutMs + 100);
|
|
227
|
+
});
|
|
228
|
+
}
|
|
229
|
+
function calculateWaitUntil(duration) {
|
|
230
|
+
const now = Date.now();
|
|
231
|
+
let ms = 0;
|
|
232
|
+
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
233
|
+
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
234
|
+
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
235
|
+
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
236
|
+
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
237
|
+
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
238
|
+
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
239
|
+
if (ms <= 0) {
|
|
240
|
+
throw new Error(
|
|
241
|
+
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
242
|
+
);
|
|
243
|
+
}
|
|
244
|
+
return new Date(now + ms);
|
|
245
|
+
}
|
|
246
|
+
async function resolveCompletedWaits(backend, stepData) {
|
|
247
|
+
for (const key of Object.keys(stepData)) {
|
|
248
|
+
if (!key.startsWith("__wait_")) continue;
|
|
249
|
+
const entry = stepData[key];
|
|
250
|
+
if (!entry || typeof entry !== "object" || entry.completed) continue;
|
|
251
|
+
if (entry.type === "duration" || entry.type === "date") {
|
|
252
|
+
stepData[key] = { ...entry, completed: true };
|
|
253
|
+
} else if (entry.type === "token" && entry.tokenId) {
|
|
254
|
+
const wp = await backend.getWaitpoint(entry.tokenId);
|
|
255
|
+
if (wp && wp.status === "completed") {
|
|
256
|
+
stepData[key] = {
|
|
257
|
+
...entry,
|
|
258
|
+
completed: true,
|
|
259
|
+
result: { ok: true, output: wp.output }
|
|
260
|
+
};
|
|
261
|
+
} else if (wp && wp.status === "timed_out") {
|
|
262
|
+
stepData[key] = {
|
|
263
|
+
...entry,
|
|
264
|
+
completed: true,
|
|
265
|
+
result: { ok: false, error: "Token timed out" }
|
|
266
|
+
};
|
|
267
|
+
}
|
|
268
|
+
}
|
|
269
|
+
}
|
|
270
|
+
}
|
|
271
|
+
function buildWaitContext(backend, jobId, stepData, baseCtx) {
|
|
272
|
+
let waitCounter = 0;
|
|
273
|
+
const ctx = {
|
|
274
|
+
prolong: baseCtx.prolong,
|
|
275
|
+
onTimeout: baseCtx.onTimeout,
|
|
276
|
+
run: async (stepName, fn) => {
|
|
277
|
+
const cached = stepData[stepName];
|
|
278
|
+
if (cached && typeof cached === "object" && cached.__completed) {
|
|
279
|
+
log(`Step "${stepName}" replayed from cache for job ${jobId}`);
|
|
280
|
+
return cached.result;
|
|
281
|
+
}
|
|
282
|
+
const result = await fn();
|
|
283
|
+
stepData[stepName] = { __completed: true, result };
|
|
284
|
+
await backend.updateStepData(jobId, stepData);
|
|
285
|
+
return result;
|
|
286
|
+
},
|
|
287
|
+
waitFor: async (duration) => {
|
|
288
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
289
|
+
const cached = stepData[waitKey];
|
|
290
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
291
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
292
|
+
return;
|
|
293
|
+
}
|
|
294
|
+
const waitUntilDate = calculateWaitUntil(duration);
|
|
295
|
+
stepData[waitKey] = { type: "duration", completed: false };
|
|
296
|
+
throw new WaitSignal("duration", waitUntilDate, void 0, stepData);
|
|
297
|
+
},
|
|
298
|
+
waitUntil: async (date) => {
|
|
299
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
300
|
+
const cached = stepData[waitKey];
|
|
301
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
302
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
303
|
+
return;
|
|
304
|
+
}
|
|
305
|
+
stepData[waitKey] = { type: "date", completed: false };
|
|
306
|
+
throw new WaitSignal("date", date, void 0, stepData);
|
|
307
|
+
},
|
|
308
|
+
createToken: async (options) => {
|
|
309
|
+
const token = await backend.createWaitpoint(jobId, options);
|
|
310
|
+
return token;
|
|
311
|
+
},
|
|
312
|
+
waitForToken: async (tokenId) => {
|
|
313
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
314
|
+
const cached = stepData[waitKey];
|
|
315
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
316
|
+
log(
|
|
317
|
+
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
318
|
+
);
|
|
319
|
+
return cached.result;
|
|
320
|
+
}
|
|
321
|
+
const wp = await backend.getWaitpoint(tokenId);
|
|
322
|
+
if (wp && wp.status === "completed") {
|
|
323
|
+
const result = {
|
|
324
|
+
ok: true,
|
|
325
|
+
output: wp.output
|
|
326
|
+
};
|
|
327
|
+
stepData[waitKey] = {
|
|
328
|
+
type: "token",
|
|
329
|
+
tokenId,
|
|
330
|
+
completed: true,
|
|
331
|
+
result
|
|
332
|
+
};
|
|
333
|
+
await backend.updateStepData(jobId, stepData);
|
|
334
|
+
return result;
|
|
335
|
+
}
|
|
336
|
+
if (wp && wp.status === "timed_out") {
|
|
337
|
+
const result = {
|
|
338
|
+
ok: false,
|
|
339
|
+
error: "Token timed out"
|
|
340
|
+
};
|
|
341
|
+
stepData[waitKey] = {
|
|
342
|
+
type: "token",
|
|
343
|
+
tokenId,
|
|
344
|
+
completed: true,
|
|
345
|
+
result
|
|
346
|
+
};
|
|
347
|
+
await backend.updateStepData(jobId, stepData);
|
|
348
|
+
return result;
|
|
349
|
+
}
|
|
350
|
+
stepData[waitKey] = { type: "token", tokenId, completed: false };
|
|
351
|
+
throw new WaitSignal("token", void 0, tokenId, stepData);
|
|
352
|
+
},
|
|
353
|
+
setProgress: async (percent) => {
|
|
354
|
+
if (percent < 0 || percent > 100)
|
|
355
|
+
throw new Error("Progress must be between 0 and 100");
|
|
356
|
+
await backend.updateProgress(jobId, Math.round(percent));
|
|
357
|
+
}
|
|
358
|
+
};
|
|
359
|
+
return ctx;
|
|
360
|
+
}
|
|
361
|
+
async function processJobWithHandlers(backend, job, jobHandlers) {
|
|
362
|
+
const handler = jobHandlers[job.jobType];
|
|
363
|
+
if (!handler) {
|
|
364
|
+
await backend.setPendingReasonForUnpickedJobs(
|
|
365
|
+
`No handler registered for job type: ${job.jobType}`,
|
|
366
|
+
job.jobType
|
|
367
|
+
);
|
|
368
|
+
await backend.failJob(
|
|
369
|
+
job.id,
|
|
370
|
+
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
371
|
+
"no_handler" /* NoHandler */
|
|
372
|
+
);
|
|
373
|
+
return;
|
|
374
|
+
}
|
|
375
|
+
const stepData = { ...job.stepData || {} };
|
|
376
|
+
const hasStepHistory = Object.keys(stepData).some(
|
|
377
|
+
(k) => k.startsWith("__wait_")
|
|
378
|
+
);
|
|
379
|
+
if (hasStepHistory) {
|
|
380
|
+
await resolveCompletedWaits(backend, stepData);
|
|
381
|
+
await backend.updateStepData(job.id, stepData);
|
|
382
|
+
}
|
|
383
|
+
const timeoutMs = job.timeoutMs ?? void 0;
|
|
384
|
+
const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
|
|
385
|
+
let timeoutId;
|
|
386
|
+
const controller = new AbortController();
|
|
387
|
+
try {
|
|
388
|
+
if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
|
|
389
|
+
await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
|
|
390
|
+
} else {
|
|
391
|
+
let onTimeoutCallback;
|
|
392
|
+
let timeoutReject;
|
|
393
|
+
const armTimeout = (ms) => {
|
|
394
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
395
|
+
timeoutId = setTimeout(() => {
|
|
396
|
+
if (onTimeoutCallback) {
|
|
397
|
+
try {
|
|
398
|
+
const extension = onTimeoutCallback();
|
|
399
|
+
if (typeof extension === "number" && extension > 0) {
|
|
400
|
+
backend.prolongJob(job.id).catch(() => {
|
|
401
|
+
});
|
|
402
|
+
armTimeout(extension);
|
|
403
|
+
return;
|
|
404
|
+
}
|
|
405
|
+
} catch (callbackError) {
|
|
406
|
+
log(
|
|
407
|
+
`onTimeout callback threw for job ${job.id}: ${callbackError}`
|
|
408
|
+
);
|
|
409
|
+
}
|
|
410
|
+
}
|
|
411
|
+
controller.abort();
|
|
412
|
+
const timeoutError = new Error(`Job timed out after ${ms} ms`);
|
|
413
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
414
|
+
if (timeoutReject) {
|
|
415
|
+
timeoutReject(timeoutError);
|
|
416
|
+
}
|
|
417
|
+
}, ms);
|
|
418
|
+
};
|
|
419
|
+
const hasTimeout = timeoutMs != null && timeoutMs > 0;
|
|
420
|
+
const baseCtx = hasTimeout ? {
|
|
421
|
+
prolong: (ms) => {
|
|
422
|
+
const duration = ms ?? timeoutMs;
|
|
423
|
+
if (duration != null && duration > 0) {
|
|
424
|
+
armTimeout(duration);
|
|
425
|
+
backend.prolongJob(job.id).catch(() => {
|
|
426
|
+
});
|
|
427
|
+
}
|
|
428
|
+
},
|
|
429
|
+
onTimeout: (callback) => {
|
|
430
|
+
onTimeoutCallback = callback;
|
|
431
|
+
}
|
|
432
|
+
} : {
|
|
433
|
+
prolong: () => {
|
|
434
|
+
log("prolong() called but ignored: job has no timeout set");
|
|
435
|
+
},
|
|
436
|
+
onTimeout: () => {
|
|
437
|
+
log("onTimeout() called but ignored: job has no timeout set");
|
|
438
|
+
}
|
|
439
|
+
};
|
|
440
|
+
const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
|
|
441
|
+
if (forceKillOnTimeout && !hasTimeout) {
|
|
442
|
+
log(
|
|
443
|
+
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
444
|
+
);
|
|
445
|
+
}
|
|
446
|
+
const jobPromise = handler(job.payload, controller.signal, ctx);
|
|
447
|
+
if (hasTimeout) {
|
|
448
|
+
await Promise.race([
|
|
449
|
+
jobPromise,
|
|
450
|
+
new Promise((_, reject) => {
|
|
451
|
+
timeoutReject = reject;
|
|
452
|
+
armTimeout(timeoutMs);
|
|
453
|
+
})
|
|
454
|
+
]);
|
|
455
|
+
} else {
|
|
456
|
+
await jobPromise;
|
|
457
|
+
}
|
|
458
|
+
}
|
|
459
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
460
|
+
await backend.completeJob(job.id);
|
|
461
|
+
} catch (error) {
|
|
462
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
463
|
+
if (error instanceof WaitSignal) {
|
|
464
|
+
log(
|
|
465
|
+
`Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? "none"}, tokenId=${error.tokenId ?? "none"}`
|
|
466
|
+
);
|
|
467
|
+
await backend.waitJob(job.id, {
|
|
468
|
+
waitUntil: error.waitUntil,
|
|
469
|
+
waitTokenId: error.tokenId,
|
|
470
|
+
stepData: error.stepData
|
|
471
|
+
});
|
|
472
|
+
return;
|
|
473
|
+
}
|
|
474
|
+
console.error(`Error processing job ${job.id}:`, error);
|
|
475
|
+
let failureReason = "handler_error" /* HandlerError */;
|
|
476
|
+
if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
|
|
477
|
+
failureReason = "timeout" /* Timeout */;
|
|
478
|
+
}
|
|
479
|
+
await backend.failJob(
|
|
480
|
+
job.id,
|
|
481
|
+
error instanceof Error ? error : new Error(String(error)),
|
|
482
|
+
failureReason
|
|
483
|
+
);
|
|
484
|
+
}
|
|
485
|
+
}
|
|
486
|
+
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
|
|
487
|
+
const jobs = await backend.getNextBatch(
|
|
488
|
+
workerId,
|
|
489
|
+
batchSize,
|
|
490
|
+
jobType
|
|
491
|
+
);
|
|
492
|
+
if (!concurrency || concurrency >= jobs.length) {
|
|
493
|
+
await Promise.all(
|
|
494
|
+
jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
|
|
495
|
+
);
|
|
496
|
+
return jobs.length;
|
|
497
|
+
}
|
|
498
|
+
let idx = 0;
|
|
499
|
+
let running = 0;
|
|
500
|
+
let finished = 0;
|
|
501
|
+
return new Promise((resolve, reject) => {
|
|
502
|
+
const next = () => {
|
|
503
|
+
if (finished === jobs.length) return resolve(jobs.length);
|
|
504
|
+
while (running < concurrency && idx < jobs.length) {
|
|
505
|
+
const job = jobs[idx++];
|
|
506
|
+
running++;
|
|
507
|
+
processJobWithHandlers(backend, job, jobHandlers).then(() => {
|
|
508
|
+
running--;
|
|
509
|
+
finished++;
|
|
510
|
+
next();
|
|
511
|
+
}).catch((err) => {
|
|
512
|
+
running--;
|
|
513
|
+
finished++;
|
|
514
|
+
if (onError) {
|
|
515
|
+
onError(err instanceof Error ? err : new Error(String(err)));
|
|
516
|
+
}
|
|
517
|
+
next();
|
|
518
|
+
});
|
|
519
|
+
}
|
|
520
|
+
};
|
|
521
|
+
next();
|
|
522
|
+
});
|
|
523
|
+
}
|
|
524
|
+
var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
|
|
525
|
+
const {
|
|
526
|
+
workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
|
|
527
|
+
batchSize = 10,
|
|
528
|
+
pollInterval = 5e3,
|
|
529
|
+
onError = (error) => console.error("Job processor error:", error),
|
|
530
|
+
jobType,
|
|
531
|
+
concurrency = 3
|
|
532
|
+
} = options;
|
|
533
|
+
let running = false;
|
|
534
|
+
let intervalId = null;
|
|
535
|
+
let currentBatchPromise = null;
|
|
536
|
+
setLogContext(options.verbose ?? false);
|
|
537
|
+
const processJobs = async () => {
|
|
538
|
+
if (!running) return 0;
|
|
539
|
+
if (onBeforeBatch) {
|
|
540
|
+
try {
|
|
541
|
+
await onBeforeBatch();
|
|
542
|
+
} catch (hookError) {
|
|
543
|
+
log(`onBeforeBatch hook error: ${hookError}`);
|
|
544
|
+
if (onError) {
|
|
545
|
+
onError(
|
|
546
|
+
hookError instanceof Error ? hookError : new Error(String(hookError))
|
|
547
|
+
);
|
|
548
|
+
}
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
log(
|
|
552
|
+
`Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
|
|
553
|
+
);
|
|
554
|
+
try {
|
|
555
|
+
const processed = await processBatchWithHandlers(
|
|
556
|
+
backend,
|
|
557
|
+
workerId,
|
|
558
|
+
batchSize,
|
|
559
|
+
jobType,
|
|
560
|
+
handlers,
|
|
561
|
+
concurrency,
|
|
562
|
+
onError
|
|
563
|
+
);
|
|
564
|
+
return processed;
|
|
565
|
+
} catch (error) {
|
|
566
|
+
onError(error instanceof Error ? error : new Error(String(error)));
|
|
567
|
+
}
|
|
568
|
+
return 0;
|
|
569
|
+
};
|
|
570
|
+
return {
|
|
571
|
+
/**
|
|
572
|
+
* Start the job processor in the background.
|
|
573
|
+
* - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
|
|
574
|
+
* - You have to call the stop method to stop the processor.
|
|
575
|
+
*/
|
|
576
|
+
startInBackground: () => {
|
|
577
|
+
if (running) return;
|
|
578
|
+
log(`Starting job processor with workerId: ${workerId}`);
|
|
579
|
+
running = true;
|
|
580
|
+
const scheduleNext = (immediate) => {
|
|
581
|
+
if (!running) return;
|
|
582
|
+
if (immediate) {
|
|
583
|
+
intervalId = setTimeout(loop, 0);
|
|
584
|
+
} else {
|
|
585
|
+
intervalId = setTimeout(loop, pollInterval);
|
|
586
|
+
}
|
|
587
|
+
};
|
|
588
|
+
const loop = async () => {
|
|
589
|
+
if (!running) return;
|
|
590
|
+
currentBatchPromise = processJobs();
|
|
591
|
+
const processed = await currentBatchPromise;
|
|
592
|
+
currentBatchPromise = null;
|
|
593
|
+
scheduleNext(processed === batchSize);
|
|
594
|
+
};
|
|
595
|
+
loop();
|
|
596
|
+
},
|
|
597
|
+
/**
|
|
598
|
+
* Stop the job processor that runs in the background.
|
|
599
|
+
* Does not wait for in-flight jobs.
|
|
600
|
+
*/
|
|
601
|
+
stop: () => {
|
|
602
|
+
log(`Stopping job processor with workerId: ${workerId}`);
|
|
603
|
+
running = false;
|
|
604
|
+
if (intervalId) {
|
|
605
|
+
clearTimeout(intervalId);
|
|
606
|
+
intervalId = null;
|
|
607
|
+
}
|
|
608
|
+
},
|
|
609
|
+
/**
|
|
610
|
+
* Stop the job processor and wait for all in-flight jobs to complete.
|
|
611
|
+
* Useful for graceful shutdown (e.g., SIGTERM handling).
|
|
612
|
+
*/
|
|
613
|
+
stopAndDrain: async (drainTimeoutMs = 3e4) => {
|
|
614
|
+
log(`Stopping and draining job processor with workerId: ${workerId}`);
|
|
615
|
+
running = false;
|
|
616
|
+
if (intervalId) {
|
|
617
|
+
clearTimeout(intervalId);
|
|
618
|
+
intervalId = null;
|
|
619
|
+
}
|
|
620
|
+
if (currentBatchPromise) {
|
|
621
|
+
await Promise.race([
|
|
622
|
+
currentBatchPromise.catch(() => {
|
|
623
|
+
}),
|
|
624
|
+
new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
|
|
625
|
+
]);
|
|
626
|
+
currentBatchPromise = null;
|
|
627
|
+
}
|
|
628
|
+
log(`Job processor ${workerId} drained`);
|
|
629
|
+
},
|
|
630
|
+
/**
|
|
631
|
+
* Start the job processor synchronously.
|
|
632
|
+
* - This will process all jobs immediately and then stop.
|
|
633
|
+
* - The pollInterval is ignored.
|
|
634
|
+
*/
|
|
635
|
+
start: async () => {
|
|
636
|
+
log(`Starting job processor with workerId: ${workerId}`);
|
|
637
|
+
running = true;
|
|
638
|
+
const processed = await processJobs();
|
|
639
|
+
running = false;
|
|
640
|
+
return processed;
|
|
641
|
+
},
|
|
642
|
+
isRunning: () => running
|
|
643
|
+
};
|
|
644
|
+
};
|
|
645
|
+
function loadPemOrFile(value) {
|
|
646
|
+
if (!value) return void 0;
|
|
647
|
+
if (value.startsWith("file://")) {
|
|
648
|
+
const filePath = value.slice(7);
|
|
649
|
+
return fs.readFileSync(filePath, "utf8");
|
|
650
|
+
}
|
|
651
|
+
return value;
|
|
652
|
+
}
|
|
653
|
+
var createPool = (config) => {
|
|
654
|
+
let searchPath;
|
|
655
|
+
let ssl = void 0;
|
|
656
|
+
let customCA;
|
|
657
|
+
let sslmode;
|
|
658
|
+
if (config.connectionString) {
|
|
659
|
+
try {
|
|
660
|
+
const url = new URL(config.connectionString);
|
|
661
|
+
searchPath = url.searchParams.get("search_path") || void 0;
|
|
662
|
+
sslmode = url.searchParams.get("sslmode") || void 0;
|
|
663
|
+
if (sslmode === "no-verify") {
|
|
664
|
+
ssl = { rejectUnauthorized: false };
|
|
665
|
+
}
|
|
666
|
+
} catch (e) {
|
|
667
|
+
const parsed = parse(config.connectionString);
|
|
668
|
+
if (parsed.options) {
|
|
669
|
+
const match = parsed.options.match(/search_path=([^\s]+)/);
|
|
670
|
+
if (match) {
|
|
671
|
+
searchPath = match[1];
|
|
672
|
+
}
|
|
673
|
+
}
|
|
674
|
+
sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
|
|
675
|
+
if (sslmode === "no-verify") {
|
|
676
|
+
ssl = { rejectUnauthorized: false };
|
|
677
|
+
}
|
|
678
|
+
}
|
|
679
|
+
}
|
|
680
|
+
if (config.ssl) {
|
|
681
|
+
if (typeof config.ssl.ca === "string") {
|
|
682
|
+
customCA = config.ssl.ca;
|
|
683
|
+
} else if (typeof process.env.PGSSLROOTCERT === "string") {
|
|
684
|
+
customCA = process.env.PGSSLROOTCERT;
|
|
685
|
+
} else {
|
|
686
|
+
customCA = void 0;
|
|
687
|
+
}
|
|
688
|
+
const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
|
|
689
|
+
ssl = {
|
|
690
|
+
...ssl,
|
|
691
|
+
...caValue ? { ca: caValue } : {},
|
|
692
|
+
cert: loadPemOrFile(
|
|
693
|
+
typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
|
|
694
|
+
),
|
|
695
|
+
key: loadPemOrFile(
|
|
696
|
+
typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
|
|
697
|
+
),
|
|
698
|
+
rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
|
|
699
|
+
};
|
|
700
|
+
}
|
|
701
|
+
if (sslmode && customCA) {
|
|
702
|
+
const warning = `
|
|
703
|
+
|
|
704
|
+
\x1B[33m**************************************************
|
|
705
|
+
\u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
|
|
706
|
+
**************************************************
|
|
707
|
+
Both sslmode ('${sslmode}') is set in the connection string
|
|
708
|
+
and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
|
|
709
|
+
This combination may cause connection failures or unexpected behavior.
|
|
710
|
+
|
|
711
|
+
Recommended: Remove sslmode from the connection string when using a custom CA.
|
|
712
|
+
**************************************************\x1B[0m
|
|
713
|
+
`;
|
|
714
|
+
console.warn(warning);
|
|
715
|
+
}
|
|
716
|
+
const pool = new Pool({
|
|
717
|
+
...config,
|
|
718
|
+
...ssl ? { ssl } : {}
|
|
719
|
+
});
|
|
720
|
+
if (searchPath) {
|
|
721
|
+
pool.on("connect", (client) => {
|
|
722
|
+
client.query(`SET search_path TO ${searchPath}`);
|
|
723
|
+
});
|
|
724
|
+
}
|
|
725
|
+
return pool;
|
|
726
|
+
};
|
|
727
|
+
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
728
|
+
function parseTimeoutString(timeout) {
|
|
729
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
730
|
+
if (!match) {
|
|
731
|
+
throw new Error(
|
|
732
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
733
|
+
);
|
|
734
|
+
}
|
|
735
|
+
const value = parseInt(match[1], 10);
|
|
736
|
+
const unit = match[2];
|
|
737
|
+
let ms;
|
|
738
|
+
switch (unit) {
|
|
739
|
+
case "s":
|
|
740
|
+
ms = value * 1e3;
|
|
741
|
+
break;
|
|
742
|
+
case "m":
|
|
743
|
+
ms = value * 60 * 1e3;
|
|
744
|
+
break;
|
|
745
|
+
case "h":
|
|
746
|
+
ms = value * 60 * 60 * 1e3;
|
|
747
|
+
break;
|
|
748
|
+
case "d":
|
|
749
|
+
ms = value * 24 * 60 * 60 * 1e3;
|
|
750
|
+
break;
|
|
751
|
+
default:
|
|
752
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
753
|
+
}
|
|
754
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
755
|
+
throw new Error(
|
|
756
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
757
|
+
);
|
|
758
|
+
}
|
|
759
|
+
return ms;
|
|
760
|
+
}
|
|
761
|
+
var PostgresBackend = class {
|
|
762
|
+
constructor(pool) {
|
|
763
|
+
this.pool = pool;
|
|
764
|
+
}
|
|
765
|
+
/** Expose the raw pool for advanced usage. */
|
|
766
|
+
getPool() {
|
|
767
|
+
return this.pool;
|
|
768
|
+
}
|
|
769
|
+
// ── Events ──────────────────────────────────────────────────────────
|
|
770
|
+
async recordJobEvent(jobId, eventType, metadata) {
|
|
771
|
+
const client = await this.pool.connect();
|
|
772
|
+
try {
|
|
773
|
+
await client.query(
|
|
774
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
775
|
+
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
776
|
+
);
|
|
777
|
+
} catch (error) {
|
|
778
|
+
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
779
|
+
} finally {
|
|
780
|
+
client.release();
|
|
781
|
+
}
|
|
782
|
+
}
|
|
783
|
+
async getJobEvents(jobId) {
|
|
784
|
+
const client = await this.pool.connect();
|
|
785
|
+
try {
|
|
786
|
+
const res = await client.query(
|
|
787
|
+
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
788
|
+
[jobId]
|
|
789
|
+
);
|
|
790
|
+
return res.rows;
|
|
791
|
+
} finally {
|
|
792
|
+
client.release();
|
|
793
|
+
}
|
|
794
|
+
}
|
|
795
|
+
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
796
|
+
async addJob({
|
|
797
|
+
jobType,
|
|
798
|
+
payload,
|
|
799
|
+
maxAttempts = 3,
|
|
800
|
+
priority = 0,
|
|
801
|
+
runAt = null,
|
|
802
|
+
timeoutMs = void 0,
|
|
803
|
+
forceKillOnTimeout = false,
|
|
804
|
+
tags = void 0,
|
|
805
|
+
idempotencyKey = void 0
|
|
806
|
+
}) {
|
|
807
|
+
const client = await this.pool.connect();
|
|
808
|
+
try {
|
|
809
|
+
let result;
|
|
810
|
+
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
811
|
+
if (runAt) {
|
|
812
|
+
result = await client.query(
|
|
813
|
+
`INSERT INTO job_queue
|
|
814
|
+
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
815
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
816
|
+
${onConflict}
|
|
817
|
+
RETURNING id`,
|
|
818
|
+
[
|
|
819
|
+
jobType,
|
|
820
|
+
payload,
|
|
821
|
+
maxAttempts,
|
|
822
|
+
priority,
|
|
823
|
+
runAt,
|
|
824
|
+
timeoutMs ?? null,
|
|
825
|
+
forceKillOnTimeout ?? false,
|
|
826
|
+
tags ?? null,
|
|
827
|
+
idempotencyKey ?? null
|
|
828
|
+
]
|
|
829
|
+
);
|
|
830
|
+
} else {
|
|
831
|
+
result = await client.query(
|
|
832
|
+
`INSERT INTO job_queue
|
|
833
|
+
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
834
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
835
|
+
${onConflict}
|
|
836
|
+
RETURNING id`,
|
|
837
|
+
[
|
|
838
|
+
jobType,
|
|
839
|
+
payload,
|
|
840
|
+
maxAttempts,
|
|
841
|
+
priority,
|
|
842
|
+
timeoutMs ?? null,
|
|
843
|
+
forceKillOnTimeout ?? false,
|
|
844
|
+
tags ?? null,
|
|
845
|
+
idempotencyKey ?? null
|
|
846
|
+
]
|
|
847
|
+
);
|
|
848
|
+
}
|
|
849
|
+
if (result.rows.length === 0 && idempotencyKey) {
|
|
850
|
+
const existing = await client.query(
|
|
851
|
+
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
852
|
+
[idempotencyKey]
|
|
853
|
+
);
|
|
854
|
+
if (existing.rows.length > 0) {
|
|
855
|
+
log(
|
|
856
|
+
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
857
|
+
);
|
|
858
|
+
return existing.rows[0].id;
|
|
859
|
+
}
|
|
860
|
+
throw new Error(
|
|
861
|
+
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
862
|
+
);
|
|
863
|
+
}
|
|
864
|
+
const jobId = result.rows[0].id;
|
|
865
|
+
log(
|
|
866
|
+
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
867
|
+
);
|
|
868
|
+
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
869
|
+
jobType,
|
|
870
|
+
payload,
|
|
871
|
+
tags,
|
|
872
|
+
idempotencyKey
|
|
873
|
+
});
|
|
874
|
+
return jobId;
|
|
875
|
+
} catch (error) {
|
|
876
|
+
log(`Error adding job: ${error}`);
|
|
877
|
+
throw error;
|
|
878
|
+
} finally {
|
|
879
|
+
client.release();
|
|
880
|
+
}
|
|
881
|
+
}
|
|
882
|
+
async getJob(id) {
|
|
883
|
+
const client = await this.pool.connect();
|
|
884
|
+
try {
|
|
885
|
+
const result = await client.query(
|
|
886
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
887
|
+
[id]
|
|
888
|
+
);
|
|
889
|
+
if (result.rows.length === 0) {
|
|
890
|
+
log(`Job ${id} not found`);
|
|
891
|
+
return null;
|
|
892
|
+
}
|
|
893
|
+
log(`Found job ${id}`);
|
|
894
|
+
const job = result.rows[0];
|
|
895
|
+
return {
|
|
896
|
+
...job,
|
|
897
|
+
payload: job.payload,
|
|
898
|
+
timeoutMs: job.timeoutMs,
|
|
899
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
900
|
+
failureReason: job.failureReason
|
|
901
|
+
};
|
|
902
|
+
} catch (error) {
|
|
903
|
+
log(`Error getting job ${id}: ${error}`);
|
|
904
|
+
throw error;
|
|
905
|
+
} finally {
|
|
906
|
+
client.release();
|
|
907
|
+
}
|
|
908
|
+
}
|
|
909
|
+
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
910
|
+
const client = await this.pool.connect();
|
|
911
|
+
try {
|
|
912
|
+
const result = await client.query(
|
|
913
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
|
|
914
|
+
[status, limit, offset]
|
|
915
|
+
);
|
|
916
|
+
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
917
|
+
return result.rows.map((job) => ({
|
|
918
|
+
...job,
|
|
919
|
+
payload: job.payload,
|
|
920
|
+
timeoutMs: job.timeoutMs,
|
|
921
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
922
|
+
failureReason: job.failureReason
|
|
923
|
+
}));
|
|
924
|
+
} catch (error) {
|
|
925
|
+
log(`Error getting jobs by status ${status}: ${error}`);
|
|
926
|
+
throw error;
|
|
927
|
+
} finally {
|
|
928
|
+
client.release();
|
|
929
|
+
}
|
|
930
|
+
}
|
|
931
|
+
async getAllJobs(limit = 100, offset = 0) {
|
|
932
|
+
const client = await this.pool.connect();
|
|
933
|
+
try {
|
|
934
|
+
const result = await client.query(
|
|
935
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
936
|
+
[limit, offset]
|
|
937
|
+
);
|
|
938
|
+
log(`Found ${result.rows.length} jobs (all)`);
|
|
939
|
+
return result.rows.map((job) => ({
|
|
940
|
+
...job,
|
|
941
|
+
payload: job.payload,
|
|
942
|
+
timeoutMs: job.timeoutMs,
|
|
943
|
+
forceKillOnTimeout: job.forceKillOnTimeout
|
|
944
|
+
}));
|
|
945
|
+
} catch (error) {
|
|
946
|
+
log(`Error getting all jobs: ${error}`);
|
|
947
|
+
throw error;
|
|
948
|
+
} finally {
|
|
949
|
+
client.release();
|
|
950
|
+
}
|
|
951
|
+
}
|
|
952
|
+
async getJobs(filters, limit = 100, offset = 0) {
|
|
953
|
+
const client = await this.pool.connect();
|
|
954
|
+
try {
|
|
955
|
+
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
|
|
956
|
+
const params = [];
|
|
957
|
+
const where = [];
|
|
958
|
+
let paramIdx = 1;
|
|
959
|
+
if (filters) {
|
|
960
|
+
if (filters.jobType) {
|
|
961
|
+
where.push(`job_type = $${paramIdx++}`);
|
|
962
|
+
params.push(filters.jobType);
|
|
963
|
+
}
|
|
964
|
+
if (filters.priority !== void 0) {
|
|
965
|
+
where.push(`priority = $${paramIdx++}`);
|
|
966
|
+
params.push(filters.priority);
|
|
967
|
+
}
|
|
968
|
+
if (filters.runAt) {
|
|
969
|
+
if (filters.runAt instanceof Date) {
|
|
970
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
971
|
+
params.push(filters.runAt);
|
|
972
|
+
} else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
|
|
973
|
+
const ops = filters.runAt;
|
|
974
|
+
if (ops.gt) {
|
|
975
|
+
where.push(`run_at > $${paramIdx++}`);
|
|
976
|
+
params.push(ops.gt);
|
|
977
|
+
}
|
|
978
|
+
if (ops.gte) {
|
|
979
|
+
where.push(`run_at >= $${paramIdx++}`);
|
|
980
|
+
params.push(ops.gte);
|
|
981
|
+
}
|
|
982
|
+
if (ops.lt) {
|
|
983
|
+
where.push(`run_at < $${paramIdx++}`);
|
|
984
|
+
params.push(ops.lt);
|
|
985
|
+
}
|
|
986
|
+
if (ops.lte) {
|
|
987
|
+
where.push(`run_at <= $${paramIdx++}`);
|
|
988
|
+
params.push(ops.lte);
|
|
989
|
+
}
|
|
990
|
+
if (ops.eq) {
|
|
991
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
992
|
+
params.push(ops.eq);
|
|
993
|
+
}
|
|
994
|
+
}
|
|
995
|
+
}
|
|
996
|
+
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
997
|
+
const mode = filters.tags.mode || "all";
|
|
998
|
+
const tagValues = filters.tags.values;
|
|
999
|
+
switch (mode) {
|
|
1000
|
+
case "exact":
|
|
1001
|
+
where.push(`tags = $${paramIdx++}`);
|
|
1002
|
+
params.push(tagValues);
|
|
1003
|
+
break;
|
|
297
1004
|
case "all":
|
|
298
1005
|
where.push(`tags @> $${paramIdx++}`);
|
|
299
1006
|
params.push(tagValues);
|
|
@@ -887,1037 +1594,668 @@ var PostgresBackend = class {
|
|
|
887
1594
|
metadata.maxAttempts = updates.maxAttempts;
|
|
888
1595
|
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
889
1596
|
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
890
|
-
if (updates.timeoutMs !== void 0)
|
|
891
|
-
metadata.timeoutMs = updates.timeoutMs;
|
|
892
|
-
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
893
|
-
for (const row of result.rows) {
|
|
894
|
-
await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
|
|
895
|
-
}
|
|
896
|
-
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
897
|
-
return editedCount;
|
|
898
|
-
} catch (error) {
|
|
899
|
-
log(`Error editing pending jobs: ${error}`);
|
|
900
|
-
throw error;
|
|
901
|
-
} finally {
|
|
902
|
-
client.release();
|
|
903
|
-
}
|
|
904
|
-
}
|
|
905
|
-
async cleanupOldJobs(daysToKeep = 30) {
|
|
906
|
-
const client = await this.pool.connect();
|
|
907
|
-
try {
|
|
908
|
-
const result = await client.query(
|
|
909
|
-
`
|
|
910
|
-
DELETE FROM job_queue
|
|
911
|
-
WHERE status = 'completed'
|
|
912
|
-
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
913
|
-
RETURNING id
|
|
914
|
-
`,
|
|
915
|
-
[daysToKeep]
|
|
916
|
-
);
|
|
917
|
-
log(`Deleted ${result.rowCount} old jobs`);
|
|
918
|
-
return result.rowCount || 0;
|
|
919
|
-
} catch (error) {
|
|
920
|
-
log(`Error cleaning up old jobs: ${error}`);
|
|
921
|
-
throw error;
|
|
922
|
-
} finally {
|
|
923
|
-
client.release();
|
|
924
|
-
}
|
|
925
|
-
}
|
|
926
|
-
async cleanupOldJobEvents(daysToKeep = 30) {
|
|
927
|
-
const client = await this.pool.connect();
|
|
928
|
-
try {
|
|
929
|
-
const result = await client.query(
|
|
930
|
-
`
|
|
931
|
-
DELETE FROM job_events
|
|
932
|
-
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
933
|
-
RETURNING id
|
|
934
|
-
`,
|
|
935
|
-
[daysToKeep]
|
|
936
|
-
);
|
|
937
|
-
log(`Deleted ${result.rowCount} old job events`);
|
|
938
|
-
return result.rowCount || 0;
|
|
939
|
-
} catch (error) {
|
|
940
|
-
log(`Error cleaning up old job events: ${error}`);
|
|
941
|
-
throw error;
|
|
942
|
-
} finally {
|
|
943
|
-
client.release();
|
|
944
|
-
}
|
|
945
|
-
}
|
|
946
|
-
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
947
|
-
const client = await this.pool.connect();
|
|
948
|
-
try {
|
|
949
|
-
const result = await client.query(
|
|
950
|
-
`
|
|
951
|
-
UPDATE job_queue
|
|
952
|
-
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
953
|
-
WHERE status = 'processing'
|
|
954
|
-
AND locked_at < NOW() - GREATEST(
|
|
955
|
-
INTERVAL '1 minute' * $1::int,
|
|
956
|
-
INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
|
|
957
|
-
)
|
|
958
|
-
RETURNING id
|
|
959
|
-
`,
|
|
960
|
-
[maxProcessingTimeMinutes]
|
|
961
|
-
);
|
|
962
|
-
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
963
|
-
return result.rowCount || 0;
|
|
964
|
-
} catch (error) {
|
|
965
|
-
log(`Error reclaiming stuck jobs: ${error}`);
|
|
966
|
-
throw error;
|
|
967
|
-
} finally {
|
|
968
|
-
client.release();
|
|
969
|
-
}
|
|
970
|
-
}
|
|
971
|
-
// ── Internal helpers ──────────────────────────────────────────────────
|
|
972
|
-
/**
|
|
973
|
-
* Batch-insert multiple job events in a single query.
|
|
974
|
-
* More efficient than individual recordJobEvent calls.
|
|
975
|
-
*/
|
|
976
|
-
async recordJobEventsBatch(events) {
|
|
977
|
-
if (events.length === 0) return;
|
|
978
|
-
const client = await this.pool.connect();
|
|
979
|
-
try {
|
|
980
|
-
const values = [];
|
|
981
|
-
const params = [];
|
|
982
|
-
let paramIdx = 1;
|
|
983
|
-
for (const event of events) {
|
|
984
|
-
values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
|
|
985
|
-
params.push(
|
|
986
|
-
event.jobId,
|
|
987
|
-
event.eventType,
|
|
988
|
-
event.metadata ? JSON.stringify(event.metadata) : null
|
|
989
|
-
);
|
|
990
|
-
}
|
|
991
|
-
await client.query(
|
|
992
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
|
|
993
|
-
params
|
|
994
|
-
);
|
|
995
|
-
} catch (error) {
|
|
996
|
-
log(`Error recording batch job events: ${error}`);
|
|
997
|
-
} finally {
|
|
998
|
-
client.release();
|
|
999
|
-
}
|
|
1000
|
-
}
|
|
1001
|
-
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
1002
|
-
const client = await this.pool.connect();
|
|
1003
|
-
try {
|
|
1004
|
-
let jobTypeFilter = "";
|
|
1005
|
-
const params = [reason];
|
|
1006
|
-
if (jobType) {
|
|
1007
|
-
if (Array.isArray(jobType)) {
|
|
1008
|
-
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
1009
|
-
params.push(jobType);
|
|
1010
|
-
} else {
|
|
1011
|
-
jobTypeFilter = ` AND job_type = $2`;
|
|
1012
|
-
params.push(jobType);
|
|
1013
|
-
}
|
|
1014
|
-
}
|
|
1015
|
-
await client.query(
|
|
1016
|
-
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
1017
|
-
params
|
|
1018
|
-
);
|
|
1019
|
-
} finally {
|
|
1020
|
-
client.release();
|
|
1021
|
-
}
|
|
1022
|
-
}
|
|
1023
|
-
};
|
|
1024
|
-
var recordJobEvent = async (pool, jobId, eventType, metadata) => new PostgresBackend(pool).recordJobEvent(jobId, eventType, metadata);
|
|
1025
|
-
var waitJob = async (pool, jobId, options) => {
|
|
1026
|
-
const client = await pool.connect();
|
|
1027
|
-
try {
|
|
1028
|
-
const result = await client.query(
|
|
1029
|
-
`
|
|
1030
|
-
UPDATE job_queue
|
|
1031
|
-
SET status = 'waiting',
|
|
1032
|
-
wait_until = $2,
|
|
1033
|
-
wait_token_id = $3,
|
|
1034
|
-
step_data = $4,
|
|
1035
|
-
locked_at = NULL,
|
|
1036
|
-
locked_by = NULL,
|
|
1037
|
-
updated_at = NOW()
|
|
1038
|
-
WHERE id = $1 AND status = 'processing'
|
|
1039
|
-
`,
|
|
1040
|
-
[
|
|
1041
|
-
jobId,
|
|
1042
|
-
options.waitUntil ?? null,
|
|
1043
|
-
options.waitTokenId ?? null,
|
|
1044
|
-
JSON.stringify(options.stepData)
|
|
1045
|
-
]
|
|
1046
|
-
);
|
|
1047
|
-
if (result.rowCount === 0) {
|
|
1048
|
-
log(
|
|
1049
|
-
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
1050
|
-
);
|
|
1051
|
-
return;
|
|
1052
|
-
}
|
|
1053
|
-
await recordJobEvent(pool, jobId, "waiting" /* Waiting */, {
|
|
1054
|
-
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
1055
|
-
waitTokenId: options.waitTokenId ?? null
|
|
1056
|
-
});
|
|
1057
|
-
log(`Job ${jobId} set to waiting`);
|
|
1058
|
-
} catch (error) {
|
|
1059
|
-
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
1060
|
-
throw error;
|
|
1061
|
-
} finally {
|
|
1062
|
-
client.release();
|
|
1063
|
-
}
|
|
1064
|
-
};
|
|
1065
|
-
var updateStepData = async (pool, jobId, stepData) => {
|
|
1066
|
-
const client = await pool.connect();
|
|
1067
|
-
try {
|
|
1068
|
-
await client.query(
|
|
1069
|
-
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
1070
|
-
[jobId, JSON.stringify(stepData)]
|
|
1071
|
-
);
|
|
1072
|
-
} catch (error) {
|
|
1073
|
-
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
1074
|
-
} finally {
|
|
1075
|
-
client.release();
|
|
1076
|
-
}
|
|
1077
|
-
};
|
|
1078
|
-
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
1079
|
-
function parseTimeoutString(timeout) {
|
|
1080
|
-
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
1081
|
-
if (!match) {
|
|
1082
|
-
throw new Error(
|
|
1083
|
-
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
1084
|
-
);
|
|
1085
|
-
}
|
|
1086
|
-
const value = parseInt(match[1], 10);
|
|
1087
|
-
const unit = match[2];
|
|
1088
|
-
let ms;
|
|
1089
|
-
switch (unit) {
|
|
1090
|
-
case "s":
|
|
1091
|
-
ms = value * 1e3;
|
|
1092
|
-
break;
|
|
1093
|
-
case "m":
|
|
1094
|
-
ms = value * 60 * 1e3;
|
|
1095
|
-
break;
|
|
1096
|
-
case "h":
|
|
1097
|
-
ms = value * 60 * 60 * 1e3;
|
|
1098
|
-
break;
|
|
1099
|
-
case "d":
|
|
1100
|
-
ms = value * 24 * 60 * 60 * 1e3;
|
|
1101
|
-
break;
|
|
1102
|
-
default:
|
|
1103
|
-
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
1104
|
-
}
|
|
1105
|
-
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
1106
|
-
throw new Error(
|
|
1107
|
-
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
1108
|
-
);
|
|
1109
|
-
}
|
|
1110
|
-
return ms;
|
|
1111
|
-
}
|
|
1112
|
-
var createWaitpoint = async (pool, jobId, options) => {
|
|
1113
|
-
const client = await pool.connect();
|
|
1114
|
-
try {
|
|
1115
|
-
const id = `wp_${randomUUID()}`;
|
|
1116
|
-
let timeoutAt = null;
|
|
1117
|
-
if (options?.timeout) {
|
|
1118
|
-
const ms = parseTimeoutString(options.timeout);
|
|
1119
|
-
timeoutAt = new Date(Date.now() + ms);
|
|
1120
|
-
}
|
|
1121
|
-
await client.query(
|
|
1122
|
-
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
1123
|
-
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
1124
|
-
);
|
|
1125
|
-
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
1126
|
-
return { id };
|
|
1127
|
-
} catch (error) {
|
|
1128
|
-
log(`Error creating waitpoint: ${error}`);
|
|
1129
|
-
throw error;
|
|
1130
|
-
} finally {
|
|
1131
|
-
client.release();
|
|
1132
|
-
}
|
|
1133
|
-
};
|
|
1134
|
-
var completeWaitpoint = async (pool, tokenId, data) => {
|
|
1135
|
-
const client = await pool.connect();
|
|
1136
|
-
try {
|
|
1137
|
-
await client.query("BEGIN");
|
|
1138
|
-
const wpResult = await client.query(
|
|
1139
|
-
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
1140
|
-
WHERE id = $1 AND status = 'waiting'
|
|
1141
|
-
RETURNING job_id`,
|
|
1142
|
-
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
1143
|
-
);
|
|
1144
|
-
if (wpResult.rows.length === 0) {
|
|
1145
|
-
await client.query("ROLLBACK");
|
|
1146
|
-
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
1147
|
-
return;
|
|
1148
|
-
}
|
|
1149
|
-
const jobId = wpResult.rows[0].job_id;
|
|
1150
|
-
if (jobId != null) {
|
|
1151
|
-
await client.query(
|
|
1152
|
-
`UPDATE job_queue
|
|
1153
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1154
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1155
|
-
[jobId]
|
|
1156
|
-
);
|
|
1157
|
-
}
|
|
1158
|
-
await client.query("COMMIT");
|
|
1159
|
-
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
1160
|
-
} catch (error) {
|
|
1161
|
-
await client.query("ROLLBACK");
|
|
1162
|
-
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
1163
|
-
throw error;
|
|
1164
|
-
} finally {
|
|
1165
|
-
client.release();
|
|
1166
|
-
}
|
|
1167
|
-
};
|
|
1168
|
-
var getWaitpoint = async (pool, tokenId) => {
|
|
1169
|
-
const client = await pool.connect();
|
|
1170
|
-
try {
|
|
1171
|
-
const result = await client.query(
|
|
1172
|
-
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
1173
|
-
[tokenId]
|
|
1174
|
-
);
|
|
1175
|
-
if (result.rows.length === 0) return null;
|
|
1176
|
-
return result.rows[0];
|
|
1177
|
-
} catch (error) {
|
|
1178
|
-
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
1179
|
-
throw error;
|
|
1180
|
-
} finally {
|
|
1181
|
-
client.release();
|
|
1182
|
-
}
|
|
1183
|
-
};
|
|
1184
|
-
var expireTimedOutWaitpoints = async (pool) => {
|
|
1185
|
-
const client = await pool.connect();
|
|
1186
|
-
try {
|
|
1187
|
-
await client.query("BEGIN");
|
|
1188
|
-
const result = await client.query(
|
|
1189
|
-
`UPDATE waitpoints
|
|
1190
|
-
SET status = 'timed_out'
|
|
1191
|
-
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
1192
|
-
RETURNING id, job_id`
|
|
1193
|
-
);
|
|
1194
|
-
for (const row of result.rows) {
|
|
1195
|
-
if (row.job_id != null) {
|
|
1196
|
-
await client.query(
|
|
1197
|
-
`UPDATE job_queue
|
|
1198
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1199
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1200
|
-
[row.job_id]
|
|
1201
|
-
);
|
|
1202
|
-
}
|
|
1203
|
-
}
|
|
1204
|
-
await client.query("COMMIT");
|
|
1205
|
-
const count = result.rowCount || 0;
|
|
1206
|
-
if (count > 0) {
|
|
1207
|
-
log(`Expired ${count} timed-out waitpoints`);
|
|
1208
|
-
}
|
|
1209
|
-
return count;
|
|
1210
|
-
} catch (error) {
|
|
1211
|
-
await client.query("ROLLBACK");
|
|
1212
|
-
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
1213
|
-
throw error;
|
|
1214
|
-
} finally {
|
|
1215
|
-
client.release();
|
|
1216
|
-
}
|
|
1217
|
-
};
|
|
1218
|
-
function tryExtractPool(backend) {
|
|
1219
|
-
if (backend instanceof PostgresBackend) {
|
|
1220
|
-
return backend.getPool();
|
|
1221
|
-
}
|
|
1222
|
-
return null;
|
|
1223
|
-
}
|
|
1224
|
-
function buildBasicContext(backend, jobId, baseCtx) {
|
|
1225
|
-
const waitError = () => new Error(
|
|
1226
|
-
"Wait features (waitFor, waitUntil, createToken, waitForToken, ctx.run) are currently only supported with the PostgreSQL backend."
|
|
1227
|
-
);
|
|
1228
|
-
return {
|
|
1229
|
-
prolong: baseCtx.prolong,
|
|
1230
|
-
onTimeout: baseCtx.onTimeout,
|
|
1231
|
-
run: async (_stepName, fn) => {
|
|
1232
|
-
return fn();
|
|
1233
|
-
},
|
|
1234
|
-
waitFor: async () => {
|
|
1235
|
-
throw waitError();
|
|
1236
|
-
},
|
|
1237
|
-
waitUntil: async () => {
|
|
1238
|
-
throw waitError();
|
|
1239
|
-
},
|
|
1240
|
-
createToken: async () => {
|
|
1241
|
-
throw waitError();
|
|
1242
|
-
},
|
|
1243
|
-
waitForToken: async () => {
|
|
1244
|
-
throw waitError();
|
|
1245
|
-
},
|
|
1246
|
-
setProgress: async (percent) => {
|
|
1247
|
-
if (percent < 0 || percent > 100)
|
|
1248
|
-
throw new Error("Progress must be between 0 and 100");
|
|
1249
|
-
await backend.updateProgress(jobId, Math.round(percent));
|
|
1250
|
-
}
|
|
1251
|
-
};
|
|
1252
|
-
}
|
|
1253
|
-
function validateHandlerSerializable(handler, jobType) {
|
|
1254
|
-
try {
|
|
1255
|
-
const handlerString = handler.toString();
|
|
1256
|
-
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
1257
|
-
throw new Error(
|
|
1258
|
-
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
1259
|
-
);
|
|
1260
|
-
}
|
|
1261
|
-
if (handlerString.includes("[native code]")) {
|
|
1262
|
-
throw new Error(
|
|
1263
|
-
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
1264
|
-
);
|
|
1597
|
+
if (updates.timeoutMs !== void 0)
|
|
1598
|
+
metadata.timeoutMs = updates.timeoutMs;
|
|
1599
|
+
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
1600
|
+
for (const row of result.rows) {
|
|
1601
|
+
await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
|
|
1602
|
+
}
|
|
1603
|
+
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
1604
|
+
return editedCount;
|
|
1605
|
+
} catch (error) {
|
|
1606
|
+
log(`Error editing pending jobs: ${error}`);
|
|
1607
|
+
throw error;
|
|
1608
|
+
} finally {
|
|
1609
|
+
client.release();
|
|
1265
1610
|
}
|
|
1611
|
+
}
|
|
1612
|
+
/**
|
|
1613
|
+
* Delete completed jobs older than the given number of days.
|
|
1614
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
1615
|
+
* and excessive WAL bloat at scale.
|
|
1616
|
+
*
|
|
1617
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
1618
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
1619
|
+
* @returns Total number of deleted jobs.
|
|
1620
|
+
*/
|
|
1621
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 1e3) {
|
|
1622
|
+
let totalDeleted = 0;
|
|
1266
1623
|
try {
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1624
|
+
let deletedInBatch;
|
|
1625
|
+
do {
|
|
1626
|
+
const client = await this.pool.connect();
|
|
1627
|
+
try {
|
|
1628
|
+
const result = await client.query(
|
|
1629
|
+
`
|
|
1630
|
+
DELETE FROM job_queue
|
|
1631
|
+
WHERE id IN (
|
|
1632
|
+
SELECT id FROM job_queue
|
|
1633
|
+
WHERE status = 'completed'
|
|
1634
|
+
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
1635
|
+
LIMIT $2
|
|
1636
|
+
)
|
|
1637
|
+
`,
|
|
1638
|
+
[daysToKeep, batchSize]
|
|
1639
|
+
);
|
|
1640
|
+
deletedInBatch = result.rowCount || 0;
|
|
1641
|
+
totalDeleted += deletedInBatch;
|
|
1642
|
+
} finally {
|
|
1643
|
+
client.release();
|
|
1644
|
+
}
|
|
1645
|
+
} while (deletedInBatch === batchSize);
|
|
1646
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
1647
|
+
return totalDeleted;
|
|
1648
|
+
} catch (error) {
|
|
1649
|
+
log(`Error cleaning up old jobs: ${error}`);
|
|
1275
1650
|
throw error;
|
|
1276
1651
|
}
|
|
1277
|
-
throw new Error(
|
|
1278
|
-
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
1279
|
-
);
|
|
1280
1652
|
}
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
const
|
|
1296
|
-
controller.abort();
|
|
1297
|
-
parentPort.postMessage({ type: 'timeout' });
|
|
1298
|
-
}, timeoutMs);
|
|
1299
|
-
|
|
1653
|
+
/**
|
|
1654
|
+
* Delete job events older than the given number of days.
|
|
1655
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
1656
|
+
* and excessive WAL bloat at scale.
|
|
1657
|
+
*
|
|
1658
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
1659
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
1660
|
+
* @returns Total number of deleted events.
|
|
1661
|
+
*/
|
|
1662
|
+
async cleanupOldJobEvents(daysToKeep = 30, batchSize = 1e3) {
|
|
1663
|
+
let totalDeleted = 0;
|
|
1664
|
+
try {
|
|
1665
|
+
let deletedInBatch;
|
|
1666
|
+
do {
|
|
1667
|
+
const client = await this.pool.connect();
|
|
1300
1668
|
try {
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
handlerFn = new Function('return ' + wrappedCode)();
|
|
1312
|
-
} catch (parseError) {
|
|
1313
|
-
clearTimeout(timeoutId);
|
|
1314
|
-
parentPort.postMessage({
|
|
1315
|
-
type: 'error',
|
|
1316
|
-
error: {
|
|
1317
|
-
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
1318
|
-
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
1319
|
-
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
1320
|
-
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
1321
|
-
name: 'SerializationError',
|
|
1322
|
-
},
|
|
1323
|
-
});
|
|
1324
|
-
return;
|
|
1325
|
-
}
|
|
1326
|
-
|
|
1327
|
-
// Ensure handlerFn is actually a function
|
|
1328
|
-
if (typeof handlerFn !== 'function') {
|
|
1329
|
-
clearTimeout(timeoutId);
|
|
1330
|
-
parentPort.postMessage({
|
|
1331
|
-
type: 'error',
|
|
1332
|
-
error: {
|
|
1333
|
-
message: 'Handler deserialization did not produce a function. ' +
|
|
1334
|
-
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
1335
|
-
name: 'SerializationError',
|
|
1336
|
-
},
|
|
1337
|
-
});
|
|
1338
|
-
return;
|
|
1339
|
-
}
|
|
1340
|
-
|
|
1341
|
-
handlerFn(payload, signal)
|
|
1342
|
-
.then(() => {
|
|
1343
|
-
clearTimeout(timeoutId);
|
|
1344
|
-
parentPort.postMessage({ type: 'success' });
|
|
1345
|
-
})
|
|
1346
|
-
.catch((error) => {
|
|
1347
|
-
clearTimeout(timeoutId);
|
|
1348
|
-
parentPort.postMessage({
|
|
1349
|
-
type: 'error',
|
|
1350
|
-
error: {
|
|
1351
|
-
message: error.message,
|
|
1352
|
-
stack: error.stack,
|
|
1353
|
-
name: error.name,
|
|
1354
|
-
},
|
|
1355
|
-
});
|
|
1356
|
-
});
|
|
1357
|
-
} catch (error) {
|
|
1358
|
-
clearTimeout(timeoutId);
|
|
1359
|
-
parentPort.postMessage({
|
|
1360
|
-
type: 'error',
|
|
1361
|
-
error: {
|
|
1362
|
-
message: error.message,
|
|
1363
|
-
stack: error.stack,
|
|
1364
|
-
name: error.name,
|
|
1365
|
-
},
|
|
1366
|
-
});
|
|
1367
|
-
}
|
|
1368
|
-
})();
|
|
1369
|
-
`;
|
|
1370
|
-
const worker = new Worker(workerCode, {
|
|
1371
|
-
eval: true,
|
|
1372
|
-
workerData: {
|
|
1373
|
-
handlerCode: handler.toString(),
|
|
1374
|
-
payload,
|
|
1375
|
-
timeoutMs
|
|
1376
|
-
}
|
|
1377
|
-
});
|
|
1378
|
-
let resolved = false;
|
|
1379
|
-
worker.on("message", (message) => {
|
|
1380
|
-
if (resolved) return;
|
|
1381
|
-
resolved = true;
|
|
1382
|
-
if (message.type === "success") {
|
|
1383
|
-
resolve();
|
|
1384
|
-
} else if (message.type === "timeout") {
|
|
1385
|
-
const timeoutError = new Error(
|
|
1386
|
-
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1387
|
-
);
|
|
1388
|
-
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1389
|
-
reject(timeoutError);
|
|
1390
|
-
} else if (message.type === "error") {
|
|
1391
|
-
const error = new Error(message.error.message);
|
|
1392
|
-
error.stack = message.error.stack;
|
|
1393
|
-
error.name = message.error.name;
|
|
1394
|
-
reject(error);
|
|
1395
|
-
}
|
|
1396
|
-
});
|
|
1397
|
-
worker.on("error", (error) => {
|
|
1398
|
-
if (resolved) return;
|
|
1399
|
-
resolved = true;
|
|
1400
|
-
reject(error);
|
|
1401
|
-
});
|
|
1402
|
-
worker.on("exit", (code) => {
|
|
1403
|
-
if (resolved) return;
|
|
1404
|
-
if (code !== 0) {
|
|
1405
|
-
resolved = true;
|
|
1406
|
-
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
1407
|
-
}
|
|
1408
|
-
});
|
|
1409
|
-
setTimeout(() => {
|
|
1410
|
-
if (!resolved) {
|
|
1411
|
-
resolved = true;
|
|
1412
|
-
worker.terminate().then(() => {
|
|
1413
|
-
const timeoutError = new Error(
|
|
1414
|
-
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1669
|
+
const result = await client.query(
|
|
1670
|
+
`
|
|
1671
|
+
DELETE FROM job_events
|
|
1672
|
+
WHERE id IN (
|
|
1673
|
+
SELECT id FROM job_events
|
|
1674
|
+
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
1675
|
+
LIMIT $2
|
|
1676
|
+
)
|
|
1677
|
+
`,
|
|
1678
|
+
[daysToKeep, batchSize]
|
|
1415
1679
|
);
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
}
|
|
1419
|
-
|
|
1420
|
-
}
|
|
1421
|
-
}
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
}
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
1429
|
-
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
1430
|
-
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
1431
|
-
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
1432
|
-
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
1433
|
-
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
1434
|
-
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
1435
|
-
if (ms <= 0) {
|
|
1436
|
-
throw new Error(
|
|
1437
|
-
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
1438
|
-
);
|
|
1680
|
+
deletedInBatch = result.rowCount || 0;
|
|
1681
|
+
totalDeleted += deletedInBatch;
|
|
1682
|
+
} finally {
|
|
1683
|
+
client.release();
|
|
1684
|
+
}
|
|
1685
|
+
} while (deletedInBatch === batchSize);
|
|
1686
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
1687
|
+
return totalDeleted;
|
|
1688
|
+
} catch (error) {
|
|
1689
|
+
log(`Error cleaning up old job events: ${error}`);
|
|
1690
|
+
throw error;
|
|
1691
|
+
}
|
|
1439
1692
|
}
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1693
|
+
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
1694
|
+
const client = await this.pool.connect();
|
|
1695
|
+
try {
|
|
1696
|
+
const result = await client.query(
|
|
1697
|
+
`
|
|
1698
|
+
UPDATE job_queue
|
|
1699
|
+
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
1700
|
+
WHERE status = 'processing'
|
|
1701
|
+
AND locked_at < NOW() - GREATEST(
|
|
1702
|
+
INTERVAL '1 minute' * $1::int,
|
|
1703
|
+
INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
|
|
1704
|
+
)
|
|
1705
|
+
RETURNING id
|
|
1706
|
+
`,
|
|
1707
|
+
[maxProcessingTimeMinutes]
|
|
1708
|
+
);
|
|
1709
|
+
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
1710
|
+
return result.rowCount || 0;
|
|
1711
|
+
} catch (error) {
|
|
1712
|
+
log(`Error reclaiming stuck jobs: ${error}`);
|
|
1713
|
+
throw error;
|
|
1714
|
+
} finally {
|
|
1715
|
+
client.release();
|
|
1716
|
+
}
|
|
1717
|
+
}
|
|
1718
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
1719
|
+
/**
|
|
1720
|
+
* Batch-insert multiple job events in a single query.
|
|
1721
|
+
* More efficient than individual recordJobEvent calls.
|
|
1722
|
+
*/
|
|
1723
|
+
async recordJobEventsBatch(events) {
|
|
1724
|
+
if (events.length === 0) return;
|
|
1725
|
+
const client = await this.pool.connect();
|
|
1726
|
+
try {
|
|
1727
|
+
const values = [];
|
|
1728
|
+
const params = [];
|
|
1729
|
+
let paramIdx = 1;
|
|
1730
|
+
for (const event of events) {
|
|
1731
|
+
values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
|
|
1732
|
+
params.push(
|
|
1733
|
+
event.jobId,
|
|
1734
|
+
event.eventType,
|
|
1735
|
+
event.metadata ? JSON.stringify(event.metadata) : null
|
|
1736
|
+
);
|
|
1463
1737
|
}
|
|
1738
|
+
await client.query(
|
|
1739
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
|
|
1740
|
+
params
|
|
1741
|
+
);
|
|
1742
|
+
} catch (error) {
|
|
1743
|
+
log(`Error recording batch job events: ${error}`);
|
|
1744
|
+
} finally {
|
|
1745
|
+
client.release();
|
|
1464
1746
|
}
|
|
1465
1747
|
}
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
if (
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
}
|
|
1501
|
-
stepData[waitKey] = { type: "date", completed: false };
|
|
1502
|
-
throw new WaitSignal("date", date, void 0, stepData);
|
|
1503
|
-
},
|
|
1504
|
-
createToken: async (options) => {
|
|
1505
|
-
const token = await createWaitpoint(pool, jobId, options);
|
|
1506
|
-
return token;
|
|
1507
|
-
},
|
|
1508
|
-
waitForToken: async (tokenId) => {
|
|
1509
|
-
const waitKey = `__wait_${waitCounter++}`;
|
|
1510
|
-
const cached = stepData[waitKey];
|
|
1511
|
-
if (cached && typeof cached === "object" && cached.completed) {
|
|
1512
|
-
log(
|
|
1513
|
-
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
1748
|
+
// ── Cron schedules ──────────────────────────────────────────────────
|
|
1749
|
+
/** Create a cron schedule and return its ID. */
|
|
1750
|
+
async addCronSchedule(input) {
|
|
1751
|
+
const client = await this.pool.connect();
|
|
1752
|
+
try {
|
|
1753
|
+
const result = await client.query(
|
|
1754
|
+
`INSERT INTO cron_schedules
|
|
1755
|
+
(schedule_name, cron_expression, job_type, payload, max_attempts,
|
|
1756
|
+
priority, timeout_ms, force_kill_on_timeout, tags, timezone,
|
|
1757
|
+
allow_overlap, next_run_at)
|
|
1758
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
|
|
1759
|
+
RETURNING id`,
|
|
1760
|
+
[
|
|
1761
|
+
input.scheduleName,
|
|
1762
|
+
input.cronExpression,
|
|
1763
|
+
input.jobType,
|
|
1764
|
+
input.payload,
|
|
1765
|
+
input.maxAttempts,
|
|
1766
|
+
input.priority,
|
|
1767
|
+
input.timeoutMs,
|
|
1768
|
+
input.forceKillOnTimeout,
|
|
1769
|
+
input.tags ?? null,
|
|
1770
|
+
input.timezone,
|
|
1771
|
+
input.allowOverlap,
|
|
1772
|
+
input.nextRunAt
|
|
1773
|
+
]
|
|
1774
|
+
);
|
|
1775
|
+
const id = result.rows[0].id;
|
|
1776
|
+
log(`Added cron schedule ${id}: "${input.scheduleName}"`);
|
|
1777
|
+
return id;
|
|
1778
|
+
} catch (error) {
|
|
1779
|
+
if (error?.code === "23505") {
|
|
1780
|
+
throw new Error(
|
|
1781
|
+
`Cron schedule with name "${input.scheduleName}" already exists`
|
|
1514
1782
|
);
|
|
1515
|
-
return cached.result;
|
|
1516
|
-
}
|
|
1517
|
-
const wp = await getWaitpoint(pool, tokenId);
|
|
1518
|
-
if (wp && wp.status === "completed") {
|
|
1519
|
-
const result = {
|
|
1520
|
-
ok: true,
|
|
1521
|
-
output: wp.output
|
|
1522
|
-
};
|
|
1523
|
-
stepData[waitKey] = {
|
|
1524
|
-
type: "token",
|
|
1525
|
-
tokenId,
|
|
1526
|
-
completed: true,
|
|
1527
|
-
result
|
|
1528
|
-
};
|
|
1529
|
-
await updateStepData(pool, jobId, stepData);
|
|
1530
|
-
return result;
|
|
1531
1783
|
}
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
|
|
1537
|
-
|
|
1538
|
-
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
1784
|
+
log(`Error adding cron schedule: ${error}`);
|
|
1785
|
+
throw error;
|
|
1786
|
+
} finally {
|
|
1787
|
+
client.release();
|
|
1788
|
+
}
|
|
1789
|
+
}
|
|
1790
|
+
/** Get a cron schedule by ID. */
|
|
1791
|
+
async getCronSchedule(id) {
|
|
1792
|
+
const client = await this.pool.connect();
|
|
1793
|
+
try {
|
|
1794
|
+
const result = await client.query(
|
|
1795
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1796
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1797
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1798
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1799
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1800
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1801
|
+
next_run_at AS "nextRunAt",
|
|
1802
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1803
|
+
FROM cron_schedules WHERE id = $1`,
|
|
1804
|
+
[id]
|
|
1805
|
+
);
|
|
1806
|
+
if (result.rows.length === 0) return null;
|
|
1807
|
+
return result.rows[0];
|
|
1808
|
+
} catch (error) {
|
|
1809
|
+
log(`Error getting cron schedule ${id}: ${error}`);
|
|
1810
|
+
throw error;
|
|
1811
|
+
} finally {
|
|
1812
|
+
client.release();
|
|
1813
|
+
}
|
|
1814
|
+
}
|
|
1815
|
+
/** Get a cron schedule by its unique name. */
|
|
1816
|
+
async getCronScheduleByName(name) {
|
|
1817
|
+
const client = await this.pool.connect();
|
|
1818
|
+
try {
|
|
1819
|
+
const result = await client.query(
|
|
1820
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1821
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1822
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1823
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1824
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1825
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1826
|
+
next_run_at AS "nextRunAt",
|
|
1827
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1828
|
+
FROM cron_schedules WHERE schedule_name = $1`,
|
|
1829
|
+
[name]
|
|
1830
|
+
);
|
|
1831
|
+
if (result.rows.length === 0) return null;
|
|
1832
|
+
return result.rows[0];
|
|
1833
|
+
} catch (error) {
|
|
1834
|
+
log(`Error getting cron schedule by name "${name}": ${error}`);
|
|
1835
|
+
throw error;
|
|
1836
|
+
} finally {
|
|
1837
|
+
client.release();
|
|
1838
|
+
}
|
|
1839
|
+
}
|
|
1840
|
+
/** List cron schedules, optionally filtered by status. */
|
|
1841
|
+
async listCronSchedules(status) {
|
|
1842
|
+
const client = await this.pool.connect();
|
|
1843
|
+
try {
|
|
1844
|
+
let query = `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1845
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1846
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1847
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1848
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1849
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1850
|
+
next_run_at AS "nextRunAt",
|
|
1851
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1852
|
+
FROM cron_schedules`;
|
|
1853
|
+
const params = [];
|
|
1854
|
+
if (status) {
|
|
1855
|
+
query += ` WHERE status = $1`;
|
|
1856
|
+
params.push(status);
|
|
1545
1857
|
}
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1858
|
+
query += ` ORDER BY created_at ASC`;
|
|
1859
|
+
const result = await client.query(query, params);
|
|
1860
|
+
return result.rows;
|
|
1861
|
+
} catch (error) {
|
|
1862
|
+
log(`Error listing cron schedules: ${error}`);
|
|
1863
|
+
throw error;
|
|
1864
|
+
} finally {
|
|
1865
|
+
client.release();
|
|
1553
1866
|
}
|
|
1554
|
-
};
|
|
1555
|
-
return ctx;
|
|
1556
|
-
}
|
|
1557
|
-
async function processJobWithHandlers(backend, job, jobHandlers) {
|
|
1558
|
-
const handler = jobHandlers[job.jobType];
|
|
1559
|
-
if (!handler) {
|
|
1560
|
-
await backend.setPendingReasonForUnpickedJobs(
|
|
1561
|
-
`No handler registered for job type: ${job.jobType}`,
|
|
1562
|
-
job.jobType
|
|
1563
|
-
);
|
|
1564
|
-
await backend.failJob(
|
|
1565
|
-
job.id,
|
|
1566
|
-
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
1567
|
-
"no_handler" /* NoHandler */
|
|
1568
|
-
);
|
|
1569
|
-
return;
|
|
1570
1867
|
}
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1868
|
+
/** Delete a cron schedule by ID. */
|
|
1869
|
+
async removeCronSchedule(id) {
|
|
1870
|
+
const client = await this.pool.connect();
|
|
1871
|
+
try {
|
|
1872
|
+
await client.query(`DELETE FROM cron_schedules WHERE id = $1`, [id]);
|
|
1873
|
+
log(`Removed cron schedule ${id}`);
|
|
1874
|
+
} catch (error) {
|
|
1875
|
+
log(`Error removing cron schedule ${id}: ${error}`);
|
|
1876
|
+
throw error;
|
|
1877
|
+
} finally {
|
|
1878
|
+
client.release();
|
|
1879
|
+
}
|
|
1579
1880
|
}
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
const
|
|
1617
|
-
const
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
|
|
1628
|
-
}
|
|
1629
|
-
|
|
1630
|
-
prolong: () => {
|
|
1631
|
-
log("prolong() called but ignored: job has no timeout set");
|
|
1632
|
-
},
|
|
1633
|
-
onTimeout: () => {
|
|
1634
|
-
log("onTimeout() called but ignored: job has no timeout set");
|
|
1635
|
-
}
|
|
1636
|
-
};
|
|
1637
|
-
const ctx = pool ? buildWaitContext(backend, pool, job.id, stepData, baseCtx) : buildBasicContext(backend, job.id, baseCtx);
|
|
1638
|
-
if (forceKillOnTimeout && !hasTimeout) {
|
|
1639
|
-
log(
|
|
1640
|
-
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
1641
|
-
);
|
|
1881
|
+
/** Pause a cron schedule. */
|
|
1882
|
+
async pauseCronSchedule(id) {
|
|
1883
|
+
const client = await this.pool.connect();
|
|
1884
|
+
try {
|
|
1885
|
+
await client.query(
|
|
1886
|
+
`UPDATE cron_schedules SET status = 'paused', updated_at = NOW() WHERE id = $1`,
|
|
1887
|
+
[id]
|
|
1888
|
+
);
|
|
1889
|
+
log(`Paused cron schedule ${id}`);
|
|
1890
|
+
} catch (error) {
|
|
1891
|
+
log(`Error pausing cron schedule ${id}: ${error}`);
|
|
1892
|
+
throw error;
|
|
1893
|
+
} finally {
|
|
1894
|
+
client.release();
|
|
1895
|
+
}
|
|
1896
|
+
}
|
|
1897
|
+
/** Resume a paused cron schedule. */
|
|
1898
|
+
async resumeCronSchedule(id) {
|
|
1899
|
+
const client = await this.pool.connect();
|
|
1900
|
+
try {
|
|
1901
|
+
await client.query(
|
|
1902
|
+
`UPDATE cron_schedules SET status = 'active', updated_at = NOW() WHERE id = $1`,
|
|
1903
|
+
[id]
|
|
1904
|
+
);
|
|
1905
|
+
log(`Resumed cron schedule ${id}`);
|
|
1906
|
+
} catch (error) {
|
|
1907
|
+
log(`Error resuming cron schedule ${id}: ${error}`);
|
|
1908
|
+
throw error;
|
|
1909
|
+
} finally {
|
|
1910
|
+
client.release();
|
|
1911
|
+
}
|
|
1912
|
+
}
|
|
1913
|
+
/** Edit a cron schedule. */
|
|
1914
|
+
async editCronSchedule(id, updates, nextRunAt) {
|
|
1915
|
+
const client = await this.pool.connect();
|
|
1916
|
+
try {
|
|
1917
|
+
const updateFields = [];
|
|
1918
|
+
const params = [];
|
|
1919
|
+
let paramIdx = 1;
|
|
1920
|
+
if (updates.cronExpression !== void 0) {
|
|
1921
|
+
updateFields.push(`cron_expression = $${paramIdx++}`);
|
|
1922
|
+
params.push(updates.cronExpression);
|
|
1923
|
+
}
|
|
1924
|
+
if (updates.payload !== void 0) {
|
|
1925
|
+
updateFields.push(`payload = $${paramIdx++}`);
|
|
1926
|
+
params.push(updates.payload);
|
|
1927
|
+
}
|
|
1928
|
+
if (updates.maxAttempts !== void 0) {
|
|
1929
|
+
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
1930
|
+
params.push(updates.maxAttempts);
|
|
1642
1931
|
}
|
|
1643
|
-
|
|
1644
|
-
|
|
1645
|
-
|
|
1646
|
-
jobPromise,
|
|
1647
|
-
new Promise((_, reject) => {
|
|
1648
|
-
timeoutReject = reject;
|
|
1649
|
-
armTimeout(timeoutMs);
|
|
1650
|
-
})
|
|
1651
|
-
]);
|
|
1652
|
-
} else {
|
|
1653
|
-
await jobPromise;
|
|
1932
|
+
if (updates.priority !== void 0) {
|
|
1933
|
+
updateFields.push(`priority = $${paramIdx++}`);
|
|
1934
|
+
params.push(updates.priority);
|
|
1654
1935
|
}
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1659
|
-
|
|
1660
|
-
|
|
1661
|
-
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
);
|
|
1936
|
+
if (updates.timeoutMs !== void 0) {
|
|
1937
|
+
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
1938
|
+
params.push(updates.timeoutMs);
|
|
1939
|
+
}
|
|
1940
|
+
if (updates.forceKillOnTimeout !== void 0) {
|
|
1941
|
+
updateFields.push(`force_kill_on_timeout = $${paramIdx++}`);
|
|
1942
|
+
params.push(updates.forceKillOnTimeout);
|
|
1943
|
+
}
|
|
1944
|
+
if (updates.tags !== void 0) {
|
|
1945
|
+
updateFields.push(`tags = $${paramIdx++}`);
|
|
1946
|
+
params.push(updates.tags);
|
|
1947
|
+
}
|
|
1948
|
+
if (updates.timezone !== void 0) {
|
|
1949
|
+
updateFields.push(`timezone = $${paramIdx++}`);
|
|
1950
|
+
params.push(updates.timezone);
|
|
1951
|
+
}
|
|
1952
|
+
if (updates.allowOverlap !== void 0) {
|
|
1953
|
+
updateFields.push(`allow_overlap = $${paramIdx++}`);
|
|
1954
|
+
params.push(updates.allowOverlap);
|
|
1955
|
+
}
|
|
1956
|
+
if (nextRunAt !== void 0) {
|
|
1957
|
+
updateFields.push(`next_run_at = $${paramIdx++}`);
|
|
1958
|
+
params.push(nextRunAt);
|
|
1959
|
+
}
|
|
1960
|
+
if (updateFields.length === 0) {
|
|
1961
|
+
log(`No fields to update for cron schedule ${id}`);
|
|
1669
1962
|
return;
|
|
1670
1963
|
}
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
)
|
|
1674
|
-
await
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
console.error(`Error processing job ${job.id}:`, error);
|
|
1682
|
-
let failureReason = "handler_error" /* HandlerError */;
|
|
1683
|
-
if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
|
|
1684
|
-
failureReason = "timeout" /* Timeout */;
|
|
1964
|
+
updateFields.push(`updated_at = NOW()`);
|
|
1965
|
+
params.push(id);
|
|
1966
|
+
const query = `UPDATE cron_schedules SET ${updateFields.join(", ")} WHERE id = $${paramIdx}`;
|
|
1967
|
+
await client.query(query, params);
|
|
1968
|
+
log(`Edited cron schedule ${id}`);
|
|
1969
|
+
} catch (error) {
|
|
1970
|
+
log(`Error editing cron schedule ${id}: ${error}`);
|
|
1971
|
+
throw error;
|
|
1972
|
+
} finally {
|
|
1973
|
+
client.release();
|
|
1685
1974
|
}
|
|
1686
|
-
await backend.failJob(
|
|
1687
|
-
job.id,
|
|
1688
|
-
error instanceof Error ? error : new Error(String(error)),
|
|
1689
|
-
failureReason
|
|
1690
|
-
);
|
|
1691
|
-
}
|
|
1692
|
-
}
|
|
1693
|
-
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
|
|
1694
|
-
const jobs = await backend.getNextBatch(
|
|
1695
|
-
workerId,
|
|
1696
|
-
batchSize,
|
|
1697
|
-
jobType
|
|
1698
|
-
);
|
|
1699
|
-
if (!concurrency || concurrency >= jobs.length) {
|
|
1700
|
-
await Promise.all(
|
|
1701
|
-
jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
|
|
1702
|
-
);
|
|
1703
|
-
return jobs.length;
|
|
1704
1975
|
}
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1976
|
+
/**
|
|
1977
|
+
* Atomically fetch all active cron schedules whose nextRunAt <= NOW().
|
|
1978
|
+
* Uses FOR UPDATE SKIP LOCKED to prevent duplicate enqueuing across workers.
|
|
1979
|
+
*/
|
|
1980
|
+
async getDueCronSchedules() {
|
|
1981
|
+
const client = await this.pool.connect();
|
|
1982
|
+
try {
|
|
1983
|
+
const result = await client.query(
|
|
1984
|
+
`SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
|
|
1985
|
+
job_type AS "jobType", payload, max_attempts AS "maxAttempts",
|
|
1986
|
+
priority, timeout_ms AS "timeoutMs",
|
|
1987
|
+
force_kill_on_timeout AS "forceKillOnTimeout", tags,
|
|
1988
|
+
timezone, allow_overlap AS "allowOverlap", status,
|
|
1989
|
+
last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
|
|
1990
|
+
next_run_at AS "nextRunAt",
|
|
1991
|
+
created_at AS "createdAt", updated_at AS "updatedAt"
|
|
1992
|
+
FROM cron_schedules
|
|
1993
|
+
WHERE status = 'active'
|
|
1994
|
+
AND next_run_at IS NOT NULL
|
|
1995
|
+
AND next_run_at <= NOW()
|
|
1996
|
+
ORDER BY next_run_at ASC
|
|
1997
|
+
FOR UPDATE SKIP LOCKED`
|
|
1998
|
+
);
|
|
1999
|
+
log(`Found ${result.rows.length} due cron schedules`);
|
|
2000
|
+
return result.rows;
|
|
2001
|
+
} catch (error) {
|
|
2002
|
+
if (error?.code === "42P01") {
|
|
2003
|
+
log("cron_schedules table does not exist, skipping cron enqueue");
|
|
2004
|
+
return [];
|
|
1726
2005
|
}
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
|
|
1739
|
-
} = options;
|
|
1740
|
-
let running = false;
|
|
1741
|
-
let intervalId = null;
|
|
1742
|
-
let currentBatchPromise = null;
|
|
1743
|
-
setLogContext(options.verbose ?? false);
|
|
1744
|
-
const processJobs = async () => {
|
|
1745
|
-
if (!running) return 0;
|
|
1746
|
-
log(
|
|
1747
|
-
`Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
|
|
1748
|
-
);
|
|
2006
|
+
log(`Error getting due cron schedules: ${error}`);
|
|
2007
|
+
throw error;
|
|
2008
|
+
} finally {
|
|
2009
|
+
client.release();
|
|
2010
|
+
}
|
|
2011
|
+
}
|
|
2012
|
+
/**
|
|
2013
|
+
* Update a cron schedule after a job has been enqueued.
|
|
2014
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
2015
|
+
*/
|
|
2016
|
+
async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
|
|
2017
|
+
const client = await this.pool.connect();
|
|
1749
2018
|
try {
|
|
1750
|
-
|
|
1751
|
-
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
|
|
2019
|
+
await client.query(
|
|
2020
|
+
`UPDATE cron_schedules
|
|
2021
|
+
SET last_enqueued_at = $2,
|
|
2022
|
+
last_job_id = $3,
|
|
2023
|
+
next_run_at = $4,
|
|
2024
|
+
updated_at = NOW()
|
|
2025
|
+
WHERE id = $1`,
|
|
2026
|
+
[id, lastEnqueuedAt, lastJobId, nextRunAt]
|
|
2027
|
+
);
|
|
2028
|
+
log(
|
|
2029
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
|
|
1758
2030
|
);
|
|
1759
|
-
return processed;
|
|
1760
2031
|
} catch (error) {
|
|
1761
|
-
|
|
2032
|
+
log(`Error updating cron schedule ${id} after enqueue: ${error}`);
|
|
2033
|
+
throw error;
|
|
2034
|
+
} finally {
|
|
2035
|
+
client.release();
|
|
1762
2036
|
}
|
|
1763
|
-
|
|
1764
|
-
|
|
1765
|
-
|
|
1766
|
-
|
|
1767
|
-
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1771
|
-
|
|
1772
|
-
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
const
|
|
1776
|
-
|
|
1777
|
-
|
|
1778
|
-
|
|
1779
|
-
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
|
|
1785
|
-
|
|
1786
|
-
|
|
1787
|
-
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
|
|
1799
|
-
if (intervalId) {
|
|
1800
|
-
clearTimeout(intervalId);
|
|
1801
|
-
intervalId = null;
|
|
1802
|
-
}
|
|
1803
|
-
},
|
|
1804
|
-
/**
|
|
1805
|
-
* Stop the job processor and wait for all in-flight jobs to complete.
|
|
1806
|
-
* Useful for graceful shutdown (e.g., SIGTERM handling).
|
|
1807
|
-
*/
|
|
1808
|
-
stopAndDrain: async (drainTimeoutMs = 3e4) => {
|
|
1809
|
-
log(`Stopping and draining job processor with workerId: ${workerId}`);
|
|
1810
|
-
running = false;
|
|
1811
|
-
if (intervalId) {
|
|
1812
|
-
clearTimeout(intervalId);
|
|
1813
|
-
intervalId = null;
|
|
1814
|
-
}
|
|
1815
|
-
if (currentBatchPromise) {
|
|
1816
|
-
await Promise.race([
|
|
1817
|
-
currentBatchPromise.catch(() => {
|
|
1818
|
-
}),
|
|
1819
|
-
new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
|
|
1820
|
-
]);
|
|
1821
|
-
currentBatchPromise = null;
|
|
2037
|
+
}
|
|
2038
|
+
// ── Wait / step-data support ────────────────────────────────────────
|
|
2039
|
+
/**
|
|
2040
|
+
* Transition a job from 'processing' to 'waiting' status.
|
|
2041
|
+
* Persists step data so the handler can resume from where it left off.
|
|
2042
|
+
*
|
|
2043
|
+
* @param jobId - The job to pause.
|
|
2044
|
+
* @param options - Wait configuration including optional waitUntil date, token ID, and step data.
|
|
2045
|
+
*/
|
|
2046
|
+
async waitJob(jobId, options) {
|
|
2047
|
+
const client = await this.pool.connect();
|
|
2048
|
+
try {
|
|
2049
|
+
const result = await client.query(
|
|
2050
|
+
`
|
|
2051
|
+
UPDATE job_queue
|
|
2052
|
+
SET status = 'waiting',
|
|
2053
|
+
wait_until = $2,
|
|
2054
|
+
wait_token_id = $3,
|
|
2055
|
+
step_data = $4,
|
|
2056
|
+
locked_at = NULL,
|
|
2057
|
+
locked_by = NULL,
|
|
2058
|
+
updated_at = NOW()
|
|
2059
|
+
WHERE id = $1 AND status = 'processing'
|
|
2060
|
+
`,
|
|
2061
|
+
[
|
|
2062
|
+
jobId,
|
|
2063
|
+
options.waitUntil ?? null,
|
|
2064
|
+
options.waitTokenId ?? null,
|
|
2065
|
+
JSON.stringify(options.stepData)
|
|
2066
|
+
]
|
|
2067
|
+
);
|
|
2068
|
+
if (result.rowCount === 0) {
|
|
2069
|
+
log(
|
|
2070
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
2071
|
+
);
|
|
2072
|
+
return;
|
|
1822
2073
|
}
|
|
1823
|
-
|
|
1824
|
-
|
|
1825
|
-
|
|
1826
|
-
|
|
1827
|
-
|
|
1828
|
-
|
|
1829
|
-
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1840
|
-
|
|
1841
|
-
|
|
1842
|
-
|
|
1843
|
-
const
|
|
1844
|
-
|
|
2074
|
+
await this.recordJobEvent(jobId, "waiting" /* Waiting */, {
|
|
2075
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
2076
|
+
waitTokenId: options.waitTokenId ?? null
|
|
2077
|
+
});
|
|
2078
|
+
log(`Job ${jobId} set to waiting`);
|
|
2079
|
+
} catch (error) {
|
|
2080
|
+
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
2081
|
+
throw error;
|
|
2082
|
+
} finally {
|
|
2083
|
+
client.release();
|
|
2084
|
+
}
|
|
2085
|
+
}
|
|
2086
|
+
/**
|
|
2087
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
2088
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
2089
|
+
*
|
|
2090
|
+
* @param jobId - The job to update.
|
|
2091
|
+
* @param stepData - The step data to persist.
|
|
2092
|
+
*/
|
|
2093
|
+
async updateStepData(jobId, stepData) {
|
|
2094
|
+
const client = await this.pool.connect();
|
|
2095
|
+
try {
|
|
2096
|
+
await client.query(
|
|
2097
|
+
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
2098
|
+
[jobId, JSON.stringify(stepData)]
|
|
2099
|
+
);
|
|
2100
|
+
} catch (error) {
|
|
2101
|
+
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
2102
|
+
} finally {
|
|
2103
|
+
client.release();
|
|
2104
|
+
}
|
|
1845
2105
|
}
|
|
1846
|
-
|
|
1847
|
-
|
|
1848
|
-
|
|
1849
|
-
|
|
1850
|
-
|
|
1851
|
-
|
|
1852
|
-
|
|
1853
|
-
|
|
2106
|
+
/**
|
|
2107
|
+
* Create a waitpoint token in the database.
|
|
2108
|
+
*
|
|
2109
|
+
* @param jobId - The job ID to associate with the token (null if created outside a handler).
|
|
2110
|
+
* @param options - Optional timeout string (e.g. '10m', '1h') and tags.
|
|
2111
|
+
* @returns The created waitpoint with its unique ID.
|
|
2112
|
+
*/
|
|
2113
|
+
async createWaitpoint(jobId, options) {
|
|
2114
|
+
const client = await this.pool.connect();
|
|
1854
2115
|
try {
|
|
1855
|
-
const
|
|
1856
|
-
|
|
1857
|
-
|
|
1858
|
-
|
|
1859
|
-
|
|
2116
|
+
const id = `wp_${randomUUID()}`;
|
|
2117
|
+
let timeoutAt = null;
|
|
2118
|
+
if (options?.timeout) {
|
|
2119
|
+
const ms = parseTimeoutString(options.timeout);
|
|
2120
|
+
timeoutAt = new Date(Date.now() + ms);
|
|
1860
2121
|
}
|
|
1861
|
-
|
|
1862
|
-
|
|
1863
|
-
|
|
1864
|
-
|
|
1865
|
-
|
|
1866
|
-
|
|
1867
|
-
|
|
2122
|
+
await client.query(
|
|
2123
|
+
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
2124
|
+
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
2125
|
+
);
|
|
2126
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
2127
|
+
return { id };
|
|
2128
|
+
} catch (error) {
|
|
2129
|
+
log(`Error creating waitpoint: ${error}`);
|
|
2130
|
+
throw error;
|
|
2131
|
+
} finally {
|
|
2132
|
+
client.release();
|
|
2133
|
+
}
|
|
2134
|
+
}
|
|
2135
|
+
/**
|
|
2136
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
2137
|
+
*
|
|
2138
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
2139
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
2140
|
+
*/
|
|
2141
|
+
async completeWaitpoint(tokenId, data) {
|
|
2142
|
+
const client = await this.pool.connect();
|
|
2143
|
+
try {
|
|
2144
|
+
await client.query("BEGIN");
|
|
2145
|
+
const wpResult = await client.query(
|
|
2146
|
+
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
2147
|
+
WHERE id = $1 AND status = 'waiting'
|
|
2148
|
+
RETURNING job_id`,
|
|
2149
|
+
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
2150
|
+
);
|
|
2151
|
+
if (wpResult.rows.length === 0) {
|
|
2152
|
+
await client.query("ROLLBACK");
|
|
2153
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
2154
|
+
return;
|
|
1868
2155
|
}
|
|
1869
|
-
|
|
1870
|
-
if (
|
|
1871
|
-
|
|
2156
|
+
const jobId = wpResult.rows[0].job_id;
|
|
2157
|
+
if (jobId != null) {
|
|
2158
|
+
await client.query(
|
|
2159
|
+
`UPDATE job_queue
|
|
2160
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
2161
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
2162
|
+
[jobId]
|
|
2163
|
+
);
|
|
1872
2164
|
}
|
|
2165
|
+
await client.query("COMMIT");
|
|
2166
|
+
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
2167
|
+
} catch (error) {
|
|
2168
|
+
await client.query("ROLLBACK");
|
|
2169
|
+
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
2170
|
+
throw error;
|
|
2171
|
+
} finally {
|
|
2172
|
+
client.release();
|
|
1873
2173
|
}
|
|
1874
2174
|
}
|
|
1875
|
-
|
|
1876
|
-
|
|
1877
|
-
|
|
1878
|
-
|
|
1879
|
-
|
|
1880
|
-
|
|
1881
|
-
|
|
2175
|
+
/**
|
|
2176
|
+
* Retrieve a waitpoint token by its ID.
|
|
2177
|
+
*
|
|
2178
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
2179
|
+
* @returns The waitpoint record, or null if not found.
|
|
2180
|
+
*/
|
|
2181
|
+
async getWaitpoint(tokenId) {
|
|
2182
|
+
const client = await this.pool.connect();
|
|
2183
|
+
try {
|
|
2184
|
+
const result = await client.query(
|
|
2185
|
+
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
2186
|
+
[tokenId]
|
|
2187
|
+
);
|
|
2188
|
+
if (result.rows.length === 0) return null;
|
|
2189
|
+
return result.rows[0];
|
|
2190
|
+
} catch (error) {
|
|
2191
|
+
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
2192
|
+
throw error;
|
|
2193
|
+
} finally {
|
|
2194
|
+
client.release();
|
|
1882
2195
|
}
|
|
1883
|
-
const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
|
|
1884
|
-
ssl = {
|
|
1885
|
-
...ssl,
|
|
1886
|
-
...caValue ? { ca: caValue } : {},
|
|
1887
|
-
cert: loadPemOrFile(
|
|
1888
|
-
typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
|
|
1889
|
-
),
|
|
1890
|
-
key: loadPemOrFile(
|
|
1891
|
-
typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
|
|
1892
|
-
),
|
|
1893
|
-
rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
|
|
1894
|
-
};
|
|
1895
2196
|
}
|
|
1896
|
-
|
|
1897
|
-
|
|
1898
|
-
|
|
1899
|
-
|
|
1900
|
-
|
|
1901
|
-
|
|
1902
|
-
|
|
1903
|
-
|
|
1904
|
-
|
|
1905
|
-
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
2197
|
+
/**
|
|
2198
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
2199
|
+
*
|
|
2200
|
+
* @returns The number of tokens that were expired.
|
|
2201
|
+
*/
|
|
2202
|
+
async expireTimedOutWaitpoints() {
|
|
2203
|
+
const client = await this.pool.connect();
|
|
2204
|
+
try {
|
|
2205
|
+
await client.query("BEGIN");
|
|
2206
|
+
const result = await client.query(
|
|
2207
|
+
`UPDATE waitpoints
|
|
2208
|
+
SET status = 'timed_out'
|
|
2209
|
+
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
2210
|
+
RETURNING id, job_id`
|
|
2211
|
+
);
|
|
2212
|
+
for (const row of result.rows) {
|
|
2213
|
+
if (row.job_id != null) {
|
|
2214
|
+
await client.query(
|
|
2215
|
+
`UPDATE job_queue
|
|
2216
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
2217
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
2218
|
+
[row.job_id]
|
|
2219
|
+
);
|
|
2220
|
+
}
|
|
2221
|
+
}
|
|
2222
|
+
await client.query("COMMIT");
|
|
2223
|
+
const count = result.rowCount || 0;
|
|
2224
|
+
if (count > 0) {
|
|
2225
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
2226
|
+
}
|
|
2227
|
+
return count;
|
|
2228
|
+
} catch (error) {
|
|
2229
|
+
await client.query("ROLLBACK");
|
|
2230
|
+
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
2231
|
+
throw error;
|
|
2232
|
+
} finally {
|
|
2233
|
+
client.release();
|
|
2234
|
+
}
|
|
1910
2235
|
}
|
|
1911
|
-
|
|
1912
|
-
|
|
1913
|
-
|
|
1914
|
-
|
|
1915
|
-
|
|
1916
|
-
|
|
1917
|
-
|
|
1918
|
-
|
|
2236
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2237
|
+
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
2238
|
+
const client = await this.pool.connect();
|
|
2239
|
+
try {
|
|
2240
|
+
let jobTypeFilter = "";
|
|
2241
|
+
const params = [reason];
|
|
2242
|
+
if (jobType) {
|
|
2243
|
+
if (Array.isArray(jobType)) {
|
|
2244
|
+
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
2245
|
+
params.push(jobType);
|
|
2246
|
+
} else {
|
|
2247
|
+
jobTypeFilter = ` AND job_type = $2`;
|
|
2248
|
+
params.push(jobType);
|
|
2249
|
+
}
|
|
2250
|
+
}
|
|
2251
|
+
await client.query(
|
|
2252
|
+
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
2253
|
+
params
|
|
2254
|
+
);
|
|
2255
|
+
} finally {
|
|
2256
|
+
client.release();
|
|
2257
|
+
}
|
|
1919
2258
|
}
|
|
1920
|
-
return pool;
|
|
1921
2259
|
};
|
|
1922
2260
|
|
|
1923
2261
|
// src/backends/redis-scripts.ts
|
|
@@ -1974,7 +2312,10 @@ redis.call('HMSET', jobKey,
|
|
|
1974
2312
|
'lastFailedAt', 'null',
|
|
1975
2313
|
'lastCancelledAt', 'null',
|
|
1976
2314
|
'tags', tagsJson,
|
|
1977
|
-
'idempotencyKey', idempotencyKey
|
|
2315
|
+
'idempotencyKey', idempotencyKey,
|
|
2316
|
+
'waitUntil', 'null',
|
|
2317
|
+
'waitTokenId', 'null',
|
|
2318
|
+
'stepData', 'null'
|
|
1978
2319
|
)
|
|
1979
2320
|
|
|
1980
2321
|
-- Status index
|
|
@@ -2057,7 +2398,25 @@ for _, jobId in ipairs(retries) do
|
|
|
2057
2398
|
redis.call('ZREM', prefix .. 'retry', jobId)
|
|
2058
2399
|
end
|
|
2059
2400
|
|
|
2060
|
-
-- 3.
|
|
2401
|
+
-- 3. Move ready waiting jobs (time-based, no token) into queue
|
|
2402
|
+
local waitingJobs = redis.call('ZRANGEBYSCORE', prefix .. 'waiting', '-inf', nowMs, 'LIMIT', 0, 200)
|
|
2403
|
+
for _, jobId in ipairs(waitingJobs) do
|
|
2404
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2405
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2406
|
+
local waitTokenId = redis.call('HGET', jk, 'waitTokenId')
|
|
2407
|
+
if status == 'waiting' and (waitTokenId == false or waitTokenId == 'null') then
|
|
2408
|
+
local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2409
|
+
local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2410
|
+
local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
|
|
2411
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2412
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2413
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2414
|
+
redis.call('HMSET', jk, 'status', 'pending', 'waitUntil', 'null')
|
|
2415
|
+
end
|
|
2416
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2417
|
+
end
|
|
2418
|
+
|
|
2419
|
+
-- 4. Parse job type filter
|
|
2061
2420
|
local filterTypes = nil
|
|
2062
2421
|
if jobTypeFilter ~= "null" then
|
|
2063
2422
|
-- Could be a JSON array or a plain string
|
|
@@ -2070,7 +2429,7 @@ if jobTypeFilter ~= "null" then
|
|
|
2070
2429
|
end
|
|
2071
2430
|
end
|
|
2072
2431
|
|
|
2073
|
-
--
|
|
2432
|
+
-- 5. Pop candidates from queue (highest score first)
|
|
2074
2433
|
-- We pop more than batchSize because some may be filtered out
|
|
2075
2434
|
local popCount = batchSize * 3
|
|
2076
2435
|
local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
|
|
@@ -2154,7 +2513,10 @@ local jk = prefix .. 'job:' .. jobId
|
|
|
2154
2513
|
redis.call('HMSET', jk,
|
|
2155
2514
|
'status', 'completed',
|
|
2156
2515
|
'updatedAt', nowMs,
|
|
2157
|
-
'completedAt', nowMs
|
|
2516
|
+
'completedAt', nowMs,
|
|
2517
|
+
'stepData', 'null',
|
|
2518
|
+
'waitUntil', 'null',
|
|
2519
|
+
'waitTokenId', 'null'
|
|
2158
2520
|
)
|
|
2159
2521
|
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2160
2522
|
redis.call('SADD', prefix .. 'status:completed', jobId)
|
|
@@ -2213,6 +2575,7 @@ local nowMs = tonumber(ARGV[2])
|
|
|
2213
2575
|
local jk = prefix .. 'job:' .. jobId
|
|
2214
2576
|
|
|
2215
2577
|
local oldStatus = redis.call('HGET', jk, 'status')
|
|
2578
|
+
if oldStatus ~= 'failed' and oldStatus ~= 'processing' then return 0 end
|
|
2216
2579
|
|
|
2217
2580
|
redis.call('HMSET', jk,
|
|
2218
2581
|
'status', 'pending',
|
|
@@ -2224,9 +2587,7 @@ redis.call('HMSET', jk,
|
|
|
2224
2587
|
)
|
|
2225
2588
|
|
|
2226
2589
|
-- Remove from old status, add to pending
|
|
2227
|
-
|
|
2228
|
-
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2229
|
-
end
|
|
2590
|
+
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2230
2591
|
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2231
2592
|
|
|
2232
2593
|
-- Remove from retry sorted set if present
|
|
@@ -2247,18 +2608,21 @@ local nowMs = ARGV[2]
|
|
|
2247
2608
|
local jk = prefix .. 'job:' .. jobId
|
|
2248
2609
|
|
|
2249
2610
|
local status = redis.call('HGET', jk, 'status')
|
|
2250
|
-
if status ~= 'pending' then return 0 end
|
|
2611
|
+
if status ~= 'pending' and status ~= 'waiting' then return 0 end
|
|
2251
2612
|
|
|
2252
2613
|
redis.call('HMSET', jk,
|
|
2253
2614
|
'status', 'cancelled',
|
|
2254
2615
|
'updatedAt', nowMs,
|
|
2255
|
-
'lastCancelledAt', nowMs
|
|
2616
|
+
'lastCancelledAt', nowMs,
|
|
2617
|
+
'waitUntil', 'null',
|
|
2618
|
+
'waitTokenId', 'null'
|
|
2256
2619
|
)
|
|
2257
|
-
redis.call('SREM', prefix .. 'status:
|
|
2620
|
+
redis.call('SREM', prefix .. 'status:' .. status, jobId)
|
|
2258
2621
|
redis.call('SADD', prefix .. 'status:cancelled', jobId)
|
|
2259
|
-
-- Remove from queue / delayed
|
|
2622
|
+
-- Remove from queue / delayed / waiting
|
|
2260
2623
|
redis.call('ZREM', prefix .. 'queue', jobId)
|
|
2261
2624
|
redis.call('ZREM', prefix .. 'delayed', jobId)
|
|
2625
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2262
2626
|
|
|
2263
2627
|
return 1
|
|
2264
2628
|
`;
|
|
@@ -2326,18 +2690,16 @@ end
|
|
|
2326
2690
|
|
|
2327
2691
|
return count
|
|
2328
2692
|
`;
|
|
2329
|
-
var
|
|
2693
|
+
var CLEANUP_OLD_JOBS_BATCH_SCRIPT = `
|
|
2330
2694
|
local prefix = KEYS[1]
|
|
2331
2695
|
local cutoffMs = tonumber(ARGV[1])
|
|
2332
|
-
|
|
2333
|
-
local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
|
|
2334
2696
|
local count = 0
|
|
2335
2697
|
|
|
2336
|
-
for
|
|
2698
|
+
for i = 2, #ARGV do
|
|
2699
|
+
local jobId = ARGV[i]
|
|
2337
2700
|
local jk = prefix .. 'job:' .. jobId
|
|
2338
2701
|
local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
|
|
2339
2702
|
if updatedAt and updatedAt < cutoffMs then
|
|
2340
|
-
-- Remove all indexes
|
|
2341
2703
|
local jobType = redis.call('HGET', jk, 'jobType')
|
|
2342
2704
|
local tagsJson = redis.call('HGET', jk, 'tags')
|
|
2343
2705
|
local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
|
|
@@ -2360,7 +2722,6 @@ for _, jobId in ipairs(completed) do
|
|
|
2360
2722
|
if idempotencyKey and idempotencyKey ~= 'null' then
|
|
2361
2723
|
redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
|
|
2362
2724
|
end
|
|
2363
|
-
-- Delete events
|
|
2364
2725
|
redis.call('DEL', prefix .. 'events:' .. jobId)
|
|
2365
2726
|
|
|
2366
2727
|
count = count + 1
|
|
@@ -2369,8 +2730,158 @@ end
|
|
|
2369
2730
|
|
|
2370
2731
|
return count
|
|
2371
2732
|
`;
|
|
2733
|
+
var WAIT_JOB_SCRIPT = `
|
|
2734
|
+
local prefix = KEYS[1]
|
|
2735
|
+
local jobId = ARGV[1]
|
|
2736
|
+
local waitUntilMs = ARGV[2]
|
|
2737
|
+
local waitTokenId = ARGV[3]
|
|
2738
|
+
local stepDataJson = ARGV[4]
|
|
2739
|
+
local nowMs = ARGV[5]
|
|
2740
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2741
|
+
|
|
2742
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2743
|
+
if status ~= 'processing' then return 0 end
|
|
2744
|
+
|
|
2745
|
+
redis.call('HMSET', jk,
|
|
2746
|
+
'status', 'waiting',
|
|
2747
|
+
'waitUntil', waitUntilMs,
|
|
2748
|
+
'waitTokenId', waitTokenId,
|
|
2749
|
+
'stepData', stepDataJson,
|
|
2750
|
+
'lockedAt', 'null',
|
|
2751
|
+
'lockedBy', 'null',
|
|
2752
|
+
'updatedAt', nowMs
|
|
2753
|
+
)
|
|
2754
|
+
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2755
|
+
redis.call('SADD', prefix .. 'status:waiting', jobId)
|
|
2756
|
+
|
|
2757
|
+
-- Add to waiting sorted set if time-based wait
|
|
2758
|
+
if waitUntilMs ~= 'null' then
|
|
2759
|
+
redis.call('ZADD', prefix .. 'waiting', tonumber(waitUntilMs), jobId)
|
|
2760
|
+
end
|
|
2761
|
+
|
|
2762
|
+
return 1
|
|
2763
|
+
`;
|
|
2764
|
+
var COMPLETE_WAITPOINT_SCRIPT = `
|
|
2765
|
+
local prefix = KEYS[1]
|
|
2766
|
+
local tokenId = ARGV[1]
|
|
2767
|
+
local outputJson = ARGV[2]
|
|
2768
|
+
local nowMs = ARGV[3]
|
|
2769
|
+
local wpk = prefix .. 'waitpoint:' .. tokenId
|
|
2770
|
+
|
|
2771
|
+
local wpStatus = redis.call('HGET', wpk, 'status')
|
|
2772
|
+
if not wpStatus or wpStatus ~= 'waiting' then return 0 end
|
|
2773
|
+
|
|
2774
|
+
redis.call('HMSET', wpk,
|
|
2775
|
+
'status', 'completed',
|
|
2776
|
+
'output', outputJson,
|
|
2777
|
+
'completedAt', nowMs
|
|
2778
|
+
)
|
|
2779
|
+
|
|
2780
|
+
-- Move associated job back to pending
|
|
2781
|
+
local jobId = redis.call('HGET', wpk, 'jobId')
|
|
2782
|
+
if jobId and jobId ~= 'null' then
|
|
2783
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2784
|
+
local jobStatus = redis.call('HGET', jk, 'status')
|
|
2785
|
+
if jobStatus == 'waiting' then
|
|
2786
|
+
redis.call('HMSET', jk,
|
|
2787
|
+
'status', 'pending',
|
|
2788
|
+
'waitTokenId', 'null',
|
|
2789
|
+
'waitUntil', 'null',
|
|
2790
|
+
'updatedAt', nowMs
|
|
2791
|
+
)
|
|
2792
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2793
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2794
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2795
|
+
|
|
2796
|
+
-- Re-add to queue
|
|
2797
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2798
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2799
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
2800
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2801
|
+
end
|
|
2802
|
+
end
|
|
2803
|
+
|
|
2804
|
+
return 1
|
|
2805
|
+
`;
|
|
2806
|
+
var EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT = `
|
|
2807
|
+
local prefix = KEYS[1]
|
|
2808
|
+
local nowMs = tonumber(ARGV[1])
|
|
2809
|
+
|
|
2810
|
+
local expiredIds = redis.call('ZRANGEBYSCORE', prefix .. 'waitpoint_timeout', '-inf', nowMs)
|
|
2811
|
+
local count = 0
|
|
2812
|
+
|
|
2813
|
+
for _, tokenId in ipairs(expiredIds) do
|
|
2814
|
+
local wpk = prefix .. 'waitpoint:' .. tokenId
|
|
2815
|
+
local wpStatus = redis.call('HGET', wpk, 'status')
|
|
2816
|
+
if wpStatus == 'waiting' then
|
|
2817
|
+
redis.call('HMSET', wpk,
|
|
2818
|
+
'status', 'timed_out'
|
|
2819
|
+
)
|
|
2820
|
+
|
|
2821
|
+
-- Move associated job back to pending
|
|
2822
|
+
local jobId = redis.call('HGET', wpk, 'jobId')
|
|
2823
|
+
if jobId and jobId ~= 'null' then
|
|
2824
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2825
|
+
local jobStatus = redis.call('HGET', jk, 'status')
|
|
2826
|
+
if jobStatus == 'waiting' then
|
|
2827
|
+
redis.call('HMSET', jk,
|
|
2828
|
+
'status', 'pending',
|
|
2829
|
+
'waitTokenId', 'null',
|
|
2830
|
+
'waitUntil', 'null',
|
|
2831
|
+
'updatedAt', nowMs
|
|
2832
|
+
)
|
|
2833
|
+
redis.call('SREM', prefix .. 'status:waiting', jobId)
|
|
2834
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2835
|
+
redis.call('ZREM', prefix .. 'waiting', jobId)
|
|
2372
2836
|
|
|
2373
|
-
|
|
2837
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2838
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2839
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
2840
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2841
|
+
end
|
|
2842
|
+
end
|
|
2843
|
+
|
|
2844
|
+
count = count + 1
|
|
2845
|
+
end
|
|
2846
|
+
redis.call('ZREM', prefix .. 'waitpoint_timeout', tokenId)
|
|
2847
|
+
end
|
|
2848
|
+
|
|
2849
|
+
return count
|
|
2850
|
+
`;
|
|
2851
|
+
var MAX_TIMEOUT_MS2 = 365 * 24 * 60 * 60 * 1e3;
|
|
2852
|
+
function parseTimeoutString2(timeout) {
|
|
2853
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
2854
|
+
if (!match) {
|
|
2855
|
+
throw new Error(
|
|
2856
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
2857
|
+
);
|
|
2858
|
+
}
|
|
2859
|
+
const value = parseInt(match[1], 10);
|
|
2860
|
+
const unit = match[2];
|
|
2861
|
+
let ms;
|
|
2862
|
+
switch (unit) {
|
|
2863
|
+
case "s":
|
|
2864
|
+
ms = value * 1e3;
|
|
2865
|
+
break;
|
|
2866
|
+
case "m":
|
|
2867
|
+
ms = value * 60 * 1e3;
|
|
2868
|
+
break;
|
|
2869
|
+
case "h":
|
|
2870
|
+
ms = value * 60 * 60 * 1e3;
|
|
2871
|
+
break;
|
|
2872
|
+
case "d":
|
|
2873
|
+
ms = value * 24 * 60 * 60 * 1e3;
|
|
2874
|
+
break;
|
|
2875
|
+
default:
|
|
2876
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
2877
|
+
}
|
|
2878
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS2) {
|
|
2879
|
+
throw new Error(
|
|
2880
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
2881
|
+
);
|
|
2882
|
+
}
|
|
2883
|
+
return ms;
|
|
2884
|
+
}
|
|
2374
2885
|
function hashToObject(arr) {
|
|
2375
2886
|
const obj = {};
|
|
2376
2887
|
for (let i = 0; i < arr.length; i += 2) {
|
|
@@ -2436,9 +2947,20 @@ function deserializeJob(h) {
|
|
|
2436
2947
|
lastCancelledAt: dateOrNull(h.lastCancelledAt),
|
|
2437
2948
|
tags,
|
|
2438
2949
|
idempotencyKey: nullish(h.idempotencyKey),
|
|
2439
|
-
progress: numOrNull(h.progress)
|
|
2950
|
+
progress: numOrNull(h.progress),
|
|
2951
|
+
waitUntil: dateOrNull(h.waitUntil),
|
|
2952
|
+
waitTokenId: nullish(h.waitTokenId),
|
|
2953
|
+
stepData: parseStepData(h.stepData)
|
|
2440
2954
|
};
|
|
2441
2955
|
}
|
|
2956
|
+
function parseStepData(raw) {
|
|
2957
|
+
if (!raw || raw === "null") return void 0;
|
|
2958
|
+
try {
|
|
2959
|
+
return JSON.parse(raw);
|
|
2960
|
+
} catch {
|
|
2961
|
+
return void 0;
|
|
2962
|
+
}
|
|
2963
|
+
}
|
|
2442
2964
|
var RedisBackend = class {
|
|
2443
2965
|
constructor(redisConfig) {
|
|
2444
2966
|
let IORedis;
|
|
@@ -2594,8 +3116,14 @@ var RedisBackend = class {
|
|
|
2594
3116
|
if (filters.runAt) {
|
|
2595
3117
|
jobs = this.filterByRunAt(jobs, filters.runAt);
|
|
2596
3118
|
}
|
|
3119
|
+
if (filters.cursor !== void 0) {
|
|
3120
|
+
jobs = jobs.filter((j) => j.id < filters.cursor);
|
|
3121
|
+
}
|
|
3122
|
+
}
|
|
3123
|
+
jobs.sort((a, b) => b.id - a.id);
|
|
3124
|
+
if (filters?.cursor !== void 0) {
|
|
3125
|
+
return jobs.slice(0, limit);
|
|
2597
3126
|
}
|
|
2598
|
-
jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
|
|
2599
3127
|
return jobs.slice(offset, offset + limit);
|
|
2600
3128
|
}
|
|
2601
3129
|
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
@@ -2827,22 +3355,104 @@ var RedisBackend = class {
|
|
|
2827
3355
|
log(`Edited ${count} pending jobs`);
|
|
2828
3356
|
return count;
|
|
2829
3357
|
}
|
|
2830
|
-
|
|
3358
|
+
/**
|
|
3359
|
+
* Delete completed jobs older than the given number of days.
|
|
3360
|
+
* Uses SSCAN to iterate the completed set in batches, avoiding
|
|
3361
|
+
* loading all IDs into memory and preventing long Redis blocks.
|
|
3362
|
+
*
|
|
3363
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
3364
|
+
* @param batchSize - Number of IDs to scan per SSCAN iteration (default 200).
|
|
3365
|
+
* @returns Total number of deleted jobs.
|
|
3366
|
+
*/
|
|
3367
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 200) {
|
|
2831
3368
|
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
2832
|
-
const
|
|
2833
|
-
|
|
2834
|
-
|
|
2835
|
-
|
|
2836
|
-
|
|
2837
|
-
|
|
2838
|
-
|
|
2839
|
-
|
|
3369
|
+
const setKey = `${this.prefix}status:completed`;
|
|
3370
|
+
let totalDeleted = 0;
|
|
3371
|
+
let cursor = "0";
|
|
3372
|
+
do {
|
|
3373
|
+
const [nextCursor, ids] = await this.client.sscan(
|
|
3374
|
+
setKey,
|
|
3375
|
+
cursor,
|
|
3376
|
+
"COUNT",
|
|
3377
|
+
batchSize
|
|
3378
|
+
);
|
|
3379
|
+
cursor = nextCursor;
|
|
3380
|
+
if (ids.length > 0) {
|
|
3381
|
+
const result = await this.client.eval(
|
|
3382
|
+
CLEANUP_OLD_JOBS_BATCH_SCRIPT,
|
|
3383
|
+
1,
|
|
3384
|
+
this.prefix,
|
|
3385
|
+
cutoffMs,
|
|
3386
|
+
...ids
|
|
3387
|
+
);
|
|
3388
|
+
totalDeleted += Number(result);
|
|
3389
|
+
}
|
|
3390
|
+
} while (cursor !== "0");
|
|
3391
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
3392
|
+
return totalDeleted;
|
|
2840
3393
|
}
|
|
2841
|
-
|
|
2842
|
-
|
|
2843
|
-
|
|
2844
|
-
|
|
2845
|
-
|
|
3394
|
+
/**
|
|
3395
|
+
* Delete job events older than the given number of days.
|
|
3396
|
+
* Iterates all event lists and removes events whose createdAt is before the cutoff.
|
|
3397
|
+
* Also removes orphaned event lists (where the job no longer exists).
|
|
3398
|
+
*
|
|
3399
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
3400
|
+
* @param batchSize - Number of event keys to scan per SCAN iteration (default 200).
|
|
3401
|
+
* @returns Total number of deleted events.
|
|
3402
|
+
*/
|
|
3403
|
+
async cleanupOldJobEvents(daysToKeep = 30, batchSize = 200) {
|
|
3404
|
+
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
3405
|
+
const pattern = `${this.prefix}events:*`;
|
|
3406
|
+
let totalDeleted = 0;
|
|
3407
|
+
let cursor = "0";
|
|
3408
|
+
do {
|
|
3409
|
+
const [nextCursor, keys] = await this.client.scan(
|
|
3410
|
+
cursor,
|
|
3411
|
+
"MATCH",
|
|
3412
|
+
pattern,
|
|
3413
|
+
"COUNT",
|
|
3414
|
+
batchSize
|
|
3415
|
+
);
|
|
3416
|
+
cursor = nextCursor;
|
|
3417
|
+
for (const key of keys) {
|
|
3418
|
+
const jobIdStr = key.slice(`${this.prefix}events:`.length);
|
|
3419
|
+
const jobExists = await this.client.exists(
|
|
3420
|
+
`${this.prefix}job:${jobIdStr}`
|
|
3421
|
+
);
|
|
3422
|
+
if (!jobExists) {
|
|
3423
|
+
const len = await this.client.llen(key);
|
|
3424
|
+
await this.client.del(key);
|
|
3425
|
+
totalDeleted += len;
|
|
3426
|
+
continue;
|
|
3427
|
+
}
|
|
3428
|
+
const events = await this.client.lrange(key, 0, -1);
|
|
3429
|
+
const kept = [];
|
|
3430
|
+
for (const raw of events) {
|
|
3431
|
+
try {
|
|
3432
|
+
const e = JSON.parse(raw);
|
|
3433
|
+
if (e.createdAt >= cutoffMs) {
|
|
3434
|
+
kept.push(raw);
|
|
3435
|
+
} else {
|
|
3436
|
+
totalDeleted++;
|
|
3437
|
+
}
|
|
3438
|
+
} catch {
|
|
3439
|
+
totalDeleted++;
|
|
3440
|
+
}
|
|
3441
|
+
}
|
|
3442
|
+
if (kept.length === 0) {
|
|
3443
|
+
await this.client.del(key);
|
|
3444
|
+
} else if (kept.length < events.length) {
|
|
3445
|
+
const pipeline = this.client.pipeline();
|
|
3446
|
+
pipeline.del(key);
|
|
3447
|
+
for (const raw of kept) {
|
|
3448
|
+
pipeline.rpush(key, raw);
|
|
3449
|
+
}
|
|
3450
|
+
await pipeline.exec();
|
|
3451
|
+
}
|
|
3452
|
+
}
|
|
3453
|
+
} while (cursor !== "0");
|
|
3454
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
3455
|
+
return totalDeleted;
|
|
2846
3456
|
}
|
|
2847
3457
|
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
2848
3458
|
const maxAgeMs = maxProcessingTimeMinutes * 60 * 1e3;
|
|
@@ -2857,6 +3467,191 @@ var RedisBackend = class {
|
|
|
2857
3467
|
log(`Reclaimed ${result} stuck jobs`);
|
|
2858
3468
|
return Number(result);
|
|
2859
3469
|
}
|
|
3470
|
+
// ── Wait / step-data support ────────────────────────────────────────
|
|
3471
|
+
/**
|
|
3472
|
+
* Transition a job from 'processing' to 'waiting' status.
|
|
3473
|
+
* Persists step data so the handler can resume from where it left off.
|
|
3474
|
+
*
|
|
3475
|
+
* @param jobId - The job to pause.
|
|
3476
|
+
* @param options - Wait configuration including optional waitUntil date, token ID, and step data.
|
|
3477
|
+
*/
|
|
3478
|
+
async waitJob(jobId, options) {
|
|
3479
|
+
const now = this.nowMs();
|
|
3480
|
+
const waitUntilMs = options.waitUntil ? options.waitUntil.getTime().toString() : "null";
|
|
3481
|
+
const waitTokenId = options.waitTokenId ?? "null";
|
|
3482
|
+
const stepDataJson = JSON.stringify(options.stepData);
|
|
3483
|
+
const result = await this.client.eval(
|
|
3484
|
+
WAIT_JOB_SCRIPT,
|
|
3485
|
+
1,
|
|
3486
|
+
this.prefix,
|
|
3487
|
+
jobId,
|
|
3488
|
+
waitUntilMs,
|
|
3489
|
+
waitTokenId,
|
|
3490
|
+
stepDataJson,
|
|
3491
|
+
now
|
|
3492
|
+
);
|
|
3493
|
+
if (Number(result) === 0) {
|
|
3494
|
+
log(
|
|
3495
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
3496
|
+
);
|
|
3497
|
+
return;
|
|
3498
|
+
}
|
|
3499
|
+
await this.recordJobEvent(jobId, "waiting" /* Waiting */, {
|
|
3500
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
3501
|
+
waitTokenId: options.waitTokenId ?? null
|
|
3502
|
+
});
|
|
3503
|
+
log(`Job ${jobId} set to waiting`);
|
|
3504
|
+
}
|
|
3505
|
+
/**
|
|
3506
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
3507
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
3508
|
+
*
|
|
3509
|
+
* @param jobId - The job to update.
|
|
3510
|
+
* @param stepData - The step data to persist.
|
|
3511
|
+
*/
|
|
3512
|
+
async updateStepData(jobId, stepData) {
|
|
3513
|
+
try {
|
|
3514
|
+
const now = this.nowMs();
|
|
3515
|
+
await this.client.hset(
|
|
3516
|
+
`${this.prefix}job:${jobId}`,
|
|
3517
|
+
"stepData",
|
|
3518
|
+
JSON.stringify(stepData),
|
|
3519
|
+
"updatedAt",
|
|
3520
|
+
now.toString()
|
|
3521
|
+
);
|
|
3522
|
+
} catch (error) {
|
|
3523
|
+
log(`Error updating stepData for job ${jobId}: ${error}`);
|
|
3524
|
+
}
|
|
3525
|
+
}
|
|
3526
|
+
/**
|
|
3527
|
+
* Create a waitpoint token.
|
|
3528
|
+
*
|
|
3529
|
+
* @param jobId - The job ID to associate with the token (null if created outside a handler).
|
|
3530
|
+
* @param options - Optional timeout string (e.g. '10m', '1h') and tags.
|
|
3531
|
+
* @returns The created waitpoint with its unique ID.
|
|
3532
|
+
*/
|
|
3533
|
+
async createWaitpoint(jobId, options) {
|
|
3534
|
+
const id = `wp_${randomUUID()}`;
|
|
3535
|
+
const now = this.nowMs();
|
|
3536
|
+
let timeoutAt = null;
|
|
3537
|
+
if (options?.timeout) {
|
|
3538
|
+
const ms = parseTimeoutString2(options.timeout);
|
|
3539
|
+
timeoutAt = now + ms;
|
|
3540
|
+
}
|
|
3541
|
+
const key = `${this.prefix}waitpoint:${id}`;
|
|
3542
|
+
const fields = [
|
|
3543
|
+
"id",
|
|
3544
|
+
id,
|
|
3545
|
+
"jobId",
|
|
3546
|
+
jobId !== null ? jobId.toString() : "null",
|
|
3547
|
+
"status",
|
|
3548
|
+
"waiting",
|
|
3549
|
+
"output",
|
|
3550
|
+
"null",
|
|
3551
|
+
"timeoutAt",
|
|
3552
|
+
timeoutAt !== null ? timeoutAt.toString() : "null",
|
|
3553
|
+
"createdAt",
|
|
3554
|
+
now.toString(),
|
|
3555
|
+
"completedAt",
|
|
3556
|
+
"null",
|
|
3557
|
+
"tags",
|
|
3558
|
+
options?.tags ? JSON.stringify(options.tags) : "null"
|
|
3559
|
+
];
|
|
3560
|
+
await this.client.hmset(key, ...fields);
|
|
3561
|
+
if (timeoutAt !== null) {
|
|
3562
|
+
await this.client.zadd(`${this.prefix}waitpoint_timeout`, timeoutAt, id);
|
|
3563
|
+
}
|
|
3564
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
3565
|
+
return { id };
|
|
3566
|
+
}
|
|
3567
|
+
/**
|
|
3568
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
3569
|
+
*
|
|
3570
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
3571
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
3572
|
+
*/
|
|
3573
|
+
async completeWaitpoint(tokenId, data) {
|
|
3574
|
+
const now = this.nowMs();
|
|
3575
|
+
const outputJson = data != null ? JSON.stringify(data) : "null";
|
|
3576
|
+
const result = await this.client.eval(
|
|
3577
|
+
COMPLETE_WAITPOINT_SCRIPT,
|
|
3578
|
+
1,
|
|
3579
|
+
this.prefix,
|
|
3580
|
+
tokenId,
|
|
3581
|
+
outputJson,
|
|
3582
|
+
now
|
|
3583
|
+
);
|
|
3584
|
+
if (Number(result) === 0) {
|
|
3585
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
3586
|
+
return;
|
|
3587
|
+
}
|
|
3588
|
+
log(`Completed waitpoint ${tokenId}`);
|
|
3589
|
+
}
|
|
3590
|
+
/**
|
|
3591
|
+
* Retrieve a waitpoint token by its ID.
|
|
3592
|
+
*
|
|
3593
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
3594
|
+
* @returns The waitpoint record, or null if not found.
|
|
3595
|
+
*/
|
|
3596
|
+
async getWaitpoint(tokenId) {
|
|
3597
|
+
const data = await this.client.hgetall(
|
|
3598
|
+
`${this.prefix}waitpoint:${tokenId}`
|
|
3599
|
+
);
|
|
3600
|
+
if (!data || Object.keys(data).length === 0) return null;
|
|
3601
|
+
const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
|
|
3602
|
+
const numOrNull = (v) => {
|
|
3603
|
+
const n = nullish(v);
|
|
3604
|
+
return n === null ? null : Number(n);
|
|
3605
|
+
};
|
|
3606
|
+
const dateOrNull = (v) => {
|
|
3607
|
+
const n = numOrNull(v);
|
|
3608
|
+
return n === null ? null : new Date(n);
|
|
3609
|
+
};
|
|
3610
|
+
let output = null;
|
|
3611
|
+
if (data.output && data.output !== "null") {
|
|
3612
|
+
try {
|
|
3613
|
+
output = JSON.parse(data.output);
|
|
3614
|
+
} catch {
|
|
3615
|
+
output = data.output;
|
|
3616
|
+
}
|
|
3617
|
+
}
|
|
3618
|
+
let tags = null;
|
|
3619
|
+
if (data.tags && data.tags !== "null") {
|
|
3620
|
+
try {
|
|
3621
|
+
tags = JSON.parse(data.tags);
|
|
3622
|
+
} catch {
|
|
3623
|
+
}
|
|
3624
|
+
}
|
|
3625
|
+
return {
|
|
3626
|
+
id: data.id,
|
|
3627
|
+
jobId: numOrNull(data.jobId),
|
|
3628
|
+
status: data.status,
|
|
3629
|
+
output,
|
|
3630
|
+
timeoutAt: dateOrNull(data.timeoutAt),
|
|
3631
|
+
createdAt: new Date(Number(data.createdAt)),
|
|
3632
|
+
completedAt: dateOrNull(data.completedAt),
|
|
3633
|
+
tags
|
|
3634
|
+
};
|
|
3635
|
+
}
|
|
3636
|
+
/**
|
|
3637
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
3638
|
+
*
|
|
3639
|
+
* @returns The number of tokens that were expired.
|
|
3640
|
+
*/
|
|
3641
|
+
async expireTimedOutWaitpoints() {
|
|
3642
|
+
const now = this.nowMs();
|
|
3643
|
+
const result = await this.client.eval(
|
|
3644
|
+
EXPIRE_TIMED_OUT_WAITPOINTS_SCRIPT,
|
|
3645
|
+
1,
|
|
3646
|
+
this.prefix,
|
|
3647
|
+
now
|
|
3648
|
+
);
|
|
3649
|
+
const count = Number(result);
|
|
3650
|
+
if (count > 0) {
|
|
3651
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
3652
|
+
}
|
|
3653
|
+
return count;
|
|
3654
|
+
}
|
|
2860
3655
|
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2861
3656
|
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
2862
3657
|
let ids = await this.client.smembers(`${this.prefix}status:pending`);
|
|
@@ -2961,6 +3756,332 @@ var RedisBackend = class {
|
|
|
2961
3756
|
return true;
|
|
2962
3757
|
});
|
|
2963
3758
|
}
|
|
3759
|
+
// ── Cron schedules ──────────────────────────────────────────────────
|
|
3760
|
+
/** Create a cron schedule and return its ID. */
|
|
3761
|
+
async addCronSchedule(input) {
|
|
3762
|
+
const existingId = await this.client.get(
|
|
3763
|
+
`${this.prefix}cron_name:${input.scheduleName}`
|
|
3764
|
+
);
|
|
3765
|
+
if (existingId !== null) {
|
|
3766
|
+
throw new Error(
|
|
3767
|
+
`Cron schedule with name "${input.scheduleName}" already exists`
|
|
3768
|
+
);
|
|
3769
|
+
}
|
|
3770
|
+
const id = await this.client.incr(`${this.prefix}cron_id_seq`);
|
|
3771
|
+
const now = this.nowMs();
|
|
3772
|
+
const key = `${this.prefix}cron:${id}`;
|
|
3773
|
+
const fields = [
|
|
3774
|
+
"id",
|
|
3775
|
+
id.toString(),
|
|
3776
|
+
"scheduleName",
|
|
3777
|
+
input.scheduleName,
|
|
3778
|
+
"cronExpression",
|
|
3779
|
+
input.cronExpression,
|
|
3780
|
+
"jobType",
|
|
3781
|
+
input.jobType,
|
|
3782
|
+
"payload",
|
|
3783
|
+
JSON.stringify(input.payload),
|
|
3784
|
+
"maxAttempts",
|
|
3785
|
+
input.maxAttempts.toString(),
|
|
3786
|
+
"priority",
|
|
3787
|
+
input.priority.toString(),
|
|
3788
|
+
"timeoutMs",
|
|
3789
|
+
input.timeoutMs !== null ? input.timeoutMs.toString() : "null",
|
|
3790
|
+
"forceKillOnTimeout",
|
|
3791
|
+
input.forceKillOnTimeout ? "true" : "false",
|
|
3792
|
+
"tags",
|
|
3793
|
+
input.tags ? JSON.stringify(input.tags) : "null",
|
|
3794
|
+
"timezone",
|
|
3795
|
+
input.timezone,
|
|
3796
|
+
"allowOverlap",
|
|
3797
|
+
input.allowOverlap ? "true" : "false",
|
|
3798
|
+
"status",
|
|
3799
|
+
"active",
|
|
3800
|
+
"lastEnqueuedAt",
|
|
3801
|
+
"null",
|
|
3802
|
+
"lastJobId",
|
|
3803
|
+
"null",
|
|
3804
|
+
"nextRunAt",
|
|
3805
|
+
input.nextRunAt ? input.nextRunAt.getTime().toString() : "null",
|
|
3806
|
+
"createdAt",
|
|
3807
|
+
now.toString(),
|
|
3808
|
+
"updatedAt",
|
|
3809
|
+
now.toString()
|
|
3810
|
+
];
|
|
3811
|
+
await this.client.hmset(key, ...fields);
|
|
3812
|
+
await this.client.set(
|
|
3813
|
+
`${this.prefix}cron_name:${input.scheduleName}`,
|
|
3814
|
+
id.toString()
|
|
3815
|
+
);
|
|
3816
|
+
await this.client.sadd(`${this.prefix}crons`, id.toString());
|
|
3817
|
+
await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
|
|
3818
|
+
if (input.nextRunAt) {
|
|
3819
|
+
await this.client.zadd(
|
|
3820
|
+
`${this.prefix}cron_due`,
|
|
3821
|
+
input.nextRunAt.getTime(),
|
|
3822
|
+
id.toString()
|
|
3823
|
+
);
|
|
3824
|
+
}
|
|
3825
|
+
log(`Added cron schedule ${id}: "${input.scheduleName}"`);
|
|
3826
|
+
return id;
|
|
3827
|
+
}
|
|
3828
|
+
/** Get a cron schedule by ID. */
|
|
3829
|
+
async getCronSchedule(id) {
|
|
3830
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
3831
|
+
if (!data || Object.keys(data).length === 0) return null;
|
|
3832
|
+
return this.deserializeCronSchedule(data);
|
|
3833
|
+
}
|
|
3834
|
+
/** Get a cron schedule by its unique name. */
|
|
3835
|
+
async getCronScheduleByName(name) {
|
|
3836
|
+
const id = await this.client.get(`${this.prefix}cron_name:${name}`);
|
|
3837
|
+
if (id === null) return null;
|
|
3838
|
+
return this.getCronSchedule(Number(id));
|
|
3839
|
+
}
|
|
3840
|
+
/** List cron schedules, optionally filtered by status. */
|
|
3841
|
+
async listCronSchedules(status) {
|
|
3842
|
+
let ids;
|
|
3843
|
+
if (status) {
|
|
3844
|
+
ids = await this.client.smembers(`${this.prefix}cron_status:${status}`);
|
|
3845
|
+
} else {
|
|
3846
|
+
ids = await this.client.smembers(`${this.prefix}crons`);
|
|
3847
|
+
}
|
|
3848
|
+
if (ids.length === 0) return [];
|
|
3849
|
+
const pipeline = this.client.pipeline();
|
|
3850
|
+
for (const id of ids) {
|
|
3851
|
+
pipeline.hgetall(`${this.prefix}cron:${id}`);
|
|
3852
|
+
}
|
|
3853
|
+
const results = await pipeline.exec();
|
|
3854
|
+
const schedules = [];
|
|
3855
|
+
if (results) {
|
|
3856
|
+
for (const [err, data] of results) {
|
|
3857
|
+
if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
|
|
3858
|
+
schedules.push(
|
|
3859
|
+
this.deserializeCronSchedule(data)
|
|
3860
|
+
);
|
|
3861
|
+
}
|
|
3862
|
+
}
|
|
3863
|
+
}
|
|
3864
|
+
schedules.sort((a, b) => a.createdAt.getTime() - b.createdAt.getTime());
|
|
3865
|
+
return schedules;
|
|
3866
|
+
}
|
|
3867
|
+
/** Delete a cron schedule by ID. */
|
|
3868
|
+
async removeCronSchedule(id) {
|
|
3869
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
3870
|
+
if (!data || Object.keys(data).length === 0) return;
|
|
3871
|
+
const name = data.scheduleName;
|
|
3872
|
+
const status = data.status;
|
|
3873
|
+
await this.client.del(`${this.prefix}cron:${id}`);
|
|
3874
|
+
await this.client.del(`${this.prefix}cron_name:${name}`);
|
|
3875
|
+
await this.client.srem(`${this.prefix}crons`, id.toString());
|
|
3876
|
+
await this.client.srem(
|
|
3877
|
+
`${this.prefix}cron_status:${status}`,
|
|
3878
|
+
id.toString()
|
|
3879
|
+
);
|
|
3880
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
3881
|
+
log(`Removed cron schedule ${id}`);
|
|
3882
|
+
}
|
|
3883
|
+
/** Pause a cron schedule. */
|
|
3884
|
+
async pauseCronSchedule(id) {
|
|
3885
|
+
const now = this.nowMs();
|
|
3886
|
+
await this.client.hset(
|
|
3887
|
+
`${this.prefix}cron:${id}`,
|
|
3888
|
+
"status",
|
|
3889
|
+
"paused",
|
|
3890
|
+
"updatedAt",
|
|
3891
|
+
now.toString()
|
|
3892
|
+
);
|
|
3893
|
+
await this.client.srem(`${this.prefix}cron_status:active`, id.toString());
|
|
3894
|
+
await this.client.sadd(`${this.prefix}cron_status:paused`, id.toString());
|
|
3895
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
3896
|
+
log(`Paused cron schedule ${id}`);
|
|
3897
|
+
}
|
|
3898
|
+
/** Resume a paused cron schedule. */
|
|
3899
|
+
async resumeCronSchedule(id) {
|
|
3900
|
+
const now = this.nowMs();
|
|
3901
|
+
await this.client.hset(
|
|
3902
|
+
`${this.prefix}cron:${id}`,
|
|
3903
|
+
"status",
|
|
3904
|
+
"active",
|
|
3905
|
+
"updatedAt",
|
|
3906
|
+
now.toString()
|
|
3907
|
+
);
|
|
3908
|
+
await this.client.srem(`${this.prefix}cron_status:paused`, id.toString());
|
|
3909
|
+
await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
|
|
3910
|
+
const nextRunAt = await this.client.hget(
|
|
3911
|
+
`${this.prefix}cron:${id}`,
|
|
3912
|
+
"nextRunAt"
|
|
3913
|
+
);
|
|
3914
|
+
if (nextRunAt && nextRunAt !== "null") {
|
|
3915
|
+
await this.client.zadd(
|
|
3916
|
+
`${this.prefix}cron_due`,
|
|
3917
|
+
Number(nextRunAt),
|
|
3918
|
+
id.toString()
|
|
3919
|
+
);
|
|
3920
|
+
}
|
|
3921
|
+
log(`Resumed cron schedule ${id}`);
|
|
3922
|
+
}
|
|
3923
|
+
/** Edit a cron schedule. */
|
|
3924
|
+
async editCronSchedule(id, updates, nextRunAt) {
|
|
3925
|
+
const now = this.nowMs();
|
|
3926
|
+
const fields = [];
|
|
3927
|
+
if (updates.cronExpression !== void 0) {
|
|
3928
|
+
fields.push("cronExpression", updates.cronExpression);
|
|
3929
|
+
}
|
|
3930
|
+
if (updates.payload !== void 0) {
|
|
3931
|
+
fields.push("payload", JSON.stringify(updates.payload));
|
|
3932
|
+
}
|
|
3933
|
+
if (updates.maxAttempts !== void 0) {
|
|
3934
|
+
fields.push("maxAttempts", updates.maxAttempts.toString());
|
|
3935
|
+
}
|
|
3936
|
+
if (updates.priority !== void 0) {
|
|
3937
|
+
fields.push("priority", updates.priority.toString());
|
|
3938
|
+
}
|
|
3939
|
+
if (updates.timeoutMs !== void 0) {
|
|
3940
|
+
fields.push(
|
|
3941
|
+
"timeoutMs",
|
|
3942
|
+
updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
|
|
3943
|
+
);
|
|
3944
|
+
}
|
|
3945
|
+
if (updates.forceKillOnTimeout !== void 0) {
|
|
3946
|
+
fields.push(
|
|
3947
|
+
"forceKillOnTimeout",
|
|
3948
|
+
updates.forceKillOnTimeout ? "true" : "false"
|
|
3949
|
+
);
|
|
3950
|
+
}
|
|
3951
|
+
if (updates.tags !== void 0) {
|
|
3952
|
+
fields.push(
|
|
3953
|
+
"tags",
|
|
3954
|
+
updates.tags !== null ? JSON.stringify(updates.tags) : "null"
|
|
3955
|
+
);
|
|
3956
|
+
}
|
|
3957
|
+
if (updates.timezone !== void 0) {
|
|
3958
|
+
fields.push("timezone", updates.timezone);
|
|
3959
|
+
}
|
|
3960
|
+
if (updates.allowOverlap !== void 0) {
|
|
3961
|
+
fields.push("allowOverlap", updates.allowOverlap ? "true" : "false");
|
|
3962
|
+
}
|
|
3963
|
+
if (nextRunAt !== void 0) {
|
|
3964
|
+
const val = nextRunAt !== null ? nextRunAt.getTime().toString() : "null";
|
|
3965
|
+
fields.push("nextRunAt", val);
|
|
3966
|
+
if (nextRunAt !== null) {
|
|
3967
|
+
await this.client.zadd(
|
|
3968
|
+
`${this.prefix}cron_due`,
|
|
3969
|
+
nextRunAt.getTime(),
|
|
3970
|
+
id.toString()
|
|
3971
|
+
);
|
|
3972
|
+
} else {
|
|
3973
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
3974
|
+
}
|
|
3975
|
+
}
|
|
3976
|
+
if (fields.length === 0) {
|
|
3977
|
+
log(`No fields to update for cron schedule ${id}`);
|
|
3978
|
+
return;
|
|
3979
|
+
}
|
|
3980
|
+
fields.push("updatedAt", now.toString());
|
|
3981
|
+
await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
|
|
3982
|
+
log(`Edited cron schedule ${id}`);
|
|
3983
|
+
}
|
|
3984
|
+
/**
|
|
3985
|
+
* Fetch all active cron schedules whose nextRunAt <= now.
|
|
3986
|
+
* Uses a sorted set (cron_due) for efficient range query.
|
|
3987
|
+
*/
|
|
3988
|
+
async getDueCronSchedules() {
|
|
3989
|
+
const now = this.nowMs();
|
|
3990
|
+
const ids = await this.client.zrangebyscore(
|
|
3991
|
+
`${this.prefix}cron_due`,
|
|
3992
|
+
0,
|
|
3993
|
+
now
|
|
3994
|
+
);
|
|
3995
|
+
if (ids.length === 0) {
|
|
3996
|
+
log("Found 0 due cron schedules");
|
|
3997
|
+
return [];
|
|
3998
|
+
}
|
|
3999
|
+
const schedules = [];
|
|
4000
|
+
for (const id of ids) {
|
|
4001
|
+
const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
|
|
4002
|
+
if (data && Object.keys(data).length > 0 && data.status === "active") {
|
|
4003
|
+
schedules.push(this.deserializeCronSchedule(data));
|
|
4004
|
+
}
|
|
4005
|
+
}
|
|
4006
|
+
log(`Found ${schedules.length} due cron schedules`);
|
|
4007
|
+
return schedules;
|
|
4008
|
+
}
|
|
4009
|
+
/**
|
|
4010
|
+
* Update a cron schedule after a job has been enqueued.
|
|
4011
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
4012
|
+
*/
|
|
4013
|
+
async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
|
|
4014
|
+
const fields = [
|
|
4015
|
+
"lastEnqueuedAt",
|
|
4016
|
+
lastEnqueuedAt.getTime().toString(),
|
|
4017
|
+
"lastJobId",
|
|
4018
|
+
lastJobId.toString(),
|
|
4019
|
+
"nextRunAt",
|
|
4020
|
+
nextRunAt ? nextRunAt.getTime().toString() : "null",
|
|
4021
|
+
"updatedAt",
|
|
4022
|
+
this.nowMs().toString()
|
|
4023
|
+
];
|
|
4024
|
+
await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
|
|
4025
|
+
if (nextRunAt) {
|
|
4026
|
+
await this.client.zadd(
|
|
4027
|
+
`${this.prefix}cron_due`,
|
|
4028
|
+
nextRunAt.getTime(),
|
|
4029
|
+
id.toString()
|
|
4030
|
+
);
|
|
4031
|
+
} else {
|
|
4032
|
+
await this.client.zrem(`${this.prefix}cron_due`, id.toString());
|
|
4033
|
+
}
|
|
4034
|
+
log(
|
|
4035
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
|
|
4036
|
+
);
|
|
4037
|
+
}
|
|
4038
|
+
/** Deserialize a Redis hash into a CronScheduleRecord. */
|
|
4039
|
+
deserializeCronSchedule(h) {
|
|
4040
|
+
const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
|
|
4041
|
+
const numOrNull = (v) => {
|
|
4042
|
+
const n = nullish(v);
|
|
4043
|
+
return n === null ? null : Number(n);
|
|
4044
|
+
};
|
|
4045
|
+
const dateOrNull = (v) => {
|
|
4046
|
+
const n = numOrNull(v);
|
|
4047
|
+
return n === null ? null : new Date(n);
|
|
4048
|
+
};
|
|
4049
|
+
let payload;
|
|
4050
|
+
try {
|
|
4051
|
+
payload = JSON.parse(h.payload);
|
|
4052
|
+
} catch {
|
|
4053
|
+
payload = h.payload;
|
|
4054
|
+
}
|
|
4055
|
+
let tags;
|
|
4056
|
+
try {
|
|
4057
|
+
const raw = h.tags;
|
|
4058
|
+
if (raw && raw !== "null") {
|
|
4059
|
+
tags = JSON.parse(raw);
|
|
4060
|
+
}
|
|
4061
|
+
} catch {
|
|
4062
|
+
}
|
|
4063
|
+
return {
|
|
4064
|
+
id: Number(h.id),
|
|
4065
|
+
scheduleName: h.scheduleName,
|
|
4066
|
+
cronExpression: h.cronExpression,
|
|
4067
|
+
jobType: h.jobType,
|
|
4068
|
+
payload,
|
|
4069
|
+
maxAttempts: Number(h.maxAttempts),
|
|
4070
|
+
priority: Number(h.priority),
|
|
4071
|
+
timeoutMs: numOrNull(h.timeoutMs),
|
|
4072
|
+
forceKillOnTimeout: h.forceKillOnTimeout === "true",
|
|
4073
|
+
tags,
|
|
4074
|
+
timezone: h.timezone,
|
|
4075
|
+
allowOverlap: h.allowOverlap === "true",
|
|
4076
|
+
status: h.status,
|
|
4077
|
+
lastEnqueuedAt: dateOrNull(h.lastEnqueuedAt),
|
|
4078
|
+
lastJobId: numOrNull(h.lastJobId),
|
|
4079
|
+
nextRunAt: dateOrNull(h.nextRunAt),
|
|
4080
|
+
createdAt: new Date(Number(h.createdAt)),
|
|
4081
|
+
updatedAt: new Date(Number(h.updatedAt))
|
|
4082
|
+
};
|
|
4083
|
+
}
|
|
4084
|
+
// ── Private helpers (filters) ─────────────────────────────────────────
|
|
2964
4085
|
async applyFilters(ids, filters) {
|
|
2965
4086
|
let result = ids;
|
|
2966
4087
|
if (filters.jobType) {
|
|
@@ -2990,6 +4111,19 @@ var RedisBackend = class {
|
|
|
2990
4111
|
return result;
|
|
2991
4112
|
}
|
|
2992
4113
|
};
|
|
4114
|
+
/**
 * Compute the next occurrence of a cron expression after a given instant.
 *
 * @param cronExpression - The cron expression to evaluate.
 * @param timezone - IANA timezone used for evaluation (default "UTC").
 * @param after - Reference instant; defaults to the current time.
 * @param CronImpl - Cron constructor, injectable for testing (defaults to croner's Cron).
 * @returns The next run Date, or null when no further occurrence exists.
 */
function getNextCronOccurrence(cronExpression, timezone = "UTC", after, CronImpl = Cron) {
  const reference = after ?? /* @__PURE__ */ new Date();
  const nextOccurrence = new CronImpl(cronExpression, { timezone }).nextRun(reference);
  return nextOccurrence ?? null;
}
|
|
4119
|
+
/**
 * Check whether a cron expression can be parsed.
 *
 * @param cronExpression - The expression to validate.
 * @param CronImpl - Cron constructor, injectable for testing (defaults to croner's Cron).
 * @returns true when the constructor accepts the expression, false when it throws.
 */
function validateCronExpression(cronExpression, CronImpl = Cron) {
  try {
    void new CronImpl(cronExpression);
    return true;
  } catch {
    return false;
  }
}
|
|
2993
4127
|
|
|
2994
4128
|
// src/handler-validation.ts
|
|
2995
4129
|
function validateHandlerSerializable2(handler, jobType) {
|
|
@@ -3065,10 +4199,9 @@ var initJobQueue = (config) => {
|
|
|
3065
4199
|
const backendType = config.backend ?? "postgres";
|
|
3066
4200
|
setLogContext(config.verbose ?? false);
|
|
3067
4201
|
let backend;
|
|
3068
|
-
let pool;
|
|
3069
4202
|
if (backendType === "postgres") {
|
|
3070
4203
|
const pgConfig = config;
|
|
3071
|
-
pool = createPool(pgConfig.databaseConfig);
|
|
4204
|
+
const pool = createPool(pgConfig.databaseConfig);
|
|
3072
4205
|
backend = new PostgresBackend(pool);
|
|
3073
4206
|
} else if (backendType === "redis") {
|
|
3074
4207
|
const redisConfig = config.redisConfig;
|
|
@@ -3076,13 +4209,48 @@ var initJobQueue = (config) => {
|
|
|
3076
4209
|
} else {
|
|
3077
4210
|
throw new Error(`Unknown backend: ${backendType}`);
|
|
3078
4211
|
}
|
|
3079
|
-
const
|
|
3080
|
-
|
|
3081
|
-
|
|
3082
|
-
|
|
4212
|
+
const enqueueDueCronJobsImpl = async () => {
|
|
4213
|
+
const dueSchedules = await backend.getDueCronSchedules();
|
|
4214
|
+
let count = 0;
|
|
4215
|
+
for (const schedule of dueSchedules) {
|
|
4216
|
+
if (!schedule.allowOverlap && schedule.lastJobId !== null) {
|
|
4217
|
+
const lastJob = await backend.getJob(schedule.lastJobId);
|
|
4218
|
+
if (lastJob && (lastJob.status === "pending" || lastJob.status === "processing" || lastJob.status === "waiting")) {
|
|
4219
|
+
const nextRunAt2 = getNextCronOccurrence(
|
|
4220
|
+
schedule.cronExpression,
|
|
4221
|
+
schedule.timezone
|
|
4222
|
+
);
|
|
4223
|
+
await backend.updateCronScheduleAfterEnqueue(
|
|
4224
|
+
schedule.id,
|
|
4225
|
+
/* @__PURE__ */ new Date(),
|
|
4226
|
+
schedule.lastJobId,
|
|
4227
|
+
nextRunAt2
|
|
4228
|
+
);
|
|
4229
|
+
continue;
|
|
4230
|
+
}
|
|
4231
|
+
}
|
|
4232
|
+
const jobId = await backend.addJob({
|
|
4233
|
+
jobType: schedule.jobType,
|
|
4234
|
+
payload: schedule.payload,
|
|
4235
|
+
maxAttempts: schedule.maxAttempts,
|
|
4236
|
+
priority: schedule.priority,
|
|
4237
|
+
timeoutMs: schedule.timeoutMs ?? void 0,
|
|
4238
|
+
forceKillOnTimeout: schedule.forceKillOnTimeout,
|
|
4239
|
+
tags: schedule.tags
|
|
4240
|
+
});
|
|
4241
|
+
const nextRunAt = getNextCronOccurrence(
|
|
4242
|
+
schedule.cronExpression,
|
|
4243
|
+
schedule.timezone
|
|
4244
|
+
);
|
|
4245
|
+
await backend.updateCronScheduleAfterEnqueue(
|
|
4246
|
+
schedule.id,
|
|
4247
|
+
/* @__PURE__ */ new Date(),
|
|
4248
|
+
jobId,
|
|
4249
|
+
nextRunAt
|
|
3083
4250
|
);
|
|
4251
|
+
count++;
|
|
3084
4252
|
}
|
|
3085
|
-
return
|
|
4253
|
+
return count;
|
|
3086
4254
|
};
|
|
3087
4255
|
return {
|
|
3088
4256
|
// Job queue operations
|
|
@@ -3107,8 +4275,8 @@ var initJobQueue = (config) => {
|
|
|
3107
4275
|
config.verbose ?? false
|
|
3108
4276
|
),
|
|
3109
4277
|
retryJob: (jobId) => backend.retryJob(jobId),
|
|
3110
|
-
cleanupOldJobs: (daysToKeep) => backend.cleanupOldJobs(daysToKeep),
|
|
3111
|
-
cleanupOldJobEvents: (daysToKeep) => backend.cleanupOldJobEvents(daysToKeep),
|
|
4278
|
+
cleanupOldJobs: (daysToKeep, batchSize) => backend.cleanupOldJobs(daysToKeep, batchSize),
|
|
4279
|
+
cleanupOldJobEvents: (daysToKeep, batchSize) => backend.cleanupOldJobEvents(daysToKeep, batchSize),
|
|
3112
4280
|
cancelJob: withLogContext(
|
|
3113
4281
|
(jobId) => backend.cancelJob(jobId),
|
|
3114
4282
|
config.verbose ?? false
|
|
@@ -3136,33 +4304,111 @@ var initJobQueue = (config) => {
|
|
|
3136
4304
|
(tags, mode = "all", limit, offset) => backend.getJobsByTags(tags, mode, limit, offset),
|
|
3137
4305
|
config.verbose ?? false
|
|
3138
4306
|
),
|
|
3139
|
-
// Job processing
|
|
3140
|
-
createProcessor: (handlers, options) => createProcessor(backend, handlers, options)
|
|
4307
|
+
// Job processing — automatically enqueues due cron jobs before each batch
|
|
4308
|
+
createProcessor: (handlers, options) => createProcessor(backend, handlers, options, async () => {
|
|
4309
|
+
await enqueueDueCronJobsImpl();
|
|
4310
|
+
}),
|
|
3141
4311
|
// Job events
|
|
3142
4312
|
getJobEvents: withLogContext(
|
|
3143
4313
|
(jobId) => backend.getJobEvents(jobId),
|
|
3144
4314
|
config.verbose ?? false
|
|
3145
4315
|
),
|
|
3146
|
-
// Wait / Token support (
|
|
4316
|
+
// Wait / Token support (works with all backends)
|
|
3147
4317
|
createToken: withLogContext(
|
|
3148
|
-
(options) => createWaitpoint(
|
|
4318
|
+
(options) => backend.createWaitpoint(null, options),
|
|
3149
4319
|
config.verbose ?? false
|
|
3150
4320
|
),
|
|
3151
4321
|
completeToken: withLogContext(
|
|
3152
|
-
(tokenId, data) => completeWaitpoint(
|
|
4322
|
+
(tokenId, data) => backend.completeWaitpoint(tokenId, data),
|
|
3153
4323
|
config.verbose ?? false
|
|
3154
4324
|
),
|
|
3155
4325
|
getToken: withLogContext(
|
|
3156
|
-
(tokenId) => getWaitpoint(
|
|
4326
|
+
(tokenId) => backend.getWaitpoint(tokenId),
|
|
3157
4327
|
config.verbose ?? false
|
|
3158
4328
|
),
|
|
3159
4329
|
expireTimedOutTokens: withLogContext(
|
|
3160
|
-
() => expireTimedOutWaitpoints(
|
|
4330
|
+
() => backend.expireTimedOutWaitpoints(),
|
|
4331
|
+
config.verbose ?? false
|
|
4332
|
+
),
|
|
4333
|
+
// Cron schedule operations
|
|
4334
|
+
addCronJob: withLogContext(
|
|
4335
|
+
(options) => {
|
|
4336
|
+
if (!validateCronExpression(options.cronExpression)) {
|
|
4337
|
+
return Promise.reject(
|
|
4338
|
+
new Error(`Invalid cron expression: "${options.cronExpression}"`)
|
|
4339
|
+
);
|
|
4340
|
+
}
|
|
4341
|
+
const nextRunAt = getNextCronOccurrence(
|
|
4342
|
+
options.cronExpression,
|
|
4343
|
+
options.timezone ?? "UTC"
|
|
4344
|
+
);
|
|
4345
|
+
const input = {
|
|
4346
|
+
scheduleName: options.scheduleName,
|
|
4347
|
+
cronExpression: options.cronExpression,
|
|
4348
|
+
jobType: options.jobType,
|
|
4349
|
+
payload: options.payload,
|
|
4350
|
+
maxAttempts: options.maxAttempts ?? 3,
|
|
4351
|
+
priority: options.priority ?? 0,
|
|
4352
|
+
timeoutMs: options.timeoutMs ?? null,
|
|
4353
|
+
forceKillOnTimeout: options.forceKillOnTimeout ?? false,
|
|
4354
|
+
tags: options.tags,
|
|
4355
|
+
timezone: options.timezone ?? "UTC",
|
|
4356
|
+
allowOverlap: options.allowOverlap ?? false,
|
|
4357
|
+
nextRunAt
|
|
4358
|
+
};
|
|
4359
|
+
return backend.addCronSchedule(input);
|
|
4360
|
+
},
|
|
4361
|
+
config.verbose ?? false
|
|
4362
|
+
),
|
|
4363
|
+
getCronJob: withLogContext(
|
|
4364
|
+
(id) => backend.getCronSchedule(id),
|
|
4365
|
+
config.verbose ?? false
|
|
4366
|
+
),
|
|
4367
|
+
getCronJobByName: withLogContext(
|
|
4368
|
+
(name) => backend.getCronScheduleByName(name),
|
|
4369
|
+
config.verbose ?? false
|
|
4370
|
+
),
|
|
4371
|
+
listCronJobs: withLogContext(
|
|
4372
|
+
(status) => backend.listCronSchedules(status),
|
|
4373
|
+
config.verbose ?? false
|
|
4374
|
+
),
|
|
4375
|
+
removeCronJob: withLogContext(
|
|
4376
|
+
(id) => backend.removeCronSchedule(id),
|
|
4377
|
+
config.verbose ?? false
|
|
4378
|
+
),
|
|
4379
|
+
pauseCronJob: withLogContext(
|
|
4380
|
+
(id) => backend.pauseCronSchedule(id),
|
|
4381
|
+
config.verbose ?? false
|
|
4382
|
+
),
|
|
4383
|
+
resumeCronJob: withLogContext(
|
|
4384
|
+
(id) => backend.resumeCronSchedule(id),
|
|
4385
|
+
config.verbose ?? false
|
|
4386
|
+
),
|
|
4387
|
+
editCronJob: withLogContext(
|
|
4388
|
+
async (id, updates) => {
|
|
4389
|
+
if (updates.cronExpression !== void 0 && !validateCronExpression(updates.cronExpression)) {
|
|
4390
|
+
throw new Error(
|
|
4391
|
+
`Invalid cron expression: "${updates.cronExpression}"`
|
|
4392
|
+
);
|
|
4393
|
+
}
|
|
4394
|
+
let nextRunAt;
|
|
4395
|
+
if (updates.cronExpression !== void 0 || updates.timezone !== void 0) {
|
|
4396
|
+
const existing = await backend.getCronSchedule(id);
|
|
4397
|
+
const expr = updates.cronExpression ?? existing?.cronExpression ?? "";
|
|
4398
|
+
const tz = updates.timezone ?? existing?.timezone ?? "UTC";
|
|
4399
|
+
nextRunAt = getNextCronOccurrence(expr, tz);
|
|
4400
|
+
}
|
|
4401
|
+
await backend.editCronSchedule(id, updates, nextRunAt);
|
|
4402
|
+
},
|
|
4403
|
+
config.verbose ?? false
|
|
4404
|
+
),
|
|
4405
|
+
enqueueDueCronJobs: withLogContext(
|
|
4406
|
+
() => enqueueDueCronJobsImpl(),
|
|
3161
4407
|
config.verbose ?? false
|
|
3162
4408
|
),
|
|
3163
4409
|
// Advanced access
|
|
3164
4410
|
getPool: () => {
|
|
3165
|
-
if (
|
|
4411
|
+
if (!(backend instanceof PostgresBackend)) {
|
|
3166
4412
|
throw new Error(
|
|
3167
4413
|
"getPool() is only available with the PostgreSQL backend."
|
|
3168
4414
|
);
|
|
@@ -3184,6 +4430,6 @@ var withLogContext = (fn, verbose) => (...args) => {
|
|
|
3184
4430
|
return fn(...args);
|
|
3185
4431
|
};
|
|
3186
4432
|
|
|
3187
|
-
export { FailureReason, JobEventType, PostgresBackend, WaitSignal, initJobQueue, testHandlerSerialization, validateHandlerSerializable2 as validateHandlerSerializable };
|
|
4433
|
+
export { FailureReason, JobEventType, PostgresBackend, WaitSignal, getNextCronOccurrence, initJobQueue, testHandlerSerialization, validateCronExpression, validateHandlerSerializable2 as validateHandlerSerializable };
|
|
3188
4434
|
//# sourceMappingURL=index.js.map
|
|
3189
4435
|
//# sourceMappingURL=index.js.map
|