@nicnocquee/dataqueue 1.30.0 → 1.32.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2531 -1283
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +367 -17
- package/dist/index.d.ts +367 -17
- package/dist/index.js +2530 -1284
- package/dist/index.js.map +1 -1
- package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
- package/package.json +3 -2
- package/src/backend.ts +139 -4
- package/src/backends/postgres.ts +676 -30
- package/src/backends/redis-scripts.ts +197 -22
- package/src/backends/redis.test.ts +971 -0
- package/src/backends/redis.ts +789 -22
- package/src/cron.test.ts +126 -0
- package/src/cron.ts +40 -0
- package/src/index.test.ts +361 -0
- package/src/index.ts +165 -29
- package/src/processor.ts +36 -97
- package/src/queue.test.ts +29 -0
- package/src/queue.ts +19 -251
- package/src/types.ts +177 -10
package/src/backends/postgres.ts
CHANGED
|
@@ -7,10 +7,58 @@ import {
|
|
|
7
7
|
JobEventType,
|
|
8
8
|
TagQueryMode,
|
|
9
9
|
JobType,
|
|
10
|
+
CronScheduleRecord,
|
|
11
|
+
CronScheduleStatus,
|
|
12
|
+
EditCronScheduleOptions,
|
|
13
|
+
WaitpointRecord,
|
|
14
|
+
CreateTokenOptions,
|
|
10
15
|
} from '../types.js';
|
|
11
|
-
import {
|
|
16
|
+
import { randomUUID } from 'crypto';
|
|
17
|
+
import {
|
|
18
|
+
QueueBackend,
|
|
19
|
+
JobFilters,
|
|
20
|
+
JobUpdates,
|
|
21
|
+
CronScheduleInput,
|
|
22
|
+
} from '../backend.js';
|
|
12
23
|
import { log } from '../log-context.js';
|
|
13
24
|
|
|
25
|
+
const MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1000;
|
|
26
|
+
|
|
27
|
+
/** Parse a timeout string like '10m', '1h', '24h', '7d' into milliseconds. */
|
|
28
|
+
function parseTimeoutString(timeout: string): number {
|
|
29
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
30
|
+
if (!match) {
|
|
31
|
+
throw new Error(
|
|
32
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`,
|
|
33
|
+
);
|
|
34
|
+
}
|
|
35
|
+
const value = parseInt(match[1], 10);
|
|
36
|
+
const unit = match[2];
|
|
37
|
+
let ms: number;
|
|
38
|
+
switch (unit) {
|
|
39
|
+
case 's':
|
|
40
|
+
ms = value * 1000;
|
|
41
|
+
break;
|
|
42
|
+
case 'm':
|
|
43
|
+
ms = value * 60 * 1000;
|
|
44
|
+
break;
|
|
45
|
+
case 'h':
|
|
46
|
+
ms = value * 60 * 60 * 1000;
|
|
47
|
+
break;
|
|
48
|
+
case 'd':
|
|
49
|
+
ms = value * 24 * 60 * 60 * 1000;
|
|
50
|
+
break;
|
|
51
|
+
default:
|
|
52
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
53
|
+
}
|
|
54
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
55
|
+
throw new Error(
|
|
56
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`,
|
|
57
|
+
);
|
|
58
|
+
}
|
|
59
|
+
return ms;
|
|
60
|
+
}
|
|
61
|
+
|
|
14
62
|
export class PostgresBackend implements QueueBackend {
|
|
15
63
|
  // Connections are borrowed per-operation from this shared pg Pool and
  // released in each method's finally block; the pool's lifecycle (creation
  // and shutdown) is owned by the caller.
  constructor(private pool: Pool) {}
|
|
16
64
|
|
|
@@ -979,46 +1027,93 @@ export class PostgresBackend implements QueueBackend {
|
|
|
979
1027
|
}
|
|
980
1028
|
}
|
|
981
1029
|
|
|
982
|
-
|
|
983
|
-
|
|
1030
|
+
/**
|
|
1031
|
+
* Delete completed jobs older than the given number of days.
|
|
1032
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
1033
|
+
* and excessive WAL bloat at scale.
|
|
1034
|
+
*
|
|
1035
|
+
* @param daysToKeep - Number of days to retain completed jobs (default 30).
|
|
1036
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
1037
|
+
* @returns Total number of deleted jobs.
|
|
1038
|
+
*/
|
|
1039
|
+
async cleanupOldJobs(daysToKeep = 30, batchSize = 1000): Promise<number> {
|
|
1040
|
+
let totalDeleted = 0;
|
|
1041
|
+
|
|
984
1042
|
try {
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
1043
|
+
let deletedInBatch: number;
|
|
1044
|
+
do {
|
|
1045
|
+
const client = await this.pool.connect();
|
|
1046
|
+
try {
|
|
1047
|
+
const result = await client.query(
|
|
1048
|
+
`
|
|
1049
|
+
DELETE FROM job_queue
|
|
1050
|
+
WHERE id IN (
|
|
1051
|
+
SELECT id FROM job_queue
|
|
1052
|
+
WHERE status = 'completed'
|
|
1053
|
+
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
1054
|
+
LIMIT $2
|
|
1055
|
+
)
|
|
1056
|
+
`,
|
|
1057
|
+
[daysToKeep, batchSize],
|
|
1058
|
+
);
|
|
1059
|
+
deletedInBatch = result.rowCount || 0;
|
|
1060
|
+
totalDeleted += deletedInBatch;
|
|
1061
|
+
} finally {
|
|
1062
|
+
client.release();
|
|
1063
|
+
}
|
|
1064
|
+
} while (deletedInBatch === batchSize);
|
|
1065
|
+
|
|
1066
|
+
log(`Deleted ${totalDeleted} old jobs`);
|
|
1067
|
+
return totalDeleted;
|
|
996
1068
|
} catch (error) {
|
|
997
1069
|
log(`Error cleaning up old jobs: ${error}`);
|
|
998
1070
|
throw error;
|
|
999
|
-
} finally {
|
|
1000
|
-
client.release();
|
|
1001
1071
|
}
|
|
1002
1072
|
}
|
|
1003
1073
|
|
|
1004
|
-
|
|
1005
|
-
|
|
1074
|
+
/**
|
|
1075
|
+
* Delete job events older than the given number of days.
|
|
1076
|
+
* Deletes in batches of 1000 to avoid long-running transactions
|
|
1077
|
+
* and excessive WAL bloat at scale.
|
|
1078
|
+
*
|
|
1079
|
+
* @param daysToKeep - Number of days to retain events (default 30).
|
|
1080
|
+
* @param batchSize - Number of rows to delete per batch (default 1000).
|
|
1081
|
+
* @returns Total number of deleted events.
|
|
1082
|
+
*/
|
|
1083
|
+
async cleanupOldJobEvents(
|
|
1084
|
+
daysToKeep = 30,
|
|
1085
|
+
batchSize = 1000,
|
|
1086
|
+
): Promise<number> {
|
|
1087
|
+
let totalDeleted = 0;
|
|
1088
|
+
|
|
1006
1089
|
try {
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1090
|
+
let deletedInBatch: number;
|
|
1091
|
+
do {
|
|
1092
|
+
const client = await this.pool.connect();
|
|
1093
|
+
try {
|
|
1094
|
+
const result = await client.query(
|
|
1095
|
+
`
|
|
1096
|
+
DELETE FROM job_events
|
|
1097
|
+
WHERE id IN (
|
|
1098
|
+
SELECT id FROM job_events
|
|
1099
|
+
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
1100
|
+
LIMIT $2
|
|
1101
|
+
)
|
|
1102
|
+
`,
|
|
1103
|
+
[daysToKeep, batchSize],
|
|
1104
|
+
);
|
|
1105
|
+
deletedInBatch = result.rowCount || 0;
|
|
1106
|
+
totalDeleted += deletedInBatch;
|
|
1107
|
+
} finally {
|
|
1108
|
+
client.release();
|
|
1109
|
+
}
|
|
1110
|
+
} while (deletedInBatch === batchSize);
|
|
1111
|
+
|
|
1112
|
+
log(`Deleted ${totalDeleted} old job events`);
|
|
1113
|
+
return totalDeleted;
|
|
1017
1114
|
} catch (error) {
|
|
1018
1115
|
log(`Error cleaning up old job events: ${error}`);
|
|
1019
1116
|
throw error;
|
|
1020
|
-
} finally {
|
|
1021
|
-
client.release();
|
|
1022
1117
|
}
|
|
1023
1118
|
}
|
|
1024
1119
|
|
|
@@ -1083,6 +1178,557 @@ export class PostgresBackend implements QueueBackend {
|
|
|
1083
1178
|
}
|
|
1084
1179
|
}
|
|
1085
1180
|
|
|
1181
|
+
  // ── Cron schedules ──────────────────────────────────────────────────

  /**
   * Create a cron schedule and return its ID.
   *
   * @param input - Schedule definition: unique name, cron expression, job
   *   template (type/payload/attempts/priority/timeout), timezone, overlap
   *   policy, and the precomputed first nextRunAt.
   * @returns The numeric ID of the inserted cron_schedules row.
   * @throws Error with a friendly message when a schedule with the same
   *   schedule_name already exists (Postgres unique violation, code 23505);
   *   any other database error is logged and rethrown as-is.
   */
  async addCronSchedule(input: CronScheduleInput): Promise<number> {
    const client = await this.pool.connect();
    try {
      // Parameter order must match the column list exactly ($1..$12).
      const result = await client.query(
        `INSERT INTO cron_schedules
         (schedule_name, cron_expression, job_type, payload, max_attempts,
          priority, timeout_ms, force_kill_on_timeout, tags, timezone,
          allow_overlap, next_run_at)
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
         RETURNING id`,
        [
          input.scheduleName,
          input.cronExpression,
          input.jobType,
          input.payload,
          input.maxAttempts,
          input.priority,
          input.timeoutMs,
          input.forceKillOnTimeout,
          input.tags ?? null, // tags is the only optional field normalized to NULL here
          input.timezone,
          input.allowOverlap,
          input.nextRunAt,
        ],
      );
      const id = result.rows[0].id;
      log(`Added cron schedule ${id}: "${input.scheduleName}"`);
      return id;
    } catch (error: any) {
      // Unique constraint violation on schedule_name
      if (error?.code === '23505') {
        throw new Error(
          `Cron schedule with name "${input.scheduleName}" already exists`,
        );
      }
      log(`Error adding cron schedule: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1225
|
+
|
|
1226
|
+
  /**
   * Get a cron schedule by ID.
   *
   * @param id - Primary key of the cron_schedules row.
   * @returns The schedule with columns aliased to camelCase, or null if no
   *   row matches.
   */
  async getCronSchedule(id: number): Promise<CronScheduleRecord | null> {
    const client = await this.pool.connect();
    try {
      // Quoted aliases map snake_case columns directly onto the camelCase
      // properties of CronScheduleRecord, so the row is returned as-is.
      const result = await client.query(
        `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
                job_type AS "jobType", payload, max_attempts AS "maxAttempts",
                priority, timeout_ms AS "timeoutMs",
                force_kill_on_timeout AS "forceKillOnTimeout", tags,
                timezone, allow_overlap AS "allowOverlap", status,
                last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                next_run_at AS "nextRunAt",
                created_at AS "createdAt", updated_at AS "updatedAt"
         FROM cron_schedules WHERE id = $1`,
        [id],
      );
      if (result.rows.length === 0) return null;
      return result.rows[0] as CronScheduleRecord;
    } catch (error) {
      log(`Error getting cron schedule ${id}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1251
|
+
|
|
1252
|
+
  /**
   * Get a cron schedule by its unique name.
   *
   * @param name - The schedule_name (unique) to look up.
   * @returns The schedule with columns aliased to camelCase, or null if no
   *   row matches.
   */
  async getCronScheduleByName(
    name: string,
  ): Promise<CronScheduleRecord | null> {
    const client = await this.pool.connect();
    try {
      // Same aliased projection as getCronSchedule, keyed by name instead of id.
      const result = await client.query(
        `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
                job_type AS "jobType", payload, max_attempts AS "maxAttempts",
                priority, timeout_ms AS "timeoutMs",
                force_kill_on_timeout AS "forceKillOnTimeout", tags,
                timezone, allow_overlap AS "allowOverlap", status,
                last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                next_run_at AS "nextRunAt",
                created_at AS "createdAt", updated_at AS "updatedAt"
         FROM cron_schedules WHERE schedule_name = $1`,
        [name],
      );
      if (result.rows.length === 0) return null;
      return result.rows[0] as CronScheduleRecord;
    } catch (error) {
      log(`Error getting cron schedule by name "${name}": ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1279
|
+
|
|
1280
|
+
  /**
   * List cron schedules, optionally filtered by status.
   *
   * @param status - When provided, only schedules in this status are returned;
   *   omitted/falsy means all schedules.
   * @returns Schedules ordered by creation time, oldest first.
   */
  async listCronSchedules(
    status?: CronScheduleStatus,
  ): Promise<CronScheduleRecord[]> {
    const client = await this.pool.connect();
    try {
      let query = `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
                job_type AS "jobType", payload, max_attempts AS "maxAttempts",
                priority, timeout_ms AS "timeoutMs",
                force_kill_on_timeout AS "forceKillOnTimeout", tags,
                timezone, allow_overlap AS "allowOverlap", status,
                last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                next_run_at AS "nextRunAt",
                created_at AS "createdAt", updated_at AS "updatedAt"
         FROM cron_schedules`;
      const params: any[] = [];
      // Append the optional filter; $1 is only referenced when a status was given.
      if (status) {
        query += ` WHERE status = $1`;
        params.push(status);
      }
      query += ` ORDER BY created_at ASC`;
      const result = await client.query(query, params);
      return result.rows as CronScheduleRecord[];
    } catch (error) {
      log(`Error listing cron schedules: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1310
|
+
|
|
1311
|
+
/** Delete a cron schedule by ID. */
|
|
1312
|
+
async removeCronSchedule(id: number): Promise<void> {
|
|
1313
|
+
const client = await this.pool.connect();
|
|
1314
|
+
try {
|
|
1315
|
+
await client.query(`DELETE FROM cron_schedules WHERE id = $1`, [id]);
|
|
1316
|
+
log(`Removed cron schedule ${id}`);
|
|
1317
|
+
} catch (error) {
|
|
1318
|
+
log(`Error removing cron schedule ${id}: ${error}`);
|
|
1319
|
+
throw error;
|
|
1320
|
+
} finally {
|
|
1321
|
+
client.release();
|
|
1322
|
+
}
|
|
1323
|
+
}
|
|
1324
|
+
|
|
1325
|
+
/** Pause a cron schedule. */
|
|
1326
|
+
async pauseCronSchedule(id: number): Promise<void> {
|
|
1327
|
+
const client = await this.pool.connect();
|
|
1328
|
+
try {
|
|
1329
|
+
await client.query(
|
|
1330
|
+
`UPDATE cron_schedules SET status = 'paused', updated_at = NOW() WHERE id = $1`,
|
|
1331
|
+
[id],
|
|
1332
|
+
);
|
|
1333
|
+
log(`Paused cron schedule ${id}`);
|
|
1334
|
+
} catch (error) {
|
|
1335
|
+
log(`Error pausing cron schedule ${id}: ${error}`);
|
|
1336
|
+
throw error;
|
|
1337
|
+
} finally {
|
|
1338
|
+
client.release();
|
|
1339
|
+
}
|
|
1340
|
+
}
|
|
1341
|
+
|
|
1342
|
+
/** Resume a paused cron schedule. */
|
|
1343
|
+
async resumeCronSchedule(id: number): Promise<void> {
|
|
1344
|
+
const client = await this.pool.connect();
|
|
1345
|
+
try {
|
|
1346
|
+
await client.query(
|
|
1347
|
+
`UPDATE cron_schedules SET status = 'active', updated_at = NOW() WHERE id = $1`,
|
|
1348
|
+
[id],
|
|
1349
|
+
);
|
|
1350
|
+
log(`Resumed cron schedule ${id}`);
|
|
1351
|
+
} catch (error) {
|
|
1352
|
+
log(`Error resuming cron schedule ${id}: ${error}`);
|
|
1353
|
+
throw error;
|
|
1354
|
+
} finally {
|
|
1355
|
+
client.release();
|
|
1356
|
+
}
|
|
1357
|
+
}
|
|
1358
|
+
|
|
1359
|
+
  /**
   * Edit a cron schedule.
   *
   * Builds a dynamic UPDATE containing only the fields that are defined on
   * `updates`; undefined fields are left untouched.
   *
   * @param id - Primary key of the schedule to edit.
   * @param updates - Partial set of editable fields.
   * @param nextRunAt - New next-run timestamp; null clears it; omitting the
   *   argument leaves it unchanged. Presumably recomputed by the caller when
   *   the cron expression or timezone changes — confirm against callers.
   */
  async editCronSchedule(
    id: number,
    updates: EditCronScheduleOptions,
    nextRunAt?: Date | null,
  ): Promise<void> {
    const client = await this.pool.connect();
    try {
      const updateFields: string[] = [];
      const params: any[] = [];
      // Next positional placeholder number ($1, $2, …); post-incremented as
      // each SET clause is pushed so clause order and params order stay in sync.
      let paramIdx = 1;

      if (updates.cronExpression !== undefined) {
        updateFields.push(`cron_expression = $${paramIdx++}`);
        params.push(updates.cronExpression);
      }
      if (updates.payload !== undefined) {
        updateFields.push(`payload = $${paramIdx++}`);
        params.push(updates.payload);
      }
      if (updates.maxAttempts !== undefined) {
        updateFields.push(`max_attempts = $${paramIdx++}`);
        params.push(updates.maxAttempts);
      }
      if (updates.priority !== undefined) {
        updateFields.push(`priority = $${paramIdx++}`);
        params.push(updates.priority);
      }
      if (updates.timeoutMs !== undefined) {
        updateFields.push(`timeout_ms = $${paramIdx++}`);
        params.push(updates.timeoutMs);
      }
      if (updates.forceKillOnTimeout !== undefined) {
        updateFields.push(`force_kill_on_timeout = $${paramIdx++}`);
        params.push(updates.forceKillOnTimeout);
      }
      if (updates.tags !== undefined) {
        updateFields.push(`tags = $${paramIdx++}`);
        params.push(updates.tags);
      }
      if (updates.timezone !== undefined) {
        updateFields.push(`timezone = $${paramIdx++}`);
        params.push(updates.timezone);
      }
      if (updates.allowOverlap !== undefined) {
        updateFields.push(`allow_overlap = $${paramIdx++}`);
        params.push(updates.allowOverlap);
      }
      if (nextRunAt !== undefined) {
        updateFields.push(`next_run_at = $${paramIdx++}`);
        params.push(nextRunAt);
      }

      // Avoid issuing "SET updated_at = NOW()" alone when nothing changed.
      if (updateFields.length === 0) {
        log(`No fields to update for cron schedule ${id}`);
        return;
      }

      updateFields.push(`updated_at = NOW()`);
      params.push(id);

      // paramIdx was post-incremented for every value pushed above, so it now
      // names the final placeholder — the slot holding `id`.
      const query = `UPDATE cron_schedules SET ${updateFields.join(', ')} WHERE id = $${paramIdx}`;
      await client.query(query, params);
      log(`Edited cron schedule ${id}`);
    } catch (error) {
      log(`Error editing cron schedule ${id}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1430
|
+
|
|
1431
|
+
  /**
   * Atomically fetch all active cron schedules whose nextRunAt <= NOW().
   * Uses FOR UPDATE SKIP LOCKED to prevent duplicate enqueuing across workers.
   *
   * NOTE(review): this statement runs outside an explicit transaction, so the
   * implicit transaction commits — and the FOR UPDATE row locks are released —
   * as soon as the SELECT returns. Concurrent workers only skip each other's
   * rows while their statements actually overlap; verify this is sufficient
   * to prevent duplicate enqueues, or wrap the fetch-and-advance in one
   * transaction.
   *
   * @returns Due schedules ordered by nextRunAt, oldest first; an empty array
   *   if the cron_schedules table has not been migrated yet.
   */
  async getDueCronSchedules(): Promise<CronScheduleRecord[]> {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
                job_type AS "jobType", payload, max_attempts AS "maxAttempts",
                priority, timeout_ms AS "timeoutMs",
                force_kill_on_timeout AS "forceKillOnTimeout", tags,
                timezone, allow_overlap AS "allowOverlap", status,
                last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                next_run_at AS "nextRunAt",
                created_at AS "createdAt", updated_at AS "updatedAt"
         FROM cron_schedules
         WHERE status = 'active'
           AND next_run_at IS NOT NULL
           AND next_run_at <= NOW()
         ORDER BY next_run_at ASC
         FOR UPDATE SKIP LOCKED`,
      );
      log(`Found ${result.rows.length} due cron schedules`);
      return result.rows as CronScheduleRecord[];
    } catch (error: any) {
      // 42P01 = undefined_table — cron migration hasn't been run yet
      if (error?.code === '42P01') {
        log('cron_schedules table does not exist, skipping cron enqueue');
        return [];
      }
      log(`Error getting due cron schedules: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1468
|
+
|
|
1469
|
+
/**
|
|
1470
|
+
* Update a cron schedule after a job has been enqueued.
|
|
1471
|
+
* Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
|
|
1472
|
+
*/
|
|
1473
|
+
async updateCronScheduleAfterEnqueue(
|
|
1474
|
+
id: number,
|
|
1475
|
+
lastEnqueuedAt: Date,
|
|
1476
|
+
lastJobId: number,
|
|
1477
|
+
nextRunAt: Date | null,
|
|
1478
|
+
): Promise<void> {
|
|
1479
|
+
const client = await this.pool.connect();
|
|
1480
|
+
try {
|
|
1481
|
+
await client.query(
|
|
1482
|
+
`UPDATE cron_schedules
|
|
1483
|
+
SET last_enqueued_at = $2,
|
|
1484
|
+
last_job_id = $3,
|
|
1485
|
+
next_run_at = $4,
|
|
1486
|
+
updated_at = NOW()
|
|
1487
|
+
WHERE id = $1`,
|
|
1488
|
+
[id, lastEnqueuedAt, lastJobId, nextRunAt],
|
|
1489
|
+
);
|
|
1490
|
+
log(
|
|
1491
|
+
`Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? 'null'}`,
|
|
1492
|
+
);
|
|
1493
|
+
} catch (error) {
|
|
1494
|
+
log(`Error updating cron schedule ${id} after enqueue: ${error}`);
|
|
1495
|
+
throw error;
|
|
1496
|
+
} finally {
|
|
1497
|
+
client.release();
|
|
1498
|
+
}
|
|
1499
|
+
}
|
|
1500
|
+
|
|
1501
|
+
  // ── Wait / step-data support ────────────────────────────────────────

  /**
   * Transition a job from 'processing' to 'waiting' status.
   * Persists step data so the handler can resume from where it left off.
   *
   * The UPDATE is guarded by `status = 'processing'`, so a job that was
   * reclaimed or finished in the meantime is left untouched (and no waiting
   * event is recorded for it).
   *
   * @param jobId - The job to pause.
   * @param options - Wait configuration including optional waitUntil date, token ID, and step data.
   */
  async waitJob(
    jobId: number,
    options: {
      waitUntil?: Date;
      waitTokenId?: string;
      stepData: Record<string, any>;
    },
  ): Promise<void> {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `
        UPDATE job_queue
        SET status = 'waiting',
            wait_until = $2,
            wait_token_id = $3,
            step_data = $4,
            locked_at = NULL,
            locked_by = NULL,
            updated_at = NOW()
        WHERE id = $1 AND status = 'processing'
        `,
        [
          jobId,
          options.waitUntil ?? null,
          options.waitTokenId ?? null,
          JSON.stringify(options.stepData),
        ],
      );
      // rowCount 0 means the status guard did not match — not an error.
      if (result.rowCount === 0) {
        log(
          `Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`,
        );
        return;
      }
      // Record the event only after the transition actually happened.
      await this.recordJobEvent(jobId, JobEventType.Waiting, {
        waitUntil: options.waitUntil?.toISOString() ?? null,
        waitTokenId: options.waitTokenId ?? null,
      });
      log(`Job ${jobId} set to waiting`);
    } catch (error) {
      log(`Error setting job ${jobId} to waiting: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1557
|
+
|
|
1558
|
+
/**
|
|
1559
|
+
* Persist step data for a job. Called after each ctx.run() step completes.
|
|
1560
|
+
* Best-effort: does not throw to avoid killing the running handler.
|
|
1561
|
+
*
|
|
1562
|
+
* @param jobId - The job to update.
|
|
1563
|
+
* @param stepData - The step data to persist.
|
|
1564
|
+
*/
|
|
1565
|
+
async updateStepData(
|
|
1566
|
+
jobId: number,
|
|
1567
|
+
stepData: Record<string, any>,
|
|
1568
|
+
): Promise<void> {
|
|
1569
|
+
const client = await this.pool.connect();
|
|
1570
|
+
try {
|
|
1571
|
+
await client.query(
|
|
1572
|
+
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
1573
|
+
[jobId, JSON.stringify(stepData)],
|
|
1574
|
+
);
|
|
1575
|
+
} catch (error) {
|
|
1576
|
+
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
1577
|
+
} finally {
|
|
1578
|
+
client.release();
|
|
1579
|
+
}
|
|
1580
|
+
}
|
|
1581
|
+
|
|
1582
|
+
  /**
   * Create a waitpoint token in the database.
   *
   * @param jobId - The job ID to associate with the token (null if created outside a handler).
   * @param options - Optional timeout string (e.g. '10m', '1h') and tags.
   * @returns The created waitpoint with its unique ID (format `wp_<uuid>`).
   * @throws If the timeout string is malformed/too large, or the insert fails.
   */
  async createWaitpoint(
    jobId: number | null,
    options?: CreateTokenOptions,
  ): Promise<{ id: string }> {
    const client = await this.pool.connect();
    try {
      const id = `wp_${randomUUID()}`;
      // NULL timeout_at means the token never expires on its own.
      let timeoutAt: Date | null = null;

      if (options?.timeout) {
        const ms = parseTimeoutString(options.timeout);
        timeoutAt = new Date(Date.now() + ms);
      }

      await client.query(
        `INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
        [id, jobId, timeoutAt, options?.tags ?? null],
      );

      log(`Created waitpoint ${id} for job ${jobId}`);
      return { id };
    } catch (error) {
      log(`Error creating waitpoint: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
|
|
1617
|
+
|
|
1618
|
+
/**
|
|
1619
|
+
* Complete a waitpoint token and move the associated job back to 'pending'.
|
|
1620
|
+
*
|
|
1621
|
+
* @param tokenId - The waitpoint token ID to complete.
|
|
1622
|
+
* @param data - Optional data to pass to the waiting handler.
|
|
1623
|
+
*/
|
|
1624
|
+
async completeWaitpoint(tokenId: string, data?: any): Promise<void> {
|
|
1625
|
+
const client = await this.pool.connect();
|
|
1626
|
+
try {
|
|
1627
|
+
await client.query('BEGIN');
|
|
1628
|
+
|
|
1629
|
+
const wpResult = await client.query(
|
|
1630
|
+
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
1631
|
+
WHERE id = $1 AND status = 'waiting'
|
|
1632
|
+
RETURNING job_id`,
|
|
1633
|
+
[tokenId, data != null ? JSON.stringify(data) : null],
|
|
1634
|
+
);
|
|
1635
|
+
|
|
1636
|
+
if (wpResult.rows.length === 0) {
|
|
1637
|
+
await client.query('ROLLBACK');
|
|
1638
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
1639
|
+
return;
|
|
1640
|
+
}
|
|
1641
|
+
|
|
1642
|
+
const jobId = wpResult.rows[0].job_id;
|
|
1643
|
+
|
|
1644
|
+
if (jobId != null) {
|
|
1645
|
+
await client.query(
|
|
1646
|
+
`UPDATE job_queue
|
|
1647
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1648
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
1649
|
+
[jobId],
|
|
1650
|
+
);
|
|
1651
|
+
}
|
|
1652
|
+
|
|
1653
|
+
await client.query('COMMIT');
|
|
1654
|
+
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
1655
|
+
} catch (error) {
|
|
1656
|
+
await client.query('ROLLBACK');
|
|
1657
|
+
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
1658
|
+
throw error;
|
|
1659
|
+
} finally {
|
|
1660
|
+
client.release();
|
|
1661
|
+
}
|
|
1662
|
+
}
|
|
1663
|
+
|
|
1664
|
+
/**
|
|
1665
|
+
* Retrieve a waitpoint token by its ID.
|
|
1666
|
+
*
|
|
1667
|
+
* @param tokenId - The waitpoint token ID to look up.
|
|
1668
|
+
* @returns The waitpoint record, or null if not found.
|
|
1669
|
+
*/
|
|
1670
|
+
async getWaitpoint(tokenId: string): Promise<WaitpointRecord | null> {
|
|
1671
|
+
const client = await this.pool.connect();
|
|
1672
|
+
try {
|
|
1673
|
+
const result = await client.query(
|
|
1674
|
+
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
1675
|
+
[tokenId],
|
|
1676
|
+
);
|
|
1677
|
+
if (result.rows.length === 0) return null;
|
|
1678
|
+
return result.rows[0] as WaitpointRecord;
|
|
1679
|
+
} catch (error) {
|
|
1680
|
+
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
1681
|
+
throw error;
|
|
1682
|
+
} finally {
|
|
1683
|
+
client.release();
|
|
1684
|
+
}
|
|
1685
|
+
}
|
|
1686
|
+
|
|
1687
|
+
/**
|
|
1688
|
+
* Expire timed-out waitpoint tokens and move their associated jobs back to 'pending'.
|
|
1689
|
+
*
|
|
1690
|
+
* @returns The number of tokens that were expired.
|
|
1691
|
+
*/
|
|
1692
|
+
async expireTimedOutWaitpoints(): Promise<number> {
|
|
1693
|
+
const client = await this.pool.connect();
|
|
1694
|
+
try {
|
|
1695
|
+
await client.query('BEGIN');
|
|
1696
|
+
|
|
1697
|
+
const result = await client.query(
|
|
1698
|
+
`UPDATE waitpoints
|
|
1699
|
+
SET status = 'timed_out'
|
|
1700
|
+
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
1701
|
+
RETURNING id, job_id`,
|
|
1702
|
+
);
|
|
1703
|
+
|
|
1704
|
+
for (const row of result.rows) {
|
|
1705
|
+
if (row.job_id != null) {
|
|
1706
|
+
await client.query(
|
|
1707
|
+
`UPDATE job_queue
|
|
1708
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1709
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
1710
|
+
[row.job_id],
|
|
1711
|
+
);
|
|
1712
|
+
}
|
|
1713
|
+
}
|
|
1714
|
+
|
|
1715
|
+
await client.query('COMMIT');
|
|
1716
|
+
const count = result.rowCount || 0;
|
|
1717
|
+
if (count > 0) {
|
|
1718
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
1719
|
+
}
|
|
1720
|
+
return count;
|
|
1721
|
+
} catch (error) {
|
|
1722
|
+
await client.query('ROLLBACK');
|
|
1723
|
+
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
1724
|
+
throw error;
|
|
1725
|
+
} finally {
|
|
1726
|
+
client.release();
|
|
1727
|
+
}
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
1731
|
+
|
|
1086
1732
|
async setPendingReasonForUnpickedJobs(
|
|
1087
1733
|
reason: string,
|
|
1088
1734
|
jobType?: string | string[],
|