@nicnocquee/dataqueue 1.34.0 → 1.35.0-beta.20260224110011
This diff shows the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/ai/docs-content.json +27 -15
- package/ai/rules/advanced.md +78 -1
- package/ai/rules/basic.md +73 -3
- package/ai/rules/react-dashboard.md +5 -1
- package/ai/skills/dataqueue-advanced/SKILL.md +181 -0
- package/ai/skills/dataqueue-core/SKILL.md +109 -3
- package/ai/skills/dataqueue-react/SKILL.md +19 -7
- package/dist/index.cjs +1168 -173
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +394 -13
- package/dist/index.d.ts +394 -13
- package/dist/index.js +1168 -173
- package/dist/index.js.map +1 -1
- package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
- package/migrations/1781200000006_add_output_to_job_queue.sql +3 -0
- package/migrations/1781200000007_add_group_fields_to_job_queue.sql +16 -0
- package/package.json +1 -1
- package/src/backend.ts +37 -3
- package/src/backends/postgres.ts +458 -76
- package/src/backends/redis-scripts.ts +273 -37
- package/src/backends/redis.test.ts +753 -0
- package/src/backends/redis.ts +253 -15
- package/src/db-util.ts +1 -1
- package/src/index.test.ts +811 -12
- package/src/index.ts +106 -14
- package/src/processor.test.ts +18 -0
- package/src/processor.ts +147 -49
- package/src/queue.test.ts +584 -0
- package/src/queue.ts +22 -3
- package/src/supervisor.test.ts +340 -0
- package/src/supervisor.ts +177 -0
- package/src/types.ts +353 -3
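The headline changes of this beta are visible in the file list above: per-job retry configuration (`retry_delay`, `retry_backoff`, `retry_delay_max`), a persisted job `output` column, and job groups (`group_id`, `group_tier`), each backed by one of the three new migrations. As a rough sketch of how the new `JobOptions` fields fit together (the `queue` parameter shape here is illustrative, not the package's public API; the option names come from the `addJob` destructuring in the diff below):

```ts
// Sketch only: `queue` stands in for an initialized dataqueue instance.
// The option names mirror the new JobOptions fields in this diff.
async function enqueueWelcomeEmail(queue: {
  addJob(job: Record<string, unknown>): Promise<number>;
}): Promise<number> {
  return queue.addJob({
    jobType: 'send_email',
    payload: { to: 'user@example.com' },
    maxAttempts: 5,
    retryDelay: 30, // base retry delay in seconds (the SQL multiplies by INTERVAL '1 second')
    retryBackoff: true, // exponential backoff with jitter; see failJob below
    retryDelayMax: 600, // cap on the computed delay
    group: { id: 'tenant-42', tier: 'standard' }, // maps to group_id / group_tier
  });
}
```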
package/src/backends/postgres.ts
CHANGED
@@ -12,6 +12,8 @@ import {
   EditCronScheduleOptions,
   WaitpointRecord,
   CreateTokenOptions,
+  AddJobOptions,
+  DatabaseClient,
 } from '../types.js';
 import { randomUUID } from 'crypto';
 import {
@@ -103,18 +105,35 @@ export class PostgresBackend implements QueueBackend {
 
   // ── Job CRUD ──────────────────────────────────────────────────────────
 
-
-
-
-
-
-
-
-
-
-
-
-
+  /**
+   * Add a job and return its numeric ID.
+   *
+   * @param job - Job configuration.
+   * @param options - Optional. Pass `{ db }` to run the INSERT on an external
+   * client (e.g., inside a transaction) so the job is part of the caller's
+   * transaction. The event INSERT also uses the same client.
+   */
+  async addJob<PayloadMap, T extends JobType<PayloadMap>>(
+    {
+      jobType,
+      payload,
+      maxAttempts = 3,
+      priority = 0,
+      runAt = null,
+      timeoutMs = undefined,
+      forceKillOnTimeout = false,
+      tags = undefined,
+      idempotencyKey = undefined,
+      retryDelay = undefined,
+      retryBackoff = undefined,
+      retryDelayMax = undefined,
+      group = undefined,
+    }: JobOptions<PayloadMap, T>,
+    options?: AddJobOptions,
+  ): Promise<number> {
+    const externalClient = options?.db;
+    const client: DatabaseClient =
+      externalClient ?? (await this.pool.connect());
     try {
       let result;
       const onConflict = idempotencyKey
@@ -124,8 +143,8 @@ export class PostgresBackend implements QueueBackend {
       if (runAt) {
         result = await client.query(
           `INSERT INTO job_queue
-           (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
-           VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+           (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, group_id, group_tier)
+           VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
            ${onConflict}
            RETURNING id`,
           [
@@ -138,13 +157,18 @@ export class PostgresBackend implements QueueBackend {
             forceKillOnTimeout ?? false,
             tags ?? null,
             idempotencyKey ?? null,
+            retryDelay ?? null,
+            retryBackoff ?? null,
+            retryDelayMax ?? null,
+            group?.id ?? null,
+            group?.tier ?? null,
           ],
         );
       } else {
         result = await client.query(
           `INSERT INTO job_queue
-           (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
-           VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+           (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, group_id, group_tier)
+           VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
            ${onConflict}
            RETURNING id`,
           [
@@ -156,11 +180,15 @@ export class PostgresBackend implements QueueBackend {
             forceKillOnTimeout ?? false,
             tags ?? null,
             idempotencyKey ?? null,
+            retryDelay ?? null,
+            retryBackoff ?? null,
+            retryDelayMax ?? null,
+            group?.id ?? null,
+            group?.tier ?? null,
           ],
         );
       }
 
-      // If ON CONFLICT DO NOTHING was triggered, no rows are returned.
       if (result.rows.length === 0 && idempotencyKey) {
         const existing = await client.query(
           `SELECT id FROM job_queue WHERE idempotency_key = $1`,
@@ -181,18 +209,220 @@ export class PostgresBackend implements QueueBackend {
       log(
         `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ''}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ''}`,
       );
-
-
-
-
-
-
+
+      if (externalClient) {
+        try {
+          await client.query(
+            `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
+            [
+              jobId,
+              JobEventType.Added,
+              JSON.stringify({ jobType, payload, tags, idempotencyKey }),
+            ],
+          );
+        } catch (error) {
+          log(`Error recording job event for job ${jobId}: ${error}`);
+        }
+      } else {
+        await this.recordJobEvent(jobId, JobEventType.Added, {
+          jobType,
+          payload,
+          tags,
+          idempotencyKey,
+        });
+      }
       return jobId;
     } catch (error) {
       log(`Error adding job: ${error}`);
       throw error;
     } finally {
-      client.release();
+      if (!externalClient) (client as any).release();
+    }
+  }
+
+  /**
+   * Insert multiple jobs in a single database round-trip.
+   *
+   * Uses a multi-row INSERT with ON CONFLICT handling for idempotency keys.
+   * Returns IDs in the same order as the input array.
+   */
+  async addJobs<PayloadMap, T extends JobType<PayloadMap>>(
+    jobs: JobOptions<PayloadMap, T>[],
+    options?: AddJobOptions,
+  ): Promise<number[]> {
+    if (jobs.length === 0) return [];
+
+    const externalClient = options?.db;
+    const client: DatabaseClient =
+      externalClient ?? (await this.pool.connect());
+    try {
+      const COLS_PER_JOB = 14;
+      const valueClauses: string[] = [];
+      const params: any[] = [];
+
+      const hasAnyIdempotencyKey = jobs.some((j) => j.idempotencyKey);
+
+      for (let i = 0; i < jobs.length; i++) {
+        const {
+          jobType,
+          payload,
+          maxAttempts = 3,
+          priority = 0,
+          runAt = null,
+          timeoutMs = undefined,
+          forceKillOnTimeout = false,
+          tags = undefined,
+          idempotencyKey = undefined,
+          retryDelay = undefined,
+          retryBackoff = undefined,
+          retryDelayMax = undefined,
+          group = undefined,
+        } = jobs[i];
+
+        const base = i * COLS_PER_JOB;
+        valueClauses.push(
+          `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, ` +
+            `COALESCE($${base + 5}::timestamptz, CURRENT_TIMESTAMP), ` +
+            `$${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, ` +
+            `$${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14})`,
+        );
+        params.push(
+          jobType,
+          payload,
+          maxAttempts,
+          priority,
+          runAt,
+          timeoutMs ?? null,
+          forceKillOnTimeout ?? false,
+          tags ?? null,
+          idempotencyKey ?? null,
+          retryDelay ?? null,
+          retryBackoff ?? null,
+          retryDelayMax ?? null,
+          group?.id ?? null,
+          group?.tier ?? null,
+        );
+      }
+
+      const onConflict = hasAnyIdempotencyKey
+        ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING`
+        : '';
+
+      const result = await client.query(
+        `INSERT INTO job_queue
+         (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, group_id, group_tier)
+         VALUES ${valueClauses.join(', ')}
+         ${onConflict}
+         RETURNING id, idempotency_key`,
+        params,
+      );
+
+      // Build a map of idempotency_key -> id from returned rows
+      const returnedKeyToId = new Map<string, number>();
+      const returnedNullKeyIds: number[] = [];
+      for (const row of result.rows) {
+        if (row.idempotency_key != null) {
+          returnedKeyToId.set(row.idempotency_key, row.id);
+        } else {
+          returnedNullKeyIds.push(row.id);
+        }
+      }
+
+      // Identify idempotency keys that conflicted (not in RETURNING)
+      const missingKeys: string[] = [];
+      for (const job of jobs) {
+        if (job.idempotencyKey && !returnedKeyToId.has(job.idempotencyKey)) {
+          missingKeys.push(job.idempotencyKey);
+        }
+      }
+
+      // Batch-fetch existing IDs for conflicted keys
+      if (missingKeys.length > 0) {
+        const existing = await client.query(
+          `SELECT id, idempotency_key FROM job_queue WHERE idempotency_key = ANY($1)`,
+          [missingKeys],
+        );
+        for (const row of existing.rows) {
+          returnedKeyToId.set(row.idempotency_key, row.id);
+        }
+      }
+
+      // Assemble result array in input order
+      let nullKeyIdx = 0;
+      const ids: number[] = [];
+      for (const job of jobs) {
+        if (job.idempotencyKey) {
+          const id = returnedKeyToId.get(job.idempotencyKey);
+          if (id === undefined) {
+            throw new Error(
+              `Failed to resolve job ID for idempotency key "${job.idempotencyKey}"`,
+            );
+          }
+          ids.push(id);
+        } else {
+          ids.push(returnedNullKeyIds[nullKeyIdx++]);
+        }
+      }
+
+      log(`Batch-inserted ${jobs.length} jobs, IDs: [${ids.join(', ')}]`);
+
+      // Record 'added' events — only for newly inserted jobs
+      const newJobEvents: {
+        jobId: number;
+        eventType: JobEventType;
+        metadata?: any;
+      }[] = [];
+      for (let i = 0; i < jobs.length; i++) {
+        const job = jobs[i];
+        const wasInserted =
+          !job.idempotencyKey || !missingKeys.includes(job.idempotencyKey);
+        if (wasInserted) {
+          newJobEvents.push({
+            jobId: ids[i],
+            eventType: JobEventType.Added,
+            metadata: {
+              jobType: job.jobType,
+              payload: job.payload,
+              tags: job.tags,
+              idempotencyKey: job.idempotencyKey,
+            },
+          });
+        }
+      }
+
+      if (newJobEvents.length > 0) {
+        if (externalClient) {
+          // Record events on the same transaction client
+          const evtValues: string[] = [];
+          const evtParams: any[] = [];
+          let evtIdx = 1;
+          for (const evt of newJobEvents) {
+            evtValues.push(`($${evtIdx++}, $${evtIdx++}, $${evtIdx++})`);
+            evtParams.push(
+              evt.jobId,
+              evt.eventType,
+              evt.metadata ? JSON.stringify(evt.metadata) : null,
+            );
+          }
+          try {
+            await client.query(
+              `INSERT INTO job_events (job_id, event_type, metadata) VALUES ${evtValues.join(', ')}`,
+              evtParams,
+            );
+          } catch (error) {
+            log(`Error recording batch job events: ${error}`);
+          }
+        } else {
+          await this.recordJobEventsBatch(newJobEvents);
+        }
+      }
+
+      return ids;
+    } catch (error) {
+      log(`Error batch-inserting jobs: ${error}`);
+      throw error;
+    } finally {
+      if (!externalClient) (client as any).release();
     }
   }
 
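The new `AddJobOptions` parameter makes enqueueing transactional: when `{ db }` is passed, both the `job_queue` INSERT and the `job_events` INSERT run on the caller's client, and the `finally` block skips `release()` so the caller keeps ownership of the connection. A minimal sketch with `pg`, assuming `backend` is a `PostgresBackend` instance (the `users` table is illustrative):

```ts
import { Pool, PoolClient } from 'pg';

// Sketch: enqueue a job atomically with an application write. If the
// transaction rolls back, the job (and its 'added' event) disappear with it.
async function signUpAndEnqueue(
  pool: Pool,
  backend: {
    addJob(
      job: Record<string, unknown>,
      options?: { db: PoolClient },
    ): Promise<number>;
  },
): Promise<number> {
  const client = await pool.connect();
  try {
    await client.query('BEGIN');
    await client.query(`INSERT INTO users (email) VALUES ($1)`, ['a@example.com']);
    const jobId = await backend.addJob(
      { jobType: 'send_welcome', payload: { email: 'a@example.com' } },
      { db: client }, // the job INSERT joins this transaction
    );
    await client.query('COMMIT');
    return jobId;
  } catch (error) {
    await client.query('ROLLBACK');
    throw error;
  } finally {
    client.release(); // caller retains ownership; the backend does not release it
  }
}
```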
@@ -202,7 +432,7 @@ export class PostgresBackend implements QueueBackend {
     const client = await this.pool.connect();
     try {
       const result = await client.query(
-        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
+        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue WHERE id = $1`,
         [id],
       );
 
@@ -236,7 +466,7 @@ export class PostgresBackend implements QueueBackend {
     const client = await this.pool.connect();
     try {
       const result = await client.query(
-        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
+        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
         [status, limit, offset],
       );
       log(`Found ${result.rows.length} jobs by status ${status}`);
@@ -262,7 +492,7 @@ export class PostgresBackend implements QueueBackend {
     const client = await this.pool.connect();
     try {
       const result = await client.query(
-        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
+        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
         [limit, offset],
       );
       log(`Found ${result.rows.length} jobs (all)`);
@@ -287,7 +517,7 @@ export class PostgresBackend implements QueueBackend {
   ): Promise<JobRecord<PayloadMap, T>[]> {
     const client = await this.pool.connect();
     try {
-      let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
+      let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue`;
       const params: any[] = [];
       const where: string[] = [];
       let paramIdx = 1;
@@ -414,7 +644,7 @@ export class PostgresBackend implements QueueBackend {
   ): Promise<JobRecord<PayloadMap, T>[]> {
     const client = await this.pool.connect();
     try {
-      let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
+      let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output
       FROM job_queue`;
       let params: any[] = [];
       switch (mode) {
@@ -467,6 +697,7 @@ export class PostgresBackend implements QueueBackend {
     workerId: string,
     batchSize = 10,
     jobType?: string | string[],
+    groupConcurrency?: number,
   ): Promise<JobRecord<PayloadMap, T>[]> {
     const client = await this.pool.connect();
     try {
@@ -476,50 +707,121 @@ export class PostgresBackend implements QueueBackend {
       const params: any[] = [workerId, batchSize];
       if (jobType) {
         if (Array.isArray(jobType)) {
-          jobTypeFilter = ` AND job_type = ANY($3)`;
+          jobTypeFilter = ` AND candidate.job_type = ANY($3)`;
           params.push(jobType);
         } else {
-          jobTypeFilter = ` AND job_type = $3`;
+          jobTypeFilter = ` AND candidate.job_type = $3`;
           params.push(jobType);
         }
       }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          WHERE (
-
-
-
-
+      let result;
+      if (groupConcurrency === undefined) {
+        result = await client.query(
+          `
+          UPDATE job_queue
+          SET status = 'processing',
+              locked_at = NOW(),
+              locked_by = $1,
+              attempts = CASE WHEN status = 'waiting' THEN attempts ELSE attempts + 1 END,
+              updated_at = NOW(),
+              pending_reason = NULL,
+              started_at = COALESCE(started_at, NOW()),
+              last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
+              wait_until = NULL
+          WHERE id IN (
+            SELECT id FROM job_queue candidate
+            WHERE (
+              (
+                (candidate.status = 'pending' OR (candidate.status = 'failed' AND candidate.next_attempt_at <= NOW()))
+                AND (candidate.attempts < candidate.max_attempts)
+                AND candidate.run_at <= NOW()
+              )
+              OR (
+                candidate.status = 'waiting'
+                AND candidate.wait_until IS NOT NULL
+                AND candidate.wait_until <= NOW()
+                AND candidate.wait_token_id IS NULL
+              )
             )
-
-
-
-
-
+            ${jobTypeFilter}
+            ORDER BY candidate.priority DESC, candidate.created_at ASC
+            LIMIT $2
+            FOR UPDATE SKIP LOCKED
+          )
+          RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output
+          `,
+          params,
+        );
+      } else {
+        const constrainedParams = [...params, groupConcurrency];
+        const groupConcurrencyParamIndex = constrainedParams.length;
+        result = await client.query(
+          `
+          WITH eligible AS (
+            SELECT candidate.id, candidate.group_id, candidate.priority, candidate.created_at
+            FROM job_queue candidate
+            WHERE (
+              (
+                (candidate.status = 'pending' OR (candidate.status = 'failed' AND candidate.next_attempt_at <= NOW()))
+                AND (candidate.attempts < candidate.max_attempts)
+                AND candidate.run_at <= NOW()
+              )
+              OR (
+                candidate.status = 'waiting'
+                AND candidate.wait_until IS NOT NULL
+                AND candidate.wait_until <= NOW()
+                AND candidate.wait_token_id IS NULL
+              )
             )
+            ${jobTypeFilter}
+            FOR UPDATE SKIP LOCKED
+          ),
+          ranked AS (
+            SELECT
+              eligible.id,
+              eligible.group_id,
+              eligible.priority,
+              eligible.created_at,
+              ROW_NUMBER() OVER (
+                PARTITION BY eligible.group_id
+                ORDER BY eligible.priority DESC, eligible.created_at ASC
+              ) AS group_rank,
+              COALESCE((
+                SELECT COUNT(*)
+                FROM job_queue processing_jobs
+                WHERE processing_jobs.status = 'processing'
+                  AND processing_jobs.group_id = eligible.group_id
+              ), 0) AS active_group_count
+            FROM eligible
+          ),
+          selected AS (
+            SELECT ranked.id
+            FROM ranked
+            WHERE ranked.group_id IS NULL
+              OR (
+                ranked.active_group_count < $${groupConcurrencyParamIndex}
+                AND ranked.group_rank <= ($${groupConcurrencyParamIndex} - ranked.active_group_count)
+              )
+            ORDER BY ranked.priority DESC, ranked.created_at ASC
+            LIMIT $2
           )
-
-
-
-
-
-
-
-
-
+          UPDATE job_queue
+          SET status = 'processing',
+              locked_at = NOW(),
+              locked_by = $1,
+              attempts = CASE WHEN status = 'waiting' THEN attempts ELSE attempts + 1 END,
+              updated_at = NOW(),
+              pending_reason = NULL,
+              started_at = COALESCE(started_at, NOW()),
+              last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
+              wait_until = NULL
+          WHERE id IN (SELECT id FROM selected)
+          RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", group_id AS "groupId", group_tier AS "groupTier", output
+          `,
+          constrainedParams,
+        );
+      }
 
       log(`Found ${result.rows.length} jobs to process`);
       await client.query('COMMIT');
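When `groupConcurrency` is set, the claim query becomes a three-stage CTE: `eligible` locks claimable rows with `FOR UPDATE SKIP LOCKED`, `ranked` numbers each group's candidates and counts that group's currently `processing` jobs, and `selected` admits ungrouped jobs unconditionally while letting a grouped job through only while in-flight plus newly claimed jobs stay under the limit. A TypeScript mirror of the `selected` predicate, for intuition only (the real filtering happens in SQL):

```ts
// Mirrors the WHERE clause of the `selected` CTE above.
// groupRank is 1-based within the group; activeGroupCount is the number of
// jobs from the same group already in 'processing'.
function isClaimable(
  groupId: string | null,
  groupRank: number,
  activeGroupCount: number,
  groupConcurrency: number,
): boolean {
  if (groupId === null) return true; // ungrouped jobs are never throttled
  return (
    activeGroupCount < groupConcurrency &&
    groupRank <= groupConcurrency - activeGroupCount
  );
}
```

For example, with `groupConcurrency = 3` and two `tenant-42` jobs already processing, only that group's top-ranked eligible candidate passes (`1 <= 3 - 2`); the rest wait for a later batch.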
@@ -549,17 +851,19 @@ export class PostgresBackend implements QueueBackend {
     }
   }
 
-  async completeJob(jobId: number): Promise<void> {
+  async completeJob(jobId: number, output?: unknown): Promise<void> {
     const client = await this.pool.connect();
     try {
+      const outputJson = output !== undefined ? JSON.stringify(output) : null;
       const result = await client.query(
         `
         UPDATE job_queue
         SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
-            step_data = NULL, wait_until = NULL, wait_token_id = NULL
+            step_data = NULL, wait_until = NULL, wait_token_id = NULL,
+            output = COALESCE($2::jsonb, output)
         WHERE id = $1 AND status = 'processing'
         `,
-        [jobId],
+        [jobId, outputJson],
       );
       if (result.rowCount === 0) {
         log(
@@ -588,9 +892,17 @@ export class PostgresBackend implements QueueBackend {
         UPDATE job_queue
         SET status = 'failed',
             updated_at = NOW(),
-            next_attempt_at = CASE
-              WHEN attempts
-
+            next_attempt_at = CASE
+              WHEN attempts >= max_attempts THEN NULL
+              WHEN retry_delay IS NULL AND retry_backoff IS NULL AND retry_delay_max IS NULL
+                THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
+              WHEN COALESCE(retry_backoff, true) = true
+                THEN NOW() + (LEAST(
+                  COALESCE(retry_delay_max, 2147483647),
+                  COALESCE(retry_delay, 60) * POWER(2, attempts)
+                ) * (0.5 + 0.5 * random()) * INTERVAL '1 second')
+              ELSE
+                NOW() + (COALESCE(retry_delay, 60) * INTERVAL '1 second')
             END,
             error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
             failure_reason = $3,
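The rewritten `next_attempt_at` CASE keeps the legacy schedule (2^attempts minutes) when no retry configuration is set, applies capped exponential backoff with jitter when `retry_backoff` is true or unset, and falls back to a fixed delay otherwise; once `attempts >= max_attempts` it yields NULL so the job is never retried. A TypeScript transcription of the delay arithmetic, in seconds to match the SQL's `* INTERVAL '1 second'`:

```ts
// Mirrors the new next_attempt_at CASE in failJob. Returns seconds until the
// next attempt, or null when the job has exhausted max_attempts.
function nextRetryDelaySeconds(
  attempts: number,
  maxAttempts: number,
  retryDelay: number | null,
  retryBackoff: boolean | null,
  retryDelayMax: number | null,
): number | null {
  if (attempts >= maxAttempts) return null;
  if (retryDelay === null && retryBackoff === null && retryDelayMax === null) {
    return 2 ** attempts * 60; // legacy behavior: 2^attempts minutes
  }
  if (retryBackoff ?? true) {
    // Cap the exponential base first, then jitter into [50%, 100%) of it.
    const base = Math.min(retryDelayMax ?? 2147483647, (retryDelay ?? 60) * 2 ** attempts);
    return base * (0.5 + 0.5 * Math.random());
  }
  return retryDelay ?? 60; // fixed delay, no backoff
}
```

So a job with `retryDelay: 30` failing its third attempt (`attempts = 3`) gets a base of 240 seconds (capped by `retryDelayMax` if one is set), jittered to somewhere between 120 and 240 seconds.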
@@ -665,6 +977,23 @@ export class PostgresBackend implements QueueBackend {
     }
   }
 
+  // ── Output ────────────────────────────────────────────────────────────
+
+  async updateOutput(jobId: number, output: unknown): Promise<void> {
+    const client = await this.pool.connect();
+    try {
+      await client.query(
+        `UPDATE job_queue SET output = $2::jsonb, updated_at = NOW() WHERE id = $1`,
+        [jobId, JSON.stringify(output)],
+      );
+      log(`Updated output for job ${jobId}`);
+    } catch (error) {
+      log(`Error updating output for job ${jobId}: ${error}`);
+    } finally {
+      client.release();
+    }
+  }
+
   // ── Job management ────────────────────────────────────────────────────
 
   async retryJob(jobId: number): Promise<void> {
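Together with the `completeJob` change above, this gives handlers two ways to persist results: `updateOutput` writes intermediate output while the job is still `processing`, and `completeJob`'s `COALESCE($2::jsonb, output)` preserves that value when no final output is passed. A sketch, with `backend` again standing in for a `PostgresBackend`:

```ts
// Sketch: persisting intermediate and final output for a claimed job.
async function runReportJob(
  backend: {
    updateOutput(jobId: number, output: unknown): Promise<void>;
    completeJob(jobId: number, output?: unknown): Promise<void>;
  },
  jobId: number,
): Promise<void> {
  // Visible to dashboards while the job is still running.
  await backend.updateOutput(jobId, { stage: 'rendering', pages: 12 });

  // Passing a final output overwrites it; calling completeJob(jobId) with no
  // output would keep the value written above, thanks to the COALESCE.
  await backend.completeJob(jobId, { url: 'https://example.com/report.pdf' });
}
```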
@@ -843,6 +1172,18 @@ export class PostgresBackend implements QueueBackend {
         updateFields.push(`tags = $${paramIdx++}`);
         params.push(updates.tags ?? null);
       }
+      if (updates.retryDelay !== undefined) {
+        updateFields.push(`retry_delay = $${paramIdx++}`);
+        params.push(updates.retryDelay ?? null);
+      }
+      if (updates.retryBackoff !== undefined) {
+        updateFields.push(`retry_backoff = $${paramIdx++}`);
+        params.push(updates.retryBackoff ?? null);
+      }
+      if (updates.retryDelayMax !== undefined) {
+        updateFields.push(`retry_delay_max = $${paramIdx++}`);
+        params.push(updates.retryDelayMax ?? null);
+      }
 
       if (updateFields.length === 0) {
         log(`No fields to update for job ${jobId}`);
@@ -869,6 +1210,12 @@ export class PostgresBackend implements QueueBackend {
       if (updates.timeoutMs !== undefined)
         metadata.timeoutMs = updates.timeoutMs;
       if (updates.tags !== undefined) metadata.tags = updates.tags;
+      if (updates.retryDelay !== undefined)
+        metadata.retryDelay = updates.retryDelay;
+      if (updates.retryBackoff !== undefined)
+        metadata.retryBackoff = updates.retryBackoff;
+      if (updates.retryDelayMax !== undefined)
+        metadata.retryDelayMax = updates.retryDelayMax;
 
       await this.recordJobEvent(jobId, JobEventType.Edited, metadata);
       log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
@@ -918,6 +1265,18 @@ export class PostgresBackend implements QueueBackend {
         updateFields.push(`tags = $${paramIdx++}`);
         params.push(updates.tags ?? null);
       }
+      if (updates.retryDelay !== undefined) {
+        updateFields.push(`retry_delay = $${paramIdx++}`);
+        params.push(updates.retryDelay ?? null);
+      }
+      if (updates.retryBackoff !== undefined) {
+        updateFields.push(`retry_backoff = $${paramIdx++}`);
+        params.push(updates.retryBackoff ?? null);
+      }
+      if (updates.retryDelayMax !== undefined) {
+        updateFields.push(`retry_delay_max = $${paramIdx++}`);
+        params.push(updates.retryDelayMax ?? null);
+      }
 
       if (updateFields.length === 0) {
         log(`No fields to update for batch edit`);
@@ -1188,8 +1547,8 @@ export class PostgresBackend implements QueueBackend {
         `INSERT INTO cron_schedules
          (schedule_name, cron_expression, job_type, payload, max_attempts,
           priority, timeout_ms, force_kill_on_timeout, tags, timezone,
-          allow_overlap, next_run_at)
-         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
+          allow_overlap, next_run_at, retry_delay, retry_backoff, retry_delay_max)
+         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
          RETURNING id`,
         [
           input.scheduleName,
@@ -1204,6 +1563,9 @@ export class PostgresBackend implements QueueBackend {
           input.timezone,
           input.allowOverlap,
           input.nextRunAt,
+          input.retryDelay,
+          input.retryBackoff,
+          input.retryDelayMax,
         ],
       );
       const id = result.rows[0].id;
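Cron schedules now carry the same three retry knobs, presumably propagated to each job the schedule enqueues. A sketch of the create input (the method name and `backend` handle are illustrative; the field names match `input.retryDelay` and friends in the INSERT above):

```ts
// Sketch: a schedule whose enqueued jobs retry with capped backoff.
async function createDigestSchedule(backend: {
  createCronSchedule(input: Record<string, unknown>): Promise<unknown>;
}) {
  return backend.createCronSchedule({
    scheduleName: 'nightly-digest',
    cronExpression: '0 3 * * *',
    jobType: 'send_digest',
    payload: {},
    retryDelay: 120, // seconds
    retryBackoff: true,
    retryDelayMax: 3600,
  });
}
```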
@@ -1235,7 +1597,9 @@ export class PostgresBackend implements QueueBackend {
                 timezone, allow_overlap AS "allowOverlap", status,
                 last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                 next_run_at AS "nextRunAt",
-                created_at AS "createdAt", updated_at AS "updatedAt"
+                created_at AS "createdAt", updated_at AS "updatedAt",
+                retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+                retry_delay_max AS "retryDelayMax"
          FROM cron_schedules WHERE id = $1`,
         [id],
       );
@@ -1263,7 +1627,9 @@ export class PostgresBackend implements QueueBackend {
                 timezone, allow_overlap AS "allowOverlap", status,
                 last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                 next_run_at AS "nextRunAt",
-                created_at AS "createdAt", updated_at AS "updatedAt"
+                created_at AS "createdAt", updated_at AS "updatedAt",
+                retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+                retry_delay_max AS "retryDelayMax"
          FROM cron_schedules WHERE schedule_name = $1`,
         [name],
       );
@@ -1290,7 +1656,9 @@ export class PostgresBackend implements QueueBackend {
                 timezone, allow_overlap AS "allowOverlap", status,
                 last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                 next_run_at AS "nextRunAt",
-                created_at AS "createdAt", updated_at AS "updatedAt"
+                created_at AS "createdAt", updated_at AS "updatedAt",
+                retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+                retry_delay_max AS "retryDelayMax"
          FROM cron_schedules`;
       const params: any[] = [];
       if (status) {
@@ -1404,6 +1772,18 @@ export class PostgresBackend implements QueueBackend {
         updateFields.push(`allow_overlap = $${paramIdx++}`);
         params.push(updates.allowOverlap);
       }
+      if (updates.retryDelay !== undefined) {
+        updateFields.push(`retry_delay = $${paramIdx++}`);
+        params.push(updates.retryDelay);
+      }
+      if (updates.retryBackoff !== undefined) {
+        updateFields.push(`retry_backoff = $${paramIdx++}`);
+        params.push(updates.retryBackoff);
+      }
+      if (updates.retryDelayMax !== undefined) {
+        updateFields.push(`retry_delay_max = $${paramIdx++}`);
+        params.push(updates.retryDelayMax);
+      }
       if (nextRunAt !== undefined) {
         updateFields.push(`next_run_at = $${paramIdx++}`);
         params.push(nextRunAt);
@@ -1443,7 +1823,9 @@ export class PostgresBackend implements QueueBackend {
                 timezone, allow_overlap AS "allowOverlap", status,
                 last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
                 next_run_at AS "nextRunAt",
-                created_at AS "createdAt", updated_at AS "updatedAt"
+                created_at AS "createdAt", updated_at AS "updatedAt",
+                retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+                retry_delay_max AS "retryDelayMax"
          FROM cron_schedules
          WHERE status = 'active'
            AND next_run_at IS NOT NULL