@tstdl/base 0.93.91 → 0.93.93
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/authentication/client/authentication.service.js +8 -8
- package/document-management/server/services/document-validation.service.js +5 -5
- package/document-management/server/services/document-workflow.service.js +2 -2
- package/orm/sqls/sqls.d.ts +6 -6
- package/package.json +2 -2
- package/task-queue/enqueue-batch.d.ts +16 -11
- package/task-queue/enqueue-batch.js +2 -2
- package/task-queue/index.d.ts +2 -1
- package/task-queue/index.js +2 -1
- package/task-queue/postgres/drizzle/{0000_thin_black_panther.sql → 0000_simple_invisible_woman.sql} +5 -5
- package/task-queue/postgres/drizzle/meta/0000_snapshot.json +11 -11
- package/task-queue/postgres/drizzle/meta/_journal.json +2 -2
- package/task-queue/postgres/module.js +2 -2
- package/task-queue/postgres/schemas.d.ts +1 -1
- package/task-queue/postgres/schemas.js +2 -2
- package/task-queue/postgres/task-queue.d.ts +101 -47
- package/task-queue/postgres/task-queue.js +149 -139
- package/task-queue/postgres/task-queue.provider.d.ts +3 -4
- package/task-queue/postgres/task-queue.provider.js +2 -2
- package/task-queue/postgres/task.model.d.ts +5 -5
- package/task-queue/postgres/task.model.js +5 -5
- package/task-queue/provider.d.ts +2 -2
- package/task-queue/task-context.d.ts +34 -18
- package/task-queue/task-context.js +23 -13
- package/task-queue/task-queue.d.ts +160 -132
- package/task-queue/task-queue.js +8 -8
- package/task-queue/tests/complex.test.js +36 -29
- package/task-queue/tests/dependencies.test.js +17 -17
- package/task-queue/tests/enqueue-item.test.d.ts +1 -0
- package/task-queue/tests/enqueue-item.test.js +12 -0
- package/task-queue/tests/queue-generic.test.d.ts +1 -0
- package/task-queue/tests/queue-generic.test.js +8 -0
- package/task-queue/tests/queue.test.js +50 -50
- package/task-queue/tests/task-context.test.d.ts +1 -0
- package/task-queue/tests/task-context.test.js +7 -0
- package/task-queue/tests/task-union.test.d.ts +1 -0
- package/task-queue/tests/task-union.test.js +18 -0
- package/task-queue/tests/typing.test.d.ts +1 -0
- package/task-queue/tests/typing.test.js +9 -0
- package/task-queue/tests/worker.test.js +16 -16
- package/task-queue/types.d.ts +48 -0
- package/task-queue/types.js +1 -0
package/task-queue/task-queue.js
CHANGED

@@ -9,26 +9,26 @@ import { isDefined, isString } from '../utils/type-guards.js';
 import { millisecondsPerDay, millisecondsPerMinute, millisecondsPerSecond } from '../utils/units.js';
 import { TaskQueueEnqueueBatch } from './enqueue-batch.js';
 import { BatchTaskContext } from './task-context.js';
-export class
+export class TaskProcessResult {
     payload;
     constructor(payload) {
         this.payload = payload;
     }
     static Complete(result) {
-        return new
+        return new TaskProcessResult({ action: 'complete', result });
     }
     static Fail(error, fatal = false) {
-        return new
+        return new TaskProcessResult({ action: 'fail', error, fatal });
     }
     static RescheduleTo(timestamp) {
-        return new
+        return new TaskProcessResult({ action: 'reschedule', timestamp });
     }
     static RescheduleBy(milliseconds) {
         const timestamp = currentTimestamp() + milliseconds;
         return this.RescheduleTo(timestamp);
     }
 }
-export const
+export const TaskStatus = defineEnum('TaskStatus', {
     /**
      * The task is waiting to be processed.
      */
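The hunk above gives names to the previously truncated exports: the result wrapper becomes TaskProcessResult, with static Complete, Fail, RescheduleTo and RescheduleBy factories, and the status enum becomes TaskStatus. A minimal sketch of a worker callback returning these results follows; the import path, handler name and payload shape are illustrative assumptions, not taken from the package:

// Sketch only: handler, payload and import path are assumptions.
import { TaskProcessResult } from '@tstdl/base/task-queue';

declare function sendMail(to: string): Promise<void>; // illustrative stand-in for real work

async function handleSendMail(data: { to: string }): Promise<TaskProcessResult> {
  try {
    await sendMail(data.to);
    return TaskProcessResult.Complete({ delivered: true }); // stored as the task result
  }
  catch {
    // Transient problem: run again in 30 seconds. A non-recoverable error would instead
    // return TaskProcessResult.Fail(err, true) to mark the task as fatally failed.
    return TaskProcessResult.RescheduleBy(30_000);
  }
}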
@@ -152,7 +152,7 @@ export class TaskQueue extends Transactional {
             const result = results[i];
             switch (result.payload.action) {
                 case 'complete':
-                    completions.push({ task, result: result.payload });
+                    completions.push({ task, result: result.payload.result });
                     break;
                 case 'fail':
                     failures.push({ task, error: result.payload.error, fatal: result.payload.fatal });
@@ -166,12 +166,12 @@ export class TaskQueue extends Transactional {
         }
         if (completions.length > 0) {
             context.logger.verbose(`Completing ${completions.length} tasks`);
-            await this.completeMany(completions.map((c) => c.task), completions.map((c) => c.result));
+            await this.completeMany(completions.map((c) => c.task), { results: completions.map((c) => c.result) });
         }
         if (failures.length > 0) {
             context.logger.verbose(`Failing ${failures.length} tasks`);
             for (const item of failures) {
-                await this.fail(item.task, item.error, item.fatal);
+                await this.fail(item.task, item.error, { fatal: item.fatal });
             }
         }
         if (reschedules.length > 0) {
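Together with the first hunk, this changes the public call shapes on the queue: complete and completeMany now take an options object ({ result } and { results }), and fail takes { fatal } instead of a positional boolean. A compact sketch of the new forms next to the old ones; the declared queue shape below is only a placeholder so the sketch stands alone:

// Placeholder declarations; only the call shapes matter here.
declare const queue: {
  complete(task: unknown, options?: { result?: unknown }): Promise<void>;
  completeMany(tasks: unknown[], options?: { results?: unknown[] }): Promise<void>;
  fail(task: unknown, error: unknown, options?: { fatal?: boolean }): Promise<void>;
};
declare const task: unknown;
declare const tasks: unknown[];

await queue.complete(task, { result: { ok: true } });                          // was: complete(task, { ok: true })
await queue.completeMany(tasks, { results: tasks.map(() => ({ ok: true })) }); // was: completeMany(tasks, results)
await queue.fail(task, new Error('boom'), { fatal: true });                    // was: fail(task, error, true)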
package/task-queue/tests/complex.test.js
CHANGED

@@ -1,5 +1,5 @@
 import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
-import { DependencyJoinMode, TaskQueueProvider,
+import { DependencyJoinMode, TaskQueueProvider, TaskStatus } from '../../task-queue/index.js';
 import { setupIntegrationTest } from '../../unit-test/index.js';
 import { currentTimestamp } from '../../utils/date-time.js';
 import { timeout } from '../../utils/timing.js';
@@ -50,15 +50,15 @@ describe('Complex Queue Scenarios', () => {
            const taskC = await queue.enqueue('C', {}, { tags: ['tag-c'], scheduleAfterTags: ['tag-a'] });
            // A runs first
            const taskA = await queue.enqueue('A', {}, { tags: ['tag-a'] });
-            expect(taskD.status).toBe(
-            expect(taskB.status).toBe(
-            expect(taskC.status).toBe(
-            expect(taskA.status).toBe(
+            expect(taskD.status).toBe(TaskStatus.Waiting);
+            expect(taskB.status).toBe(TaskStatus.Waiting);
+            expect(taskC.status).toBe(TaskStatus.Waiting);
+            expect(taskA.status).toBe(TaskStatus.Pending);
            // Process A
            const dA = await queue.dequeue({ types: ['A'] });
            await queue.complete(dA);
-            await waitForStatus(taskB.id,
-            await waitForStatus(taskC.id,
+            await waitForStatus(taskB.id, TaskStatus.Pending);
+            await waitForStatus(taskC.id, TaskStatus.Pending);
            // Process B
            const dB = await queue.dequeue({ types: ['B'] });
            await queue.complete(dB);
@@ -66,14 +66,14 @@ describe('Complex Queue Scenarios', () => {
            await queue.processPendingFanIn();
            // D still waiting (needs C)
            const uD2 = await queue.getTask(taskD.id);
-            expect(uD2?.status).toBe(
+            expect(uD2?.status).toBe(TaskStatus.Waiting);
            // Process C
            const dC = await queue.dequeue({ types: ['C'] });
            await queue.complete(dC);
-            await waitForStatus(taskD.id,
+            await waitForStatus(taskD.id, TaskStatus.Pending);
            // D should be Pending
            const uD3 = await queue.getTask(taskD.id);
-            expect(uD3?.status).toBe(
+            expect(uD3?.status).toBe(TaskStatus.Pending);
        });
        it('should handle Deep Chain (A -> B -> C -> D)', async () => {
            const D = await queue.enqueue('D', {}, { scheduleAfterTags: ['C'] });
@@ -82,14 +82,14 @@ describe('Complex Queue Scenarios', () => {
            const A = await queue.enqueue('A', {}, { tags: ['A'] });
            // Run A
            await queue.complete((await queue.dequeue({ types: ['A'] })));
-            await waitForStatus(B.id,
+            await waitForStatus(B.id, TaskStatus.Pending);
            // Run B
            await queue.complete((await queue.dequeue({ types: ['B'] })));
-            await waitForStatus(C.id,
+            await waitForStatus(C.id, TaskStatus.Pending);
            // Run C
            await queue.complete((await queue.dequeue({ types: ['C'] })));
-            await waitForStatus(D.id,
-            expect((await queue.getTask(D.id))?.status).toBe(
+            await waitForStatus(D.id, TaskStatus.Pending);
+            expect((await queue.getTask(D.id))?.status).toBe(TaskStatus.Pending);
        });
        it('should propagate cancellation down the dependency tree', async () => {
            // Use parentId for explicit tree structure which `cancel` supports
@@ -97,9 +97,9 @@ describe('Complex Queue Scenarios', () => {
            const child = await queue.enqueue('child', {}, { parentId: root.id });
            const grandChild = await queue.enqueue('grand', {}, { parentId: child.id });
            await queue.cancel(root.id);
-            expect((await queue.getTask(root.id))?.status).toBe(
-            expect((await queue.getTask(child.id))?.status).toBe(
-            expect((await queue.getTask(grandChild.id))?.status).toBe(
+            expect((await queue.getTask(root.id))?.status).toBe(TaskStatus.Cancelled);
+            expect((await queue.getTask(child.id))?.status).toBe(TaskStatus.Cancelled);
+            expect((await queue.getTask(grandChild.id))?.status).toBe(TaskStatus.Cancelled);
        });
    });
    describe('Scheduling & Priorities', () => {
@@ -129,7 +129,7 @@ describe('Complex Queue Scenarios', () => {
            const u2 = await queue.getTask(task.id);
            expect(u2?.tries).toBe(2);
            const now = currentTimestamp();
-            expect(u2.scheduleTimestamp
+            expect(u2.scheduleTimestamp > now + 300).toBe(true);
        });
    });
    describe('Rate Limiting & Concurrency', () => {
@@ -177,15 +177,15 @@ describe('Complex Queue Scenarios', () => {
            // Verify it is in main table
            const before = await archiveQueue.getTask(task.id);
            expect(before).toBeDefined();
-            expect(before?.status).toBe(
-            expect(before
+            expect(before?.status).toBe(TaskStatus.Completed);
+            expect(before.completeTimestamp > 0).toBe(true);
            // Wait for retention (100ms).
            await timeout(500);
            await archiveQueue.maintenance();
            // Should move from main table to archive
            const loaded = await archiveQueue.getTask(task.id);
            expect(loaded).toBeDefined();
-            expect(loaded?.status).toBe(
+            expect(loaded?.status).toBe(TaskStatus.Completed);
            await archiveQueue.clear();
        });
        it('should prune expired pending tasks', async () => {
@@ -194,8 +194,8 @@ describe('Complex Queue Scenarios', () => {
            await timeout(150);
            await queue.maintenance();
            const updated = await queue.getTask(task.id);
-            expect(updated?.status).toBe(
-            expect(updated?.error
+            expect(updated?.status).toBe(TaskStatus.Dead);
+            expect((updated?.error)['code']).toBe('Expired');
        });
        it('should retrieve task from archive', async () => {
            // Manually insert into archive? We can't access archiveRepository directly.
@@ -211,7 +211,7 @@ describe('Complex Queue Scenarios', () => {
            // Verify retrieval
            const fromArchive = await queue.getTask(task.id);
            expect(fromArchive).toBeDefined();
-            expect(fromArchive?.status).toBe(
+            expect(fromArchive?.status).toBe(TaskStatus.Completed);
        });
        it('should defer archival of parent tasks until children are archived', async () => {
            const qProvider = injector.resolve(TaskQueueProvider);
@@ -267,7 +267,7 @@ describe('Complex Queue Scenarios', () => {
            expect(t1.id).toBe(t2.id); // Deduplicated
            const t3 = await queue.enqueue('t', { v: 3 }, { idempotencyKey: key, replace: true });
            expect(t3.id).not.toBe(t1.id); // Replaced
-            expect(t3.data
+            expect(t3.data['v']).toBe(3);
        });
    });
    describe('Edge Cases', () => {
@@ -277,8 +277,15 @@ describe('Complex Queue Scenarios', () => {
            const d = await queue.dequeue({ types: ['pre'] });
            await queue.fail(d, new Error('fail'));
            await queue.processPendingFanIn();
-
-
+            // Retry check because fan-in might be processed by background worker asynchronously
+            let u = await queue.getTask(dependent.id);
+            for (let i = 0; i < 5; i++) {
+                if (u?.status == TaskStatus.Waiting)
+                    break;
+                await timeout(50);
+                u = await queue.getTask(dependent.id);
+            }
+            expect(u?.status).toBe(TaskStatus.Waiting); // Should still be waiting because dependency didn't Complete
        });
        it('should handle mixed AND/OR dependencies', async () => {
            const dep = await queue.enqueue('dep', {}, {
@@ -287,8 +294,8 @@ describe('Complex Queue Scenarios', () => {
            });
            const A = await queue.enqueue('A', {}, { tags: ['A'] });
            await queue.complete((await queue.dequeue({ types: ['A'] })));
-            await waitForStatus(dep.id,
-            expect((await queue.getTask(dep.id))?.status).toBe(
+            await waitForStatus(dep.id, TaskStatus.Pending);
+            expect((await queue.getTask(dep.id))?.status).toBe(TaskStatus.Pending);
        });
        it('should not reschedule if task is not running', async () => {
            const task = await queue.enqueue('t', {});
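The scenarios above rely on a waitForStatus helper defined earlier in the test file, outside every hunk shown here. A plausible minimal version (an assumption, not the file's actual implementation) simply polls getTask until the expected status appears:

// Assumed shape; in the real test file, `queue` and `timeout` are already in scope.
declare const queue: { getTask(id: string): Promise<{ status: string | number } | undefined> };
declare function timeout(milliseconds: number): Promise<void>;

async function waitForStatus(id: string, status: string | number, attempts: number = 20): Promise<void> {
  for (let i = 0; i < attempts; i++) {
    const task = await queue.getTask(id);

    if (task?.status === status) {
      return;
    }

    await timeout(50); // poll again shortly
  }

  throw new Error(`task ${id} did not reach the expected status in time`);
}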
package/task-queue/tests/dependencies.test.js
CHANGED

@@ -1,6 +1,6 @@
 import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
 import { CancellationToken } from '../../cancellation/index.js';
-import { DependencyJoinMode, TaskQueueProvider,
+import { DependencyJoinMode, TaskQueueProvider, TaskStatus } from '../../task-queue/index.js';
 import { setupIntegrationTest } from '../../unit-test/index.js';
 import { timeout } from '../../utils/timing.js';
 describe('Queue Dependencies & Tree Tests', () => {
@@ -28,7 +28,7 @@ describe('Queue Dependencies & Tree Tests', () => {
            const dependent = await queue.enqueue('dependent', { foo: 'bar' }, {
                completeAfterTags: ['tag-a'],
            });
-            expect(dependent.status).toBe(
+            expect(dependent.status).toBe(TaskStatus.Waiting);
            // 2. Create the prerequisite task
            const prereq = await queue.enqueue('prereq', { val: 1 }, { tags: ['tag-a'] });
            // 3. Complete prereq
@@ -37,14 +37,14 @@ describe('Queue Dependencies & Tree Tests', () => {
            await queue.complete(dequeued);
            await queue.processPendingFanIn();
            const updatedDependent = await queue.getTask(dependent.id);
-            expect(updatedDependent?.status).toBe(
+            expect(updatedDependent?.status).toBe(TaskStatus.Completed);
        });
        it('should schedule a task to run after dependency completes (scheduleAfterTags)', async () => {
            // 1. Dependent task
            const dependent = await queue.enqueue('dependent', { foo: 'bar' }, {
                scheduleAfterTags: ['tag-b'],
            });
-            expect(dependent.status).toBe(
+            expect(dependent.status).toBe(TaskStatus.Waiting);
            // 2. Prereq
            const prereq = await queue.enqueue('prereq', {}, { tags: ['tag-b'] });
            // 3. Complete prereq
@@ -53,7 +53,7 @@ describe('Queue Dependencies & Tree Tests', () => {
            await queue.processPendingFanIn();
            // 5. Dependent should be Pending (ready to run)
            const updatedDependent = await queue.getTask(dependent.id);
-            expect(updatedDependent?.status).toBe(
+            expect(updatedDependent?.status).toBe(TaskStatus.Pending);
            // 6. Should be dequeueable
            const d2 = await queue.dequeue({ types: ['dependent'] });
            expect(d2?.id).toBe(dependent.id);
@@ -66,18 +66,18 @@ describe('Queue Dependencies & Tree Tests', () => {
            const prereq = await queue.enqueue('prereq', {}, { tags: ['tag-fail'] });
            const dequeued = await queue.dequeue({ types: ['prereq'] });
            // Fail fatally
-            await queue.fail(dequeued, new Error('boom'), true);
+            await queue.fail(dequeued, new Error('boom'), { fatal: true });
            // Trigger resolution
            await queue.processPendingFanIn();
            let updatedDependent;
            for (let i = 0; i < 20; i++) {
                await timeout(100);
                updatedDependent = await queue.getTask(dependent.id);
-                if (updatedDependent?.status ===
+                if (updatedDependent?.status === TaskStatus.Dead)
                    break;
                await queue.processPendingFanIn(); // Retry processing if it didn't catch it yet
            }
-            expect(updatedDependent?.status).toBe(
+            expect(updatedDependent?.status).toBe(TaskStatus.Dead);
            expect(updatedDependent?.error?.code).toBe('DependencyFailed');
        });
        it('should respect DependencyJoinMode.Or', async () => {
@@ -92,7 +92,7 @@ describe('Queue Dependencies & Tree Tests', () => {
            await queue.complete(d1);
            await queue.processPendingFanIn();
            const updated = await queue.getTask(dependent.id);
-            expect(updated?.status).toBe(
+            expect(updated?.status).toBe(TaskStatus.Pending);
        });
    });
    describe('Tree Operations & Cancellation', () => {
@@ -106,16 +106,16 @@ describe('Queue Dependencies & Tree Tests', () => {
            // Get Tree
            const tree = await queue.getTree(root.id);
            expect(tree.length).toBe(3);
-            expect(tree.map(t => t.id)).toContain(grandchild.id);
+            expect(tree.map((t) => t.id)).toContain(grandchild.id);
            // Cancel Root
            await queue.cancel(root.id);
            // Verify all cancelled
            const tRoot = await queue.getTask(root.id);
            const tChild = await queue.getTask(child.id);
            const tGrand = await queue.getTask(grandchild.id);
-            expect(tRoot?.status).toBe(
-            expect(tChild?.status).toBe(
-            expect(tGrand?.status).toBe(
+            expect(tRoot?.status).toBe(TaskStatus.Cancelled);
+            expect(tChild?.status).toBe(TaskStatus.Cancelled);
+            expect(tGrand?.status).toBe(TaskStatus.Cancelled);
        });
        it('should cancel many by tags', async () => {
            await queue.enqueue('t1', {}, { tags: ['group-a'] });
@@ -125,19 +125,19 @@ describe('Queue Dependencies & Tree Tests', () => {
            // Check status
            const tasks = await queue.getManyByTags('group-a');
            expect(tasks.length).toBe(2);
-            expect(tasks.every(t => t.status ===
+            expect(tasks.every((t) => t.status === TaskStatus.Cancelled)).toBe(true);
            const tasksB = await queue.getManyByTags('group-b');
-            expect(tasksB[0]?.status).toBe(
+            expect(tasksB[0]?.status).toBe(TaskStatus.Pending);
        });
    });
    describe('Restart & Consumers', () => {
        it('should restart a dead/cancelled task', async () => {
            const task = await queue.enqueue('restart-test', {});
            const d = await queue.dequeue({ types: ['restart-test'] });
-            await queue.fail(d, new Error('fatal'), true); // Dead
+            await queue.fail(d, new Error('fatal'), { fatal: true }); // Dead
            await queue.restart(task.id);
            const updated = await queue.getTask(task.id);
-            expect(updated?.status).toBe(
+            expect(updated?.status).toBe(TaskStatus.Pending);
            expect(updated?.tries).toBe(0);
            expect(updated?.error).toBeNull();
        });
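Taken together, these tests distinguish the two dependency options used throughout the suite: completeAfterTags marks the dependent task Completed as soon as its prerequisite completes (it is never dequeued itself), while scheduleAfterTags only moves the dependent from Waiting to Pending so it can be dequeued and run. A compact illustration; the task types and the declared queue shape are made up for the example:

// Illustrative only; 'import', 'report' and 'notify' are not task types from the package.
declare const queue: { enqueue(type: string, data: unknown, options?: Record<string, unknown>): Promise<unknown> };

await queue.enqueue('import', {}, { tags: ['import'] });

// Becomes Pending (and dequeueable) once the 'import' task completes.
await queue.enqueue('report', {}, { scheduleAfterTags: ['import'] });

// Is marked Completed together with the 'import' task, without ever being dequeued.
await queue.enqueue('notify', {}, { completeAfterTags: ['import'] });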
package/task-queue/tests/enqueue-item.test.d.ts
ADDED

@@ -0,0 +1 @@
+export {};
package/task-queue/tests/enqueue-item.test.js
ADDED

@@ -0,0 +1,12 @@
+import { describe, expectTypeOf, test } from 'vitest';
+describe('EnqueueManyItem Type Definitions', () => {
+    test('EnqueueManyItem should be a discriminated union', () => {
+        const item = {};
+        if (item.type == 'test-task') {
+            expectTypeOf(item.data).toEqualTypeOf();
+        }
+        else if (item.type == 'other-task') {
+            expectTypeOf(item.data).toEqualTypeOf();
+        }
+    });
+});
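The compiled test above has lost its TypeScript annotations (item is a bare object literal and the expectTypeOf calls carry no type arguments), so the intent is easier to see as a sketch: EnqueueManyItem is meant to be a union discriminated by type, so narrowing on item.type also narrows item.data. The exact shape lives in the package's .d.ts files and may differ from this assumption:

// Rough sketch of the idea being tested. Only `type` and `data` (and idempotencyKey,
// which appears elsewhere in this diff) are taken from the source; the rest is assumed.
type ExampleTaskMap = {
  'test-task': { foo: string },
  'other-task': { bar: number },
};

type EnqueueManyItem<Map extends Record<string, unknown> = ExampleTaskMap> = {
  [Type in keyof Map]: { type: Type, data: Map[Type], idempotencyKey?: string }
}[keyof Map];

declare const item: EnqueueManyItem;

if (item.type == 'test-task') {
  item.data.foo; // narrowed to { foo: string }
}
else if (item.type == 'other-task') {
  item.data.bar; // narrowed to { bar: number }
}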
package/task-queue/tests/queue-generic.test.d.ts
ADDED

@@ -0,0 +1 @@
+export {};
package/task-queue/tests/queue-generic.test.js
ADDED

@@ -0,0 +1,8 @@
+import { describe, test, expectTypeOf } from 'vitest';
+describe('TaskQueue Class Generic', () => {
+    test('TaskQueue should accept TaskDefinitionMap', () => {
+        // If it compiles, it's good. But we can inspect the generic constraints.
+        // For now, simple existence check.
+        expectTypeOf().toBeObject();
+    });
+});
package/task-queue/tests/queue.test.js
CHANGED

@@ -1,5 +1,5 @@
 import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
-import { TaskQueueProvider,
+import { TaskQueueProvider, TaskStatus } from '../../task-queue/index.js';
 import { setupIntegrationTest } from '../../unit-test/index.js';
 import { currentTimestamp } from '../../utils/date-time.js';
 import { timeout } from '../../utils/timing.js';
@@ -8,7 +8,7 @@ describe('Queue Integration Tests', () => {
    let queue;
    const queueName = `test-queue-${Date.now()}`;
    // Helper to verify state in DB
-    async function
+    async function assertTaskStatus(id, state, message) {
        const task = await queue.getTask(id);
        expect(task?.status, message).toBe(state);
    }
@@ -46,12 +46,12 @@ describe('Queue Integration Tests', () => {
        const t2 = await queue.enqueue('test', { value: 'second' });
        const d1 = await queue.dequeue();
        expect(d1?.id).toBe(t1.id);
-        expect(d1?.data
-        await queue.complete(d1, { success: true });
-        await
+        expect((d1?.data)['value']).toBe('first');
+        await queue.complete(d1, { result: { success: true } });
+        await assertTaskStatus(t1.id, TaskStatus.Completed, 'Task 1 completed');
        const d2 = await queue.dequeue();
        expect(d2?.id).toBe(t2.id);
-        await queue.complete(d2, { success: true });
+        await queue.complete(d2, { result: { success: true } });
    });
    it('Priorities', async () => {
        // Priority 1000 (default)
@@ -73,13 +73,13 @@ describe('Queue Integration Tests', () => {
        const t2 = await queue.enqueue('test', { value: 'ignored' }, { idempotencyKey: key });
        expect(t2.id, 'Same ID if not replaced').toBe(t1.id);
        const check1 = await queue.getTask(t1.id);
-        expect(check1?.data
+        expect((check1?.data)['value']).toBe('original');
        // 3. Replace Strategy: Should replace existing task with new data AND new ID
        const t3 = await queue.enqueueMany([{ type: 'test', data: { value: 'updated' }, idempotencyKey: key }], { replace: true, returnTasks: true });
        expect(t3[0].id, 'New ID if replaced').not.toBe(t1.id);
        // New task should have new data
        const checkNew = await queue.getTask(t3[0].id);
-        expect(checkNew?.data
+        expect((checkNew?.data)['value']).toBe('updated');
        expect(checkNew?.tries).toBe(0);
    });
    it('Retries and Failures', async () => {
@@ -89,14 +89,14 @@ describe('Queue Integration Tests', () => {
        expect(attempt1?.id).toBe(task.id);
        await queue.fail(attempt1, { message: 'oops' });
        // Force reschedule to now to bypass retryDelay
-        await queue.reschedule(task.id,
+        await queue.reschedule(task.id, currentTimestamp());
        // Try 2
        const attempt2 = await queue.dequeue();
        expect(attempt2?.id).toBe(task.id);
        expect(attempt2?.tries).toBe(2);
        // Fail fatally
-        await queue.fail(attempt2, { message: 'fatal error' }, true);
-        await
+        await queue.fail(attempt2, { message: 'fatal error' }, { fatal: true });
+        await assertTaskStatus(task.id, TaskStatus.Dead, 'Task is Dead after fatal error');
    });
    it('Hierarchy (Parent/Child)', async () => {
        // A. Create Parent
@@ -108,14 +108,14 @@ describe('Queue Integration Tests', () => {
        const child = await queue.enqueue('test', { value: 'child-manual' }, { parentId: p.id });
        // D. "Finish" Parent execution.
        await queue.complete(pTask);
-        // await
+        // await assertTaskStatus(p.id, TaskStatus.Waiting, 'Parent entered WAITING state'); // Depends on implementation details of auto-waiting
    });
    it('Batching', async () => {
        const batch = queue.batch();
        for (let i = 0; i < 5; i++) {
            batch.add('test', { value: `batch-${i}` });
        }
-        const tasks = await batch.enqueue(true);
+        const tasks = await batch.enqueue({ returnTasks: true });
        expect(tasks.length).toBe(5);
        const dequeuedBatch = await queue.dequeueMany(5);
        expect(dequeuedBatch.length).toBe(5);
@@ -149,47 +149,47 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
    });
    describe('Basic Lifecycle', () => {
        it('should enqueue and dequeue a task', async () => {
-            await queue.enqueue('
+            await queue.enqueue('foo', { foo: 'bar' });
            const task = await queue.dequeue();
            expect(task).toBeDefined();
            expect(task?.data).toEqual({ foo: 'bar' });
-            expect(task?.status).toBe(
+            expect(task?.status).toBe(TaskStatus.Running);
            expect(task?.tries).toBe(1);
        });
        it('should complete a task successfully', async () => {
-            const task = await queue.enqueue('
+            const task = await queue.enqueue('foo', { foo: 'bar' });
            const dequeued = await queue.dequeue();
-            await queue.complete(dequeued, { result: true });
+            await queue.complete(dequeued, { result: { result: true } });
            const updated = await queue.getTask(task.id);
-            expect(updated?.status).toBe(
+            expect(updated?.status).toBe(TaskStatus.Completed);
            expect(updated?.result).toEqual({ result: true });
-            expect(updated
+            expect(updated.completeTimestamp > 0).toBe(true);
        });
        it('should fail a task and increment tries', async () => {
-            const task = await queue.enqueue('
+            const task = await queue.enqueue('foo', { foo: 'bar' });
            const dequeued = await queue.dequeue();
            await queue.fail(dequeued, new Error('temp failure'));
            const updated = await queue.getTask(task.id);
-            expect(updated?.status).toBe(
+            expect(updated?.status).toBe(TaskStatus.Pending);
            expect(updated?.tries).toBe(1);
            expect(updated?.error).toBeDefined();
        });
    });
    describe('Priority and Scheduling', () => {
        it('should dequeue tasks in priority order (lower number first)', async () => {
-            await queue.enqueue('
-            await queue.enqueue('
-            await queue.enqueue('
+            await queue.enqueue('foo', { foo: 'low' }, { priority: 2000 });
+            await queue.enqueue('foo', { foo: 'high' }, { priority: 10 });
+            await queue.enqueue('foo', { foo: 'mid' }, { priority: 1000 });
            const t1 = await queue.dequeue();
            const t2 = await queue.dequeue();
            const t3 = await queue.dequeue();
-            expect(t1?.data
-            expect(t2?.data
-            expect(t3?.data
+            expect((t1?.data)['foo']).toBe('high');
+            expect((t2?.data)['foo']).toBe('mid');
+            expect((t3?.data)['foo']).toBe('low');
        });
        it('should not dequeue a task scheduled in the future', async () => {
            const future = currentTimestamp() + 500;
-            await queue.enqueue('
+            await queue.enqueue('foo', { foo: 'future' }, { scheduleTimestamp: future });
            const task = await queue.dequeue();
            expect(task).toBeUndefined();
            await timeout(600);
@@ -202,9 +202,9 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
            const queueProvider = injector.resolve(TaskQueueProvider);
            const limitedQueue = queueProvider.get(`limit-test-${Date.now()}`, { globalConcurrency: 2 });
            await limitedQueue.enqueueMany([
-                { type: '
-                { type: '
-                { type: '
+                { type: 'foo', data: { foo: '1' } },
+                { type: 'foo', data: { foo: '2' } },
+                { type: 'foo', data: { foo: '3' } },
            ]);
            const t1 = await limitedQueue.dequeue();
            const t2 = await limitedQueue.dequeue();
@@ -221,9 +221,9 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
    describe('Circuit Breaker', () => {
        it('should trip the breaker after threshold failures', async () => {
            // Config: circuitBreakerThreshold: 2 (set in beforeEach)
-            await queue.enqueue('
-            await queue.enqueue('
-            await queue.enqueue('
+            await queue.enqueue('foo', { foo: '1' });
+            await queue.enqueue('foo', { foo: '2' });
+            await queue.enqueue('foo', { foo: '3' });
            await queue.fail((await queue.dequeue()), 'err');
            await queue.fail((await queue.dequeue()), 'err');
            // Breaker should be Open
@@ -232,8 +232,8 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
        });
        it('should allow a single probe in Half-Open state', async () => {
            await queue.enqueueMany([
-                { type: '
-                { type: '
+                { type: 'foo', data: { foo: '1' } },
+                { type: 'foo', data: { foo: '2' } },
            ]);
            await queue.fail((await queue.dequeue()), 'err');
            await queue.fail((await queue.dequeue()), 'err');
@@ -247,13 +247,13 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
    });
    describe('Timeouts and Maintenance (Pruning)', () => {
        it('should recover "Zombie" tasks (crashed workers)', async () => {
-            const task = await queue.enqueue('
+            const task = await queue.enqueue('foo', { foo: 'zombie' });
            await queue.dequeue(); // Task is now Running with a token
            // processTimeout is 200ms. Wait for it to expire.
            await timeout(300);
            await queue.maintenance();
            const recovered = await queue.getTask(task.id);
-            expect(recovered?.status).toBe(
+            expect(recovered?.status).toBe(TaskStatus.Pending);
            expect(recovered?.tries).toBe(1);
            expect(recovered?.token).toBeNull();
        });
@@ -261,25 +261,25 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
            // Re-configure queue with very short execution timeout
            const queueProvider = injector.resolve(TaskQueueProvider);
            const shortQueue = queueProvider.get(`prune-test-${Date.now()}`, { maxExecutionTime: 100 });
-            const task = await shortQueue.enqueue('
+            const task = await shortQueue.enqueue('foo', { foo: 'long-running' });
            await shortQueue.dequeue();
            await timeout(200);
            await shortQueue.maintenance();
            const updated = await shortQueue.getTask(task.id);
-            expect(updated?.status).toBe(
+            expect(updated?.status).toBe(TaskStatus.Dead);
            expect(updated?.error?.message).toContain('Hard Execution Timeout');
            await shortQueue.clear();
        });
        it('should touch a task to extend token', async () => {
-            const task = await queue.enqueue('
+            const task = await queue.enqueue('foo', { foo: 'work' });
            const dequeued = await queue.dequeue();
            const initialLock = dequeued.visibilityDeadline;
            await timeout(50);
            const touched = await queue.touch(dequeued);
-            expect(touched?.visibilityDeadline).
+            expect(touched?.visibilityDeadline > initialLock).toBe(true);
        });
        it('should prevent touching if token is lost (stolen by another worker)', async () => {
-            await queue.enqueue('
+            await queue.enqueue('foo', { foo: 'work' });
            const dequeued = await queue.dequeue();
            expect(dequeued).toBeDefined();
            // processTimeout is 200ms. Wait for it to expire.
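For reference, the per-queue options exercised in these tests, gathered into one sketch (option names come from the hunks above; defaults and the full option set are not shown in this diff):

// As in the tests: const queueProvider = injector.resolve(TaskQueueProvider);
declare const queueProvider: { get(name: string, options?: Record<string, unknown>): unknown };

const queue = queueProvider.get(`orders-${Date.now()}`, {
  globalConcurrency: 2,       // at most two tasks of this queue Running at the same time
  maxExecutionTime: 100,      // hard execution timeout in ms; maintenance() marks overdue tasks Dead
  circuitBreakerThreshold: 2, // consecutive failures before the breaker opens
});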
@@ -294,34 +294,34 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
    describe('Batch Operations', () => {
        it('should complete many tasks efficiently', async () => {
            const tasks = await queue.enqueueMany([
-                { type: '
-                { type: '
+                { type: 'foo', data: { foo: '1' } },
+                { type: 'foo', data: { foo: '2' } },
            ], { returnTasks: true });
            const d1 = await queue.dequeue();
            const d2 = await queue.dequeue();
            await queue.completeMany([d1, d2]);
            const t1 = await queue.getTask(tasks[0].id);
            const t2 = await queue.getTask(tasks[1].id);
-            expect(t1?.status).toBe(
-            expect(t2?.status).toBe(
+            expect(t1?.status).toBe(TaskStatus.Completed);
+            expect(t2?.status).toBe(TaskStatus.Completed);
        });
    });
    describe('Rescheduling', () => {
        it('should reschedule and refund tries if running', async () => {
-            const task = await queue.enqueue('
+            const task = await queue.enqueue('foo', { foo: 'reschedule-me' });
            const dequeued = await queue.dequeue();
            expect(dequeued?.tries).toBe(1);
            const inFuture = currentTimestamp() + 1000;
            await queue.reschedule(dequeued.id, inFuture);
            const updated = await queue.getTask(task.id);
-            expect(updated?.status).toBe(
+            expect(updated?.status).toBe(TaskStatus.Pending);
            expect(updated?.tries).toBe(0); // Refunded
            expect(updated?.scheduleTimestamp).toBe(inFuture);
        });
    });
    describe('TaskContext (Worker DX)', () => {
        it('checkpoint() should update progress and handle token loss', async () => {
-            const task = await queue.enqueue('
+            const task = await queue.enqueue('foo', { foo: 'progress' });
            const dequeued = await queue.dequeue();
            // In real scenarios TaskContext wraps the queue logic.
            // Here we just verify touch/checkpoint effects on the DB.
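The zombie-recovery and touch tests describe the keep-alive contract for long-running handlers: a dequeued task holds a token with a visibility deadline, and touch() extends that deadline before the process timeout lets another worker (or maintenance) reclaim the task. A sketch of a worker loop built on that contract; the declared queue shape and doLongRunningWork are illustrative assumptions, not package code:

// Sketch only: keep a claimed task alive while working on it.
declare const queue: {
  dequeue(): Promise<{ data: unknown } | undefined>;
  touch(task: unknown): Promise<unknown>;
  complete(task: unknown): Promise<void>;
};
declare function doLongRunningWork(data: unknown): Promise<void>; // illustrative

const task = await queue.dequeue();

if (task != undefined) {
  // These tests use a 200 ms processTimeout, so refresh the claim well before that.
  const keepAlive = setInterval(() => void queue.touch(task), 100);

  try {
    await doLongRunningWork(task.data);
    await queue.complete(task);
  }
  finally {
    clearInterval(keepAlive);
  }
}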
package/task-queue/tests/task-context.test.d.ts
ADDED

@@ -0,0 +1 @@
+export {};