@tstdl/base 0.93.92 → 0.93.94
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/authentication/client/authentication.service.js +3 -2
- package/document-management/server/services/document-validation.service.js +5 -5
- package/document-management/server/services/document-workflow.service.js +2 -2
- package/orm/sqls/sqls.d.ts +6 -6
- package/package.json +2 -2
- package/task-queue/enqueue-batch.d.ts +16 -11
- package/task-queue/enqueue-batch.js +2 -2
- package/task-queue/index.d.ts +2 -1
- package/task-queue/index.js +2 -1
- package/task-queue/postgres/drizzle/{0000_thin_black_panther.sql → 0000_simple_invisible_woman.sql} +5 -5
- package/task-queue/postgres/drizzle/meta/0000_snapshot.json +11 -11
- package/task-queue/postgres/drizzle/meta/_journal.json +2 -2
- package/task-queue/postgres/module.js +2 -2
- package/task-queue/postgres/schemas.d.ts +1 -1
- package/task-queue/postgres/schemas.js +2 -2
- package/task-queue/postgres/task-queue.d.ts +101 -47
- package/task-queue/postgres/task-queue.js +149 -139
- package/task-queue/postgres/task-queue.provider.d.ts +3 -4
- package/task-queue/postgres/task-queue.provider.js +2 -2
- package/task-queue/postgres/task.model.d.ts +5 -5
- package/task-queue/postgres/task.model.js +5 -5
- package/task-queue/provider.d.ts +2 -2
- package/task-queue/task-context.d.ts +38 -18
- package/task-queue/task-context.js +35 -13
- package/task-queue/task-queue.d.ts +160 -132
- package/task-queue/task-queue.js +8 -8
- package/task-queue/tests/complex.test.js +36 -29
- package/task-queue/tests/dependencies.test.js +17 -17
- package/task-queue/tests/enqueue-item.test.d.ts +1 -0
- package/task-queue/tests/enqueue-item.test.js +12 -0
- package/task-queue/tests/queue-generic.test.d.ts +1 -0
- package/task-queue/tests/queue-generic.test.js +8 -0
- package/task-queue/tests/queue.test.js +50 -50
- package/task-queue/tests/task-context.test.d.ts +1 -0
- package/task-queue/tests/task-context.test.js +7 -0
- package/task-queue/tests/task-union.test.d.ts +1 -0
- package/task-queue/tests/task-union.test.js +18 -0
- package/task-queue/tests/typing.test.d.ts +1 -0
- package/task-queue/tests/typing.test.js +9 -0
- package/task-queue/tests/worker.test.js +16 -16
- package/task-queue/types.d.ts +48 -0
- package/task-queue/types.js +1 -0
@@ -1,6 +1,6 @@
 import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
 import { CancellationToken } from '../../cancellation/index.js';
-import { DependencyJoinMode, TaskQueueProvider,
+import { DependencyJoinMode, TaskQueueProvider, TaskStatus } from '../../task-queue/index.js';
 import { setupIntegrationTest } from '../../unit-test/index.js';
 import { timeout } from '../../utils/timing.js';
 describe('Queue Dependencies & Tree Tests', () => {
@@ -28,7 +28,7 @@ describe('Queue Dependencies & Tree Tests', () => {
 const dependent = await queue.enqueue('dependent', { foo: 'bar' }, {
 completeAfterTags: ['tag-a'],
 });
-expect(dependent.status).toBe(
+expect(dependent.status).toBe(TaskStatus.Waiting);
 // 2. Create the prerequisite task
 const prereq = await queue.enqueue('prereq', { val: 1 }, { tags: ['tag-a'] });
 // 3. Complete prereq
@@ -37,14 +37,14 @@ describe('Queue Dependencies & Tree Tests', () => {
 await queue.complete(dequeued);
 await queue.processPendingFanIn();
 const updatedDependent = await queue.getTask(dependent.id);
-expect(updatedDependent?.status).toBe(
+expect(updatedDependent?.status).toBe(TaskStatus.Completed);
 });
 it('should schedule a task to run after dependency completes (scheduleAfterTags)', async () => {
 // 1. Dependent task
 const dependent = await queue.enqueue('dependent', { foo: 'bar' }, {
 scheduleAfterTags: ['tag-b'],
 });
-expect(dependent.status).toBe(
+expect(dependent.status).toBe(TaskStatus.Waiting);
 // 2. Prereq
 const prereq = await queue.enqueue('prereq', {}, { tags: ['tag-b'] });
 // 3. Complete prereq
@@ -53,7 +53,7 @@ describe('Queue Dependencies & Tree Tests', () => {
 await queue.processPendingFanIn();
 // 5. Dependent should be Pending (ready to run)
 const updatedDependent = await queue.getTask(dependent.id);
-expect(updatedDependent?.status).toBe(
+expect(updatedDependent?.status).toBe(TaskStatus.Pending);
 // 6. Should be dequeueable
 const d2 = await queue.dequeue({ types: ['dependent'] });
 expect(d2?.id).toBe(dependent.id);
@@ -66,18 +66,18 @@ describe('Queue Dependencies & Tree Tests', () => {
 const prereq = await queue.enqueue('prereq', {}, { tags: ['tag-fail'] });
 const dequeued = await queue.dequeue({ types: ['prereq'] });
 // Fail fatally
-await queue.fail(dequeued, new Error('boom'), true);
+await queue.fail(dequeued, new Error('boom'), { fatal: true });
 // Trigger resolution
 await queue.processPendingFanIn();
 let updatedDependent;
 for (let i = 0; i < 20; i++) {
 await timeout(100);
 updatedDependent = await queue.getTask(dependent.id);
-if (updatedDependent?.status ===
+if (updatedDependent?.status === TaskStatus.Dead)
 break;
 await queue.processPendingFanIn(); // Retry processing if it didn't catch it yet
 }
-expect(updatedDependent?.status).toBe(
+expect(updatedDependent?.status).toBe(TaskStatus.Dead);
 expect(updatedDependent?.error?.code).toBe('DependencyFailed');
 });
 it('should respect DependencyJoinMode.Or', async () => {
@@ -92,7 +92,7 @@ describe('Queue Dependencies & Tree Tests', () => {
 await queue.complete(d1);
 await queue.processPendingFanIn();
 const updated = await queue.getTask(dependent.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Pending);
 });
 });
 describe('Tree Operations & Cancellation', () => {
@@ -106,16 +106,16 @@ describe('Queue Dependencies & Tree Tests', () => {
 // Get Tree
 const tree = await queue.getTree(root.id);
 expect(tree.length).toBe(3);
-expect(tree.map(t => t.id)).toContain(grandchild.id);
+expect(tree.map((t) => t.id)).toContain(grandchild.id);
 // Cancel Root
 await queue.cancel(root.id);
 // Verify all cancelled
 const tRoot = await queue.getTask(root.id);
 const tChild = await queue.getTask(child.id);
 const tGrand = await queue.getTask(grandchild.id);
-expect(tRoot?.status).toBe(
-expect(tChild?.status).toBe(
-expect(tGrand?.status).toBe(
+expect(tRoot?.status).toBe(TaskStatus.Cancelled);
+expect(tChild?.status).toBe(TaskStatus.Cancelled);
+expect(tGrand?.status).toBe(TaskStatus.Cancelled);
 });
 it('should cancel many by tags', async () => {
 await queue.enqueue('t1', {}, { tags: ['group-a'] });
@@ -125,19 +125,19 @@ describe('Queue Dependencies & Tree Tests', () => {
 // Check status
 const tasks = await queue.getManyByTags('group-a');
 expect(tasks.length).toBe(2);
-expect(tasks.every(t => t.status ===
+expect(tasks.every((t) => t.status === TaskStatus.Cancelled)).toBe(true);
 const tasksB = await queue.getManyByTags('group-b');
-expect(tasksB[0]?.status).toBe(
+expect(tasksB[0]?.status).toBe(TaskStatus.Pending);
 });
 });
 describe('Restart & Consumers', () => {
 it('should restart a dead/cancelled task', async () => {
 const task = await queue.enqueue('restart-test', {});
 const d = await queue.dequeue({ types: ['restart-test'] });
-await queue.fail(d, new Error('fatal'), true); // Dead
+await queue.fail(d, new Error('fatal'), { fatal: true }); // Dead
 await queue.restart(task.id);
 const updated = await queue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Pending);
 expect(updated?.tries).toBe(0);
 expect(updated?.error).toBeNull();
 });
@@ -0,0 +1 @@
+export {};
@@ -0,0 +1,12 @@
+import { describe, expectTypeOf, test } from 'vitest';
+describe('EnqueueManyItem Type Definitions', () => {
+test('EnqueueManyItem should be a discriminated union', () => {
+const item = {};
+if (item.type == 'test-task') {
+expectTypeOf(item.data).toEqualTypeOf();
+}
+else if (item.type == 'other-task') {
+expectTypeOf(item.data).toEqualTypeOf();
+}
+});
+});
@@ -0,0 +1 @@
+export {};
@@ -0,0 +1,8 @@
+import { describe, test, expectTypeOf } from 'vitest';
+describe('TaskQueue Class Generic', () => {
+test('TaskQueue should accept TaskDefinitionMap', () => {
+// If it compiles, it's good. But we can inspect the generic constraints.
+// For now, simple existence check.
+expectTypeOf().toBeObject();
+});
+});
@@ -1,5 +1,5 @@
 import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
-import { TaskQueueProvider,
+import { TaskQueueProvider, TaskStatus } from '../../task-queue/index.js';
 import { setupIntegrationTest } from '../../unit-test/index.js';
 import { currentTimestamp } from '../../utils/date-time.js';
 import { timeout } from '../../utils/timing.js';
@@ -8,7 +8,7 @@ describe('Queue Integration Tests', () => {
 let queue;
 const queueName = `test-queue-${Date.now()}`;
 // Helper to verify state in DB
-async function
+async function assertTaskStatus(id, state, message) {
 const task = await queue.getTask(id);
 expect(task?.status, message).toBe(state);
 }
@@ -46,12 +46,12 @@ describe('Queue Integration Tests', () => {
 const t2 = await queue.enqueue('test', { value: 'second' });
 const d1 = await queue.dequeue();
 expect(d1?.id).toBe(t1.id);
-expect(d1?.data
-await queue.complete(d1, { success: true });
-await
+expect((d1?.data)['value']).toBe('first');
+await queue.complete(d1, { result: { success: true } });
+await assertTaskStatus(t1.id, TaskStatus.Completed, 'Task 1 completed');
 const d2 = await queue.dequeue();
 expect(d2?.id).toBe(t2.id);
-await queue.complete(d2, { success: true });
+await queue.complete(d2, { result: { success: true } });
 });
 it('Priorities', async () => {
 // Priority 1000 (default)
@@ -73,13 +73,13 @@ describe('Queue Integration Tests', () => {
 const t2 = await queue.enqueue('test', { value: 'ignored' }, { idempotencyKey: key });
 expect(t2.id, 'Same ID if not replaced').toBe(t1.id);
 const check1 = await queue.getTask(t1.id);
-expect(check1?.data
+expect((check1?.data)['value']).toBe('original');
 // 3. Replace Strategy: Should replace existing task with new data AND new ID
 const t3 = await queue.enqueueMany([{ type: 'test', data: { value: 'updated' }, idempotencyKey: key }], { replace: true, returnTasks: true });
 expect(t3[0].id, 'New ID if replaced').not.toBe(t1.id);
 // New task should have new data
 const checkNew = await queue.getTask(t3[0].id);
-expect(checkNew?.data
+expect((checkNew?.data)['value']).toBe('updated');
 expect(checkNew?.tries).toBe(0);
 });
 it('Retries and Failures', async () => {
@@ -89,14 +89,14 @@ describe('Queue Integration Tests', () => {
 expect(attempt1?.id).toBe(task.id);
 await queue.fail(attempt1, { message: 'oops' });
 // Force reschedule to now to bypass retryDelay
-await queue.reschedule(task.id,
+await queue.reschedule(task.id, currentTimestamp());
 // Try 2
 const attempt2 = await queue.dequeue();
 expect(attempt2?.id).toBe(task.id);
 expect(attempt2?.tries).toBe(2);
 // Fail fatally
-await queue.fail(attempt2, { message: 'fatal error' }, true);
-await
+await queue.fail(attempt2, { message: 'fatal error' }, { fatal: true });
+await assertTaskStatus(task.id, TaskStatus.Dead, 'Task is Dead after fatal error');
 });
 it('Hierarchy (Parent/Child)', async () => {
 // A. Create Parent
@@ -108,14 +108,14 @@ describe('Queue Integration Tests', () => {
 const child = await queue.enqueue('test', { value: 'child-manual' }, { parentId: p.id });
 // D. "Finish" Parent execution.
 await queue.complete(pTask);
-// await
+// await assertTaskStatus(p.id, TaskStatus.Waiting, 'Parent entered WAITING state'); // Depends on implementation details of auto-waiting
 });
 it('Batching', async () => {
 const batch = queue.batch();
 for (let i = 0; i < 5; i++) {
 batch.add('test', { value: `batch-${i}` });
 }
-const tasks = await batch.enqueue(true);
+const tasks = await batch.enqueue({ returnTasks: true });
 expect(tasks.length).toBe(5);
 const dequeuedBatch = await queue.dequeueMany(5);
 expect(dequeuedBatch.length).toBe(5);
@@ -149,47 +149,47 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 });
 describe('Basic Lifecycle', () => {
 it('should enqueue and dequeue a task', async () => {
-await queue.enqueue('
+await queue.enqueue('foo', { foo: 'bar' });
 const task = await queue.dequeue();
 expect(task).toBeDefined();
 expect(task?.data).toEqual({ foo: 'bar' });
-expect(task?.status).toBe(
+expect(task?.status).toBe(TaskStatus.Running);
 expect(task?.tries).toBe(1);
 });
 it('should complete a task successfully', async () => {
-const task = await queue.enqueue('
+const task = await queue.enqueue('foo', { foo: 'bar' });
 const dequeued = await queue.dequeue();
-await queue.complete(dequeued, { result: true });
+await queue.complete(dequeued, { result: { result: true } });
 const updated = await queue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Completed);
 expect(updated?.result).toEqual({ result: true });
-expect(updated
+expect(updated.completeTimestamp > 0).toBe(true);
 });
 it('should fail a task and increment tries', async () => {
-const task = await queue.enqueue('
+const task = await queue.enqueue('foo', { foo: 'bar' });
 const dequeued = await queue.dequeue();
 await queue.fail(dequeued, new Error('temp failure'));
 const updated = await queue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Pending);
 expect(updated?.tries).toBe(1);
 expect(updated?.error).toBeDefined();
 });
 });
 describe('Priority and Scheduling', () => {
 it('should dequeue tasks in priority order (lower number first)', async () => {
-await queue.enqueue('
-await queue.enqueue('
-await queue.enqueue('
+await queue.enqueue('foo', { foo: 'low' }, { priority: 2000 });
+await queue.enqueue('foo', { foo: 'high' }, { priority: 10 });
+await queue.enqueue('foo', { foo: 'mid' }, { priority: 1000 });
 const t1 = await queue.dequeue();
 const t2 = await queue.dequeue();
 const t3 = await queue.dequeue();
-expect(t1?.data
-expect(t2?.data
-expect(t3?.data
+expect((t1?.data)['foo']).toBe('high');
+expect((t2?.data)['foo']).toBe('mid');
+expect((t3?.data)['foo']).toBe('low');
 });
 it('should not dequeue a task scheduled in the future', async () => {
 const future = currentTimestamp() + 500;
-await queue.enqueue('
+await queue.enqueue('foo', { foo: 'future' }, { scheduleTimestamp: future });
 const task = await queue.dequeue();
 expect(task).toBeUndefined();
 await timeout(600);
@@ -202,9 +202,9 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 const queueProvider = injector.resolve(TaskQueueProvider);
 const limitedQueue = queueProvider.get(`limit-test-${Date.now()}`, { globalConcurrency: 2 });
 await limitedQueue.enqueueMany([
-{ type: '
-{ type: '
-{ type: '
+{ type: 'foo', data: { foo: '1' } },
+{ type: 'foo', data: { foo: '2' } },
+{ type: 'foo', data: { foo: '3' } },
 ]);
 const t1 = await limitedQueue.dequeue();
 const t2 = await limitedQueue.dequeue();
@@ -221,9 +221,9 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 describe('Circuit Breaker', () => {
 it('should trip the breaker after threshold failures', async () => {
 // Config: circuitBreakerThreshold: 2 (set in beforeEach)
-await queue.enqueue('
-await queue.enqueue('
-await queue.enqueue('
+await queue.enqueue('foo', { foo: '1' });
+await queue.enqueue('foo', { foo: '2' });
+await queue.enqueue('foo', { foo: '3' });
 await queue.fail((await queue.dequeue()), 'err');
 await queue.fail((await queue.dequeue()), 'err');
 // Breaker should be Open
@@ -232,8 +232,8 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 });
 it('should allow a single probe in Half-Open state', async () => {
 await queue.enqueueMany([
-{ type: '
-{ type: '
+{ type: 'foo', data: { foo: '1' } },
+{ type: 'foo', data: { foo: '2' } },
 ]);
 await queue.fail((await queue.dequeue()), 'err');
 await queue.fail((await queue.dequeue()), 'err');
@@ -247,13 +247,13 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 });
 describe('Timeouts and Maintenance (Pruning)', () => {
 it('should recover "Zombie" tasks (crashed workers)', async () => {
-const task = await queue.enqueue('
+const task = await queue.enqueue('foo', { foo: 'zombie' });
 await queue.dequeue(); // Task is now Running with a token
 // processTimeout is 200ms. Wait for it to expire.
 await timeout(300);
 await queue.maintenance();
 const recovered = await queue.getTask(task.id);
-expect(recovered?.status).toBe(
+expect(recovered?.status).toBe(TaskStatus.Pending);
 expect(recovered?.tries).toBe(1);
 expect(recovered?.token).toBeNull();
 });
@@ -261,25 +261,25 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 // Re-configure queue with very short execution timeout
 const queueProvider = injector.resolve(TaskQueueProvider);
 const shortQueue = queueProvider.get(`prune-test-${Date.now()}`, { maxExecutionTime: 100 });
-const task = await shortQueue.enqueue('
+const task = await shortQueue.enqueue('foo', { foo: 'long-running' });
 await shortQueue.dequeue();
 await timeout(200);
 await shortQueue.maintenance();
 const updated = await shortQueue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Dead);
 expect(updated?.error?.message).toContain('Hard Execution Timeout');
 await shortQueue.clear();
 });
 it('should touch a task to extend token', async () => {
-const task = await queue.enqueue('
+const task = await queue.enqueue('foo', { foo: 'work' });
 const dequeued = await queue.dequeue();
 const initialLock = dequeued.visibilityDeadline;
 await timeout(50);
 const touched = await queue.touch(dequeued);
-expect(touched?.visibilityDeadline).
+expect(touched?.visibilityDeadline > initialLock).toBe(true);
 });
 it('should prevent touching if token is lost (stolen by another worker)', async () => {
-await queue.enqueue('
+await queue.enqueue('foo', { foo: 'work' });
 const dequeued = await queue.dequeue();
 expect(dequeued).toBeDefined();
 // processTimeout is 200ms. Wait for it to expire.
@@ -294,34 +294,34 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
 describe('Batch Operations', () => {
 it('should complete many tasks efficiently', async () => {
 const tasks = await queue.enqueueMany([
-{ type: '
-{ type: '
+{ type: 'foo', data: { foo: '1' } },
+{ type: 'foo', data: { foo: '2' } },
 ], { returnTasks: true });
 const d1 = await queue.dequeue();
 const d2 = await queue.dequeue();
 await queue.completeMany([d1, d2]);
 const t1 = await queue.getTask(tasks[0].id);
 const t2 = await queue.getTask(tasks[1].id);
-expect(t1?.status).toBe(
-expect(t2?.status).toBe(
+expect(t1?.status).toBe(TaskStatus.Completed);
+expect(t2?.status).toBe(TaskStatus.Completed);
 });
 });
 describe('Rescheduling', () => {
 it('should reschedule and refund tries if running', async () => {
-const task = await queue.enqueue('
+const task = await queue.enqueue('foo', { foo: 'reschedule-me' });
 const dequeued = await queue.dequeue();
 expect(dequeued?.tries).toBe(1);
 const inFuture = currentTimestamp() + 1000;
 await queue.reschedule(dequeued.id, inFuture);
 const updated = await queue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Pending);
 expect(updated?.tries).toBe(0); // Refunded
 expect(updated?.scheduleTimestamp).toBe(inFuture);
 });
 });
 describe('TaskContext (Worker DX)', () => {
 it('checkpoint() should update progress and handle token loss', async () => {
-const task = await queue.enqueue('
+const task = await queue.enqueue('foo', { foo: 'progress' });
 const dequeued = await queue.dequeue();
 // In real scenarios TaskContext wraps the queue logic.
 // Here we just verify touch/checkpoint effects on the DB.
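An aside on the API shape these test changes imply: `complete`, `fail`, and `batch.enqueue` now appear to take options objects instead of positional values. The sketch below simply restates those call shapes; the `queue` parameter, the task payloads, and the helper name are illustrative assumptions, not documented API.

```ts
// Sketch only: restates the call shapes visible in the updated tests above.
// `queue` is assumed to be an already-resolved task queue instance (the tests
// obtain one via TaskQueueProvider); its type is left as `any` on purpose.
export async function exampleUsage(queue: any): Promise<void> {
  // enqueue/dequeue keep their previous shape.
  const task = await queue.enqueue('test', { value: 'first' });
  const dequeued = await queue.dequeue();

  // complete(): the result is now wrapped in an options object.
  await queue.complete(dequeued, { result: { success: true } });

  // fail(): the fatal flag moved from a positional boolean into an options object.
  // await queue.fail(dequeued, new Error('boom'), { fatal: true });

  // batch.enqueue(): `returnTasks` moved into an options object as well.
  const batch = queue.batch();
  batch.add('test', { value: 'batch-0' });
  const created = await batch.enqueue({ returnTasks: true });

  console.log(task.id, created.length);
}
```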
@@ -0,0 +1 @@
+export {};
@@ -0,0 +1 @@
+export {};
@@ -0,0 +1,18 @@
+import { describe, expectTypeOf, test } from 'vitest';
+describe('Task Discriminated Union', () => {
+test('Task should be a discriminated union based on definition map', () => {
+// Should be a union
+expectTypeOf().toBeObject();
+// Discrimination check
+const task = {};
+if (task.type == 'test-task') {
+expectTypeOf(task.data).toEqualTypeOf();
+expectTypeOf(task.state).toEqualTypeOf();
+expectTypeOf(task.result).toEqualTypeOf();
+}
+else if (task.type == 'other-task') {
+expectTypeOf(task.data).toEqualTypeOf();
+expectTypeOf(task.result).toEqualTypeOf();
+}
+});
+});
@@ -0,0 +1 @@
+export {};
@@ -0,0 +1,9 @@
+import { describe, expectTypeOf, test } from 'vitest';
+describe('TaskQueue Type Definitions', () => {
+test('TaskDefinition and TaskDefinitionMap should be defined', () => {
+expectTypeOf().toEqualTypeOf();
+expectTypeOf().toEqualTypeOf();
+expectTypeOf().toEqualTypeOf();
+expectTypeOf().toEqualTypeOf();
+});
+});
@@ -1,6 +1,6 @@
 import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
 import { CancellationToken } from '../../cancellation/index.js';
-import {
+import { TaskProcessResult, TaskQueueProvider, TaskStatus } from '../../task-queue/index.js';
 import { setupIntegrationTest } from '../../unit-test/index.js';
 import { timeout } from '../../utils/timing.js';
 describe('Worker & Base Class Tests', () => {
@@ -30,8 +30,8 @@ describe('Worker & Base Class Tests', () => {
 const processed = [];
 const token = new CancellationToken();
 queue.process({ cancellationSignal: token }, async (context) => {
-processed.push(context.data
-return
+processed.push(context.data['val']);
+return TaskProcessResult.Complete();
 });
 // Wait until 2 tasks are processed
 for (let i = 0; i < 20; i++) {
@@ -45,8 +45,8 @@ describe('Worker & Base Class Tests', () => {
 expect(processed.length).toBe(2);
 const check1 = await queue.getTask(t1.id);
 const check2 = await queue.getTask(t2.id);
-expect(check1?.status).toBe(
-expect(check2?.status).toBe(
+expect(check1?.status).toBe(TaskStatus.Completed);
+expect(check2?.status).toBe(TaskStatus.Completed);
 });
 it('should handle errors in worker gracefully', async () => {
 const task = await queue.enqueue('fail', {});
@@ -57,7 +57,7 @@ describe('Worker & Base Class Tests', () => {
 await timeout(200);
 token.set();
 const updated = await queue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Pending); // Should retry
 expect(updated?.tries).toBe(1);
 expect(updated?.error?.message).toBe('worker error');
 });
@@ -71,8 +71,8 @@ describe('Worker & Base Class Tests', () => {
 const token = new CancellationToken();
 queue.processBatch({ batchSize: 2, cancellationSignal: token }, async (context) => {
 expect(context.tasks.length).toBeLessThanOrEqual(2);
-context.tasks.forEach(t => processedBatch.push(t.data
-return context.tasks.map(() =>
+context.tasks.forEach(t => processedBatch.push(t.data['v']));
+return context.tasks.map(() => TaskProcessResult.Complete());
 });
 for (let i = 0; i < 20; i++) {
 if (processedBatch.length === 3)
@@ -90,13 +90,13 @@ describe('Worker & Base Class Tests', () => {
 // Simulate long work > visibilityTimeout (500ms)
 await timeout(700);
 executed = true;
-return
+return TaskProcessResult.Complete();
 });
 await timeout(1000);
 token.set();
 expect(executed).toBe(true);
 const updated = await queue.getTask(task.id);
-expect(updated?.status).toBe(
+expect(updated?.status).toBe(TaskStatus.Completed);
 });
 it('should handle TaskResult actions (Fail, Reschedule)', async () => {
 const tFail = await queue.enqueue('fail-action', {});
@@ -106,12 +106,12 @@ describe('Worker & Base Class Tests', () => {
 queue.process({ cancellationSignal: token }, async (context) => {
 processed.add(context.id);
 if (context.id === tFail.id) {
-return
+return TaskProcessResult.Fail(new Error('explicit fail'));
 }
 if (context.id === tResched.id) {
-return
+return TaskProcessResult.RescheduleBy(1000);
 }
-return
+return TaskProcessResult.Complete();
 });
 for (let i = 0; i < 20; i++) {
 if (processed.size === 2)
@@ -120,10 +120,10 @@ describe('Worker & Base Class Tests', () => {
 }
 token.set();
 const uFail = await queue.getTask(tFail.id);
-expect(uFail?.status).toBe(
+expect(uFail?.status).toBe(TaskStatus.Pending); // Retry
 expect(uFail?.error?.message).toBe('explicit fail');
 const uResched = await queue.getTask(tResched.id);
-expect(uResched?.status).toBe(
+expect(uResched?.status).toBe(TaskStatus.Pending);
 expect(uResched?.scheduleTimestamp).toBeGreaterThan(Date.now());
 });
 it('should exercise TaskContext methods', async () => {
@@ -150,7 +150,7 @@ describe('Worker & Base Class Tests', () => {
 const otherChildren = await context.spawnMany(otherQueue, [{ type: 'other', data: { x: 2 } }]);
 expect(otherChildren[0]?.parentId).toBe(task.id);
 executed = true;
-return
+return TaskProcessResult.Complete();
 });
 for (let i = 0; i < 20; i++) {
 if (executed)
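Another aside: the worker tests above now return explicit `TaskProcessResult` values (`Complete`, `Fail`, `RescheduleBy`) from the process callback instead of bare return values. A hedged sketch of that pattern follows; the import specifiers, the `queue` type, and the data keys are assumptions drawn only from the call shapes in the diff.

```ts
// Sketch only, based on the call shapes in the worker tests above. The import
// specifiers are assumptions (the tests use package-internal relative paths);
// adjust them to however your project resolves @tstdl/base submodules.
import { CancellationToken } from '@tstdl/base/cancellation';
import { TaskProcessResult } from '@tstdl/base/task-queue';

export function startWorker(queue: any): { stop(): void } {
  const token = new CancellationToken();

  // process() keeps handling dequeued tasks until the cancellation signal is set.
  queue.process({ cancellationSignal: token }, async (context: any) => {
    if (context.data['mode'] === 'fail') {
      // Report an explicit failure; the task remains retryable unless marked fatal.
      return TaskProcessResult.Fail(new Error('explicit fail'));
    }

    if (context.data['mode'] === 'later') {
      // Push the task 1000 ms into the future.
      return TaskProcessResult.RescheduleBy(1000);
    }

    // Default path: mark the task as completed.
    return TaskProcessResult.Complete();
  });

  return { stop: () => token.set() };
}
```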
@@ -0,0 +1,48 @@
+import type { BatchTaskContext, TaskContext } from './task-context.js';
+import type { Task, TaskProcessResult } from './task-queue.js';
+export type TaskDefinition<Data = unknown, State = unknown, Result = unknown> = {
+data: Data;
+state: State;
+result: Result;
+};
+export type TaskDefinitionMap<Definition extends Record<string, TaskDefinition> = Record<string, TaskDefinition>> = Definition;
+export type TaskTypes<Definitions extends TaskDefinitionMap> = Extract<keyof Definitions, string>;
+export type TaskOfType<Definitions extends TaskDefinitionMap, Type extends TaskTypes<Definitions>> = string extends TaskTypes<Definitions> ? Task<Definitions> & {
+type: Type;
+} : Extract<Task<Definitions>, {
+type: Type;
+}>;
+export type TaskData<Definitions extends TaskDefinitionMap, Type extends TaskTypes<Definitions>> = Definitions[Type]['data'];
+export type TaskState<Definitions extends TaskDefinitionMap, Type extends TaskTypes<Definitions>> = Definitions[Type]['state'];
+export type TaskResult<Definitions extends TaskDefinitionMap, Type extends TaskTypes<Definitions>> = Definitions[Type]['result'];
+export type TasksResults<Tasks extends Task<any>[]> = {
+[I in keyof Tasks]: Tasks[I] extends Task<infer Definitions> ? TaskResult<Definitions, Tasks[I]['type']> : never;
+};
+export type TasksStates<Tasks extends Task<any>[]> = {
+[I in keyof Tasks]: Tasks[I] extends Task<infer Definitions> ? TaskState<Definitions, Tasks[I]['type']> : never;
+};
+export type TaskProcessResultPayload<Result> = {
+action: 'complete';
+result: Result | undefined;
+} | {
+action: 'fail';
+error: unknown;
+fatal: boolean;
+} | {
+action: 'reschedule';
+timestamp: number;
+};
+export interface ProcessWorker<Definitions extends TaskDefinitionMap, Type extends TaskTypes<Definitions>> {
+/**
+* A worker function that processes a single task.
+* @param context The task context providing data, logger, and orchestration helpers.
+*/
+(context: TaskContext<Definitions, Type>): TaskProcessResult<Definitions[Type]['result']> | Promise<TaskProcessResult<Definitions[Type]['result']>>;
+}
+export interface ProcessBatchWorker<Definitions extends TaskDefinitionMap, Type extends TaskTypes<Definitions>> {
+/**
+* A worker function that processes a batch of tasks.
+* @param context The batch context providing tasks and helpers.
+*/
+(context: BatchTaskContext<Definitions, Type>): TaskProcessResult<Definitions[Type]['result']>[] | Promise<TaskProcessResult<Definitions[Type]['result']>[]>;
+}
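The new type declarations above (apparently `package/task-queue/types.d.ts`, going by the file list) revolve around `TaskDefinition` and `TaskDefinitionMap`. A small sketch of how a definition map composes with the derived helpers; the task names, payload shapes, and import specifier are illustrative assumptions.

```ts
// Illustration only: hypothetical task types and payload shapes. The import
// specifier is an assumption; the declarations themselves come from the
// types.d.ts shown in the diff above.
import type { TaskData, TaskDefinition, TaskDefinitionMap, TaskResult } from '@tstdl/base/task-queue';

// A definition map keyed by task type: data / state / result per type.
type MyTasks = TaskDefinitionMap<{
  'send-mail': TaskDefinition<{ to: string }, { attempts: number }, { messageId: string }>;
  'resize-image': TaskDefinition<{ url: string }, unknown, { width: number; height: number }>;
}>;

// The helper types pick out the per-type pieces:
type SendMailData = TaskData<MyTasks, 'send-mail'>;     // { to: string }
type SendMailResult = TaskResult<MyTasks, 'send-mail'>; // { messageId: string }
```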
@@ -0,0 +1 @@
+export {};