@tstdl/base 0.93.126 → 0.93.128

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/api/client/client.js +45 -9
  2. package/api/client/tests/api-client.test.d.ts +1 -0
  3. package/api/client/tests/api-client.test.js +194 -0
  4. package/api/types.d.ts +34 -2
  5. package/api/types.js +9 -2
  6. package/authentication/client/authentication.service.js +30 -11
  7. package/authentication/client/http-client.middleware.js +10 -3
  8. package/authentication/server/authentication.service.d.ts +12 -0
  9. package/authentication/server/authentication.service.js +14 -2
  10. package/authentication/tests/authentication.client-error-handling.test.js +23 -66
  11. package/authentication/tests/authentication.client-service-refresh.test.js +14 -14
  12. package/cancellation/token.d.ts +6 -0
  13. package/cancellation/token.js +8 -0
  14. package/http/client/adapters/undici.adapter.js +0 -2
  15. package/http/client/http-client-request.d.ts +2 -0
  16. package/http/client/http-client-request.js +4 -0
  17. package/http/client/http-client-response.d.ts +1 -1
  18. package/http/client/http-client-response.js +3 -2
  19. package/http/utils.d.ts +6 -0
  20. package/http/utils.js +71 -0
  21. package/injector/graph.js +27 -6
  22. package/injector/injector.js +2 -0
  23. package/mail/drizzle/0000_numerous_the_watchers.sql +8 -0
  24. package/mail/drizzle/meta/0000_snapshot.json +1 -32
  25. package/mail/drizzle/meta/_journal.json +2 -9
  26. package/object-storage/s3/tests/s3.object-storage.integration.test.js +22 -53
  27. package/orm/tests/repository-expiration.test.js +3 -3
  28. package/package.json +1 -1
  29. package/rate-limit/tests/postgres-rate-limiter.test.js +9 -7
  30. package/task-queue/tests/complex.test.js +22 -22
  31. package/task-queue/tests/dependencies.test.js +15 -13
  32. package/task-queue/tests/queue.test.js +13 -13
  33. package/task-queue/tests/worker.test.js +12 -12
  34. package/testing/integration-setup.d.ts +2 -0
  35. package/testing/integration-setup.js +13 -7
  36. package/utils/backoff.d.ts +27 -3
  37. package/utils/backoff.js +31 -9
  38. package/utils/index.d.ts +1 -0
  39. package/utils/index.js +1 -0
  40. package/utils/retry-with-backoff.d.ts +22 -0
  41. package/utils/retry-with-backoff.js +64 -0
  42. package/utils/tests/backoff.test.d.ts +1 -0
  43. package/utils/tests/backoff.test.js +41 -0
  44. package/utils/tests/retry-with-backoff.test.d.ts +1 -0
  45. package/utils/tests/retry-with-backoff.test.js +49 -0
  46. package/mail/drizzle/0000_previous_malcolm_colcord.sql +0 -13
  47. package/mail/drizzle/0001_flimsy_bloodscream.sql +0 -5
  48. package/mail/drizzle/meta/0001_snapshot.json +0 -69
@@ -1,27 +1,20 @@
1
1
  import { afterAll, beforeAll, describe, expect, it } from 'vitest';
2
2
  import { setupIntegrationTest } from '../../../testing/index.js';
3
3
  import { readBinaryStream } from '../../../utils/stream/stream-reader.js';
4
- import { configureS3ObjectStorage } from '../s3.object-storage-provider.js';
4
+ import { isUndefined } from '../../../utils/type-guards.js';
5
5
  import { S3ObjectStorage } from '../s3.object-storage.js';
6
6
  describe('S3ObjectStorage Integration', () => {
7
7
  let storage;
8
- const bucketName = 'integration-test-bucket';
9
8
  beforeAll(async () => {
10
9
  const { injector } = await setupIntegrationTest({
11
10
  modules: { objectStorage: true },
12
11
  });
13
- configureS3ObjectStorage({
14
- endpoint: 'http://127.0.0.1:9000',
15
- accessKey: 'tstdl-dev',
16
- secretKey: 'tstdl-dev',
17
- bucket: bucketName,
18
- region: 'us-east-1',
19
- forcePathStyle: true,
20
- injector,
21
- });
22
12
  storage = await injector.resolveAsync(S3ObjectStorage, 'test-module');
23
13
  });
24
14
  afterAll(async () => {
15
+ if (isUndefined(storage)) {
16
+ return;
17
+ }
25
18
  const objects = await storage.getObjects();
26
19
  for (const obj of objects) {
27
20
  await storage.deleteObject(obj.key);
@@ -80,7 +73,7 @@ describe('S3ObjectStorage Integration', () => {
80
73
  const key = 'signed-download.txt';
81
74
  await storage.uploadObject(key, new TextEncoder().encode('signed download'));
82
75
  const url = await storage.getDownloadUrl(key, Date.now() + 60000);
83
- expect(url).toContain('http://127.0.0.1:9000');
76
+ expect(url).toMatch(/http:\/\/(127\.0\.0\.1|localhost):9000/);
84
77
  const response = await fetch(url);
85
78
  expect(response.status).toBe(200);
86
79
  expect(await response.text()).toBe('signed download');
@@ -154,15 +147,8 @@ describe('S3ObjectStorage Integration', () => {
154
147
  expect(stat.metadata['extra-key']).toBe('extra-value');
155
148
  });
156
149
  it('should work with bucket per module', async () => {
157
- const { injector } = await setupIntegrationTest();
158
- configureS3ObjectStorage({
159
- endpoint: 'http://127.0.0.1:9000',
160
- accessKey: 'tstdl-dev',
161
- secretKey: 'tstdl-dev',
162
- bucketPerModule: true,
163
- region: 'us-east-1',
164
- forcePathStyle: true,
165
- injector,
150
+ const { injector } = await setupIntegrationTest({
151
+ modules: { objectStorage: true },
166
152
  });
167
153
  const moduleName = `test-bucket-per-module-${Math.floor(Math.random() * 1000000)}`;
168
154
  const perModuleStorage = await injector.resolveAsync(S3ObjectStorage, moduleName);
@@ -177,7 +163,7 @@ describe('S3ObjectStorage Integration', () => {
177
163
  const metadata = { 's3-test': 'true' };
178
164
  await storage.uploadObject(key, content, { metadata });
179
165
  const obj = await storage.getObject(key);
180
- expect(await obj.getResourceUri()).toBe(`s3://integration-test-bucket/test-module/${key}`);
166
+ expect(await obj.getResourceUri()).toBe(`s3://test-module/${key}`);
181
167
  expect(await obj.getContentLength()).toBe(content.length);
182
168
  expect(await obj.getMetadata()).toMatchObject(metadata);
183
169
  expect(new TextDecoder().decode(await obj.getContent())).toBe('s3 object');
@@ -242,25 +228,22 @@ describe('S3ObjectStorage Integration', () => {
242
228
  const key = 'signed-download-expires.txt';
243
229
  await storage.uploadObject(key, new TextEncoder().encode('signed download expires'));
244
230
  const url = await storage.getDownloadUrl(key, Date.now() + 60000, {
245
- 'Expires': new Date(Date.now() + 60000).toUTCString(),
231
+ Expires: new Date(Date.now() + 60000).toUTCString(),
246
232
  });
247
- expect(url).toContain('http://127.0.0.1:9000');
233
+ expect(url).toMatch(/http:\/\/(127\.0\.0\.1|localhost):9000/);
248
234
  const response = await fetch(url);
249
235
  expect(response.status).toBe(200);
250
236
  });
251
237
  it('should handle Forbidden error in ensureBucketExists with wrong credentials', async () => {
252
- const { injector } = await setupIntegrationTest();
253
- configureS3ObjectStorage({
254
- endpoint: 'http://127.0.0.1:9000',
255
- accessKey: 'wrong',
256
- secretKey: 'wrong',
257
- bucket: 'forbidden-bucket',
258
- region: 'us-east-1',
259
- forcePathStyle: true,
260
- injector,
238
+ const { injector } = await setupIntegrationTest({
239
+ modules: { objectStorage: true },
240
+ s3: {
241
+ accessKey: 'wrong',
242
+ secretKey: 'wrong',
243
+ },
261
244
  });
262
245
  try {
263
- await injector.resolveAsync(S3ObjectStorage, 'test-module');
246
+ await injector.resolveAsync(S3ObjectStorage, 'forbidden-bucket');
264
247
  expect.fail('Should have thrown');
265
248
  }
266
249
  catch (error) {
@@ -268,17 +251,10 @@ describe('S3ObjectStorage Integration', () => {
268
251
  }
269
252
  });
270
253
  it('should copy object between different storages', async () => {
271
- const { injector } = await setupIntegrationTest();
272
- configureS3ObjectStorage({
273
- endpoint: 'http://127.0.0.1:9000',
274
- accessKey: 'tstdl-dev',
275
- secretKey: 'tstdl-dev',
276
- bucket: 'another-bucket',
277
- region: 'us-east-1',
278
- forcePathStyle: true,
279
- injector,
254
+ const { injector } = await setupIntegrationTest({
255
+ modules: { objectStorage: true },
280
256
  });
281
- const anotherStorage = await injector.resolveAsync(S3ObjectStorage, 'another-module');
257
+ const anotherStorage = await injector.resolveAsync(S3ObjectStorage, 'another-bucket');
282
258
  const sourceKey = 'cross-storage-source.txt';
283
259
  const destKey = 'cross-storage-dest.txt';
284
260
  await storage.uploadObject(sourceKey, new TextEncoder().encode('cross storage content'));
@@ -288,15 +264,8 @@ describe('S3ObjectStorage Integration', () => {
288
264
  expect(new TextDecoder().decode(content)).toBe('cross storage content');
289
265
  });
290
266
  it('should cover ensureBucketExists with region', async () => {
291
- const { injector } = await setupIntegrationTest();
292
- configureS3ObjectStorage({
293
- endpoint: 'http://127.0.0.1:9000',
294
- accessKey: 'tstdl-dev',
295
- secretKey: 'tstdl-dev',
296
- bucketPerModule: true,
297
- region: 'us-east-1',
298
- forcePathStyle: true,
299
- injector,
267
+ const { injector } = await setupIntegrationTest({
268
+ modules: { objectStorage: true },
300
269
  });
301
270
  const perModuleStorage = await injector.resolveAsync(S3ObjectStorage, `region-test-${Math.floor(Math.random() * 1000000)}`);
302
271
  await perModuleStorage.ensureBucketExists('us-east-1', { objectLocking: true });
@@ -51,7 +51,7 @@ describe('ORM Repository Expiration', () => {
51
51
  ], TtlEntity.prototype, "name", void 0);
52
52
  TtlEntity = __decorate([
53
53
  Table('ttl_entities', { schema }),
54
- TimeToLive(1000, 'hard') // 1s TTL
54
+ TimeToLive(100, 'hard') // 100ms TTL
55
55
  ], TtlEntity);
56
56
  beforeAll(async () => {
57
57
  injector = new Injector('Test');
@@ -95,8 +95,8 @@ describe('ORM Repository Expiration', () => {
95
95
  await runInInjectionContext(injector, async () => {
96
96
  const repository = injectRepository(TtlEntity);
97
97
  const e1 = await repository.insert(Object.assign(new TtlEntity(), { name: 'Valid' }));
98
- // Wait 1.1s for expiration
99
- await new Promise((resolve) => setTimeout(resolve, 1100));
98
+ // Wait 150ms for expiration
99
+ await new Promise((resolve) => setTimeout(resolve, 150));
100
100
  await repository.processExpirations();
101
101
  const all = await repository.loadAll({ withDeleted: true });
102
102
  expect(all).toHaveLength(0);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@tstdl/base",
3
- "version": "0.93.126",
3
+ "version": "0.93.128",
4
4
  "author": "Patrick Hein",
5
5
  "publishConfig": {
6
6
  "access": "public"
@@ -11,7 +11,7 @@ describe('PostgresRateLimiter Integration Tests', () => {
11
11
  const rateLimiterProvider = injector.resolve(RateLimiterProvider);
12
12
  rateLimiter = rateLimiterProvider.get(limiterName, {
13
13
  burstCapacity: 10,
14
- refillInterval: 1000, // 10 tokens per second -> 1 token per 100ms
14
+ refillInterval: 500, // 10 tokens per 500ms -> 1 token per 50ms
15
15
  });
16
16
  });
17
17
  afterAll(async () => {
@@ -30,9 +30,9 @@ describe('PostgresRateLimiter Integration Tests', () => {
30
30
  const resource = 'res-2';
31
31
  await rateLimiter.tryAcquire(resource, 10);
32
32
  expect(await rateLimiter.tryAcquire(resource, 1)).toBe(false);
33
- // Wait for 2 tokens (200ms)
34
- await timeout(250);
35
- expect(await rateLimiter.tryAcquire(resource, 2)).toBe(true);
33
+ // Wait for 1 token (50ms) + buffer
34
+ await timeout(75);
35
+ expect(await rateLimiter.tryAcquire(resource, 1)).toBe(true);
36
36
  expect(await rateLimiter.tryAcquire(resource, 1)).toBe(false);
37
37
  });
38
38
  it('should refund tokens', async () => {
@@ -55,7 +55,9 @@ describe('PostgresRateLimiter Integration Tests', () => {
55
55
  await rateLimiter.tryAcquire(resource, 0);
56
56
  const results = await Promise.all(Array.from({ length: 20 }).map(() => rateLimiter.tryAcquire(resource, 1)));
57
57
  const successCount = results.filter(Boolean).length;
58
- expect(successCount).toBe(10);
58
+ // We expect 10, but allow up to 12 if tokens refilled during the Promise.all
59
+ expect(successCount).toBeGreaterThanOrEqual(10);
60
+ expect(successCount).toBeLessThanOrEqual(12);
59
61
  }, 15000);
60
62
  it('should always allow zero or negative cost', async () => {
61
63
  const resource = 'res-zero';
@@ -72,8 +74,8 @@ describe('PostgresRateLimiter Integration Tests', () => {
72
74
  // Drain
73
75
  await rateLimiter.tryAcquire(resource, 10);
74
76
  expect(await rateLimiter.tryAcquire(resource, 1)).toBe(false);
75
- // Wait for full refill (1s) + extra
76
- await timeout(1200);
77
+ // Wait for full refill (500ms) + extra
78
+ await timeout(600);
77
79
  // Should only have 10 tokens
78
80
  expect(await rateLimiter.tryAcquire(resource, 10)).toBe(true);
79
81
  // Should be empty again immediately
@@ -15,13 +15,13 @@ describe('Complex Queue Scenarios', () => {
15
15
  // Configure with specific settings for testing logic
16
16
  queue = queueProvider.get(queueName, {
17
17
  visibilityTimeout: 1000,
18
- priorityAgingInterval: 100, // Fast aging
18
+ priorityAgingInterval: 50, // Fast aging
19
19
  priorityAgingStep: 10,
20
20
  rateLimit: 5,
21
- rateInterval: 500,
22
- retryDelayMinimum: 100,
21
+ rateInterval: 50,
22
+ retryDelayMinimum: 50,
23
23
  retryDelayGrowth: 2,
24
- retention: 100, // Fast retention for archive test
24
+ retention: 50, // Fast retention for archive test
25
25
  });
26
26
  });
27
27
  afterEach(async () => {
@@ -33,12 +33,12 @@ describe('Complex Queue Scenarios', () => {
33
33
  await injector?.dispose();
34
34
  });
35
35
  async function waitForStatus(id, status) {
36
- for (let i = 0; i < 20; i++) {
36
+ for (let i = 0; i < 50; i++) {
37
37
  const task = await queue.getTask(id);
38
38
  if (task?.status === status)
39
39
  return;
40
40
  await queue.processPendingFanIn();
41
- await timeout(50);
41
+ await timeout(10);
42
42
  }
43
43
  }
44
44
  describe('Complex Dependencies', () => {
@@ -62,7 +62,7 @@ describe('Complex Queue Scenarios', () => {
62
62
  // Process B
63
63
  const dB = await queue.dequeue({ types: ['B'] });
64
64
  await queue.complete(dB);
65
- await timeout(100);
65
+ await timeout(20);
66
66
  await queue.processPendingFanIn();
67
67
  // D still waiting (needs C)
68
68
  const uD2 = await queue.getTask(taskD.id);
@@ -105,8 +105,8 @@ describe('Complex Queue Scenarios', () => {
105
105
  describe('Scheduling & Priorities', () => {
106
106
  it('should promote priority of old pending tasks (Aging)', async () => {
107
107
  const t1 = await queue.enqueue('low', {}, { priority: 2000 });
108
- // Wait for aging interval (100ms)
109
- await timeout(150);
108
+ // Wait for aging interval (50ms)
109
+ await timeout(60);
110
110
  await queue.maintenance();
111
111
  const updated = await queue.getTask(t1.id);
112
112
  // Default step is 10. 2000 - 10 = 1990
@@ -120,7 +120,7 @@ describe('Complex Queue Scenarios', () => {
120
120
  const u1 = await queue.getTask(task.id);
121
121
  expect(u1?.tries).toBe(1);
122
122
  const delay1 = u1.scheduleTimestamp - currentTimestamp();
123
- expect(delay1).toBeGreaterThan(150); // Approx check
123
+ expect(delay1).toBeGreaterThan(20); // Approx check
124
124
  // Force reschedule to now
125
125
  await queue.reschedule(task.id, currentTimestamp());
126
126
  // Try 2
@@ -129,12 +129,12 @@ describe('Complex Queue Scenarios', () => {
129
129
  const u2 = await queue.getTask(task.id);
130
130
  expect(u2?.tries).toBe(2);
131
131
  const now = currentTimestamp();
132
- expect(u2.scheduleTimestamp > now + 300).toBe(true);
132
+ expect(u2.scheduleTimestamp > now + 50).toBe(true);
133
133
  });
134
134
  });
135
135
  describe('Rate Limiting & Concurrency', () => {
136
136
  it('should limit burst dequeue rate', async () => {
137
- // Rate limit 5, interval 500ms
137
+ // Rate limit 5, interval 50ms
138
138
  await queue.enqueueMany(Array.from({ length: 10 }, (_, i) => ({ type: 'burst', data: { i } })));
139
139
  // Request burstCapacity (5)
140
140
  const batch1 = await queue.dequeueMany(5);
@@ -143,7 +143,7 @@ describe('Complex Queue Scenarios', () => {
143
143
  const batch2 = await queue.dequeueMany(1);
144
144
  expect(batch2.length).toBe(0); // Rate limited
145
145
  // Wait for refill
146
- await timeout(600);
146
+ await timeout(60);
147
147
  const batch3 = await queue.dequeueMany(5);
148
148
  expect(batch3.length).toBe(5); // Refilled
149
149
  });
@@ -179,8 +179,8 @@ describe('Complex Queue Scenarios', () => {
179
179
  expect(before).toBeDefined();
180
180
  expect(before?.status).toBe(TaskStatus.Completed);
181
181
  expect(before.completeTimestamp > 0).toBe(true);
182
- // Wait for retention (100ms).
183
- await timeout(500);
182
+ // Wait for retention (50ms).
183
+ await timeout(60);
184
184
  await archiveQueue.maintenance();
185
185
  // Should move from main table to archive
186
186
  const loaded = await archiveQueue.getTask(task.id);
@@ -189,9 +189,9 @@ describe('Complex Queue Scenarios', () => {
189
189
  await archiveQueue.clear();
190
190
  });
191
191
  it('should prune expired pending tasks', async () => {
192
- // Time to live: 100ms
193
- const task = await queue.enqueue('expire-me', {}, { timeToLive: currentTimestamp() + 100 });
194
- await timeout(150);
192
+ // Time to live: 50ms
193
+ const task = await queue.enqueue('expire-me', {}, { timeToLive: currentTimestamp() + 50 });
194
+ await timeout(60);
195
195
  await queue.maintenance();
196
196
  const updated = await queue.getTask(task.id);
197
197
  expect(updated?.status).toBe(TaskStatus.Dead);
@@ -206,7 +206,7 @@ describe('Complex Queue Scenarios', () => {
206
206
  const d = await queue.dequeue();
207
207
  await queue.complete(d);
208
208
  // Force move
209
- await timeout(200);
209
+ await timeout(60);
210
210
  await queue.maintenance();
211
211
  // Verify retrieval
212
212
  const fromArchive = await queue.getTask(task.id);
@@ -215,7 +215,7 @@ describe('Complex Queue Scenarios', () => {
215
215
  });
216
216
  it('should defer archival of parent tasks until children are archived', async () => {
217
217
  const qProvider = injector.resolve(TaskQueueProvider);
218
- const treeQueue = qProvider.get(`archive-tree-${Date.now()}`, { retention: 100 });
218
+ const treeQueue = qProvider.get(`archive-tree-${Date.now()}`, { retention: 50 });
219
219
  const parent = await treeQueue.enqueue('parent', {});
220
220
  const child = await treeQueue.enqueue('child', {}, { parentId: parent.id });
221
221
  const d1 = await treeQueue.dequeue();
@@ -223,7 +223,7 @@ describe('Complex Queue Scenarios', () => {
223
223
  await treeQueue.complete(d1);
224
224
  await treeQueue.complete(d2);
225
225
  // Wait for retention
226
- await timeout(200);
226
+ await timeout(60);
227
227
  // First maintenance: should archive child, but parent stays because child is still in main table (until it's deleted in the same tx maybe? No, loadMany happens before delete)
228
228
  await treeQueue.maintenance();
229
229
  const parentStillActive = await treeQueue.getTask(parent.id);
@@ -282,7 +282,7 @@ describe('Complex Queue Scenarios', () => {
282
282
  for (let i = 0; i < 5; i++) {
283
283
  if (u?.status == TaskStatus.Waiting)
284
284
  break;
285
- await timeout(50);
285
+ await timeout(10);
286
286
  u = await queue.getTask(dependent.id);
287
287
  }
288
288
  expect(u?.status).toBe(TaskStatus.Waiting); // Should still be waiting because dependency didn't Complete
@@ -22,6 +22,16 @@ describe('Queue Dependencies & Tree Tests', () => {
22
22
  afterAll(async () => {
23
23
  await injector?.dispose();
24
24
  });
25
+ async function waitForStatus(id, status) {
26
+ for (let i = 0; i < 20; i++) {
27
+ const task = await queue.getTask(id);
28
+ if (task?.status == status) {
29
+ return;
30
+ }
31
+ await queue.processPendingFanIn();
32
+ await timeout(50);
33
+ }
34
+ }
25
35
  describe('Dependencies (Fan-In)', () => {
26
36
  it('should schedule a task only after dependency completes (completeAfterTags)', async () => {
27
37
  // 1. Create a dependent task (Waiting)
@@ -35,7 +45,7 @@ describe('Queue Dependencies & Tree Tests', () => {
35
45
  const dequeued = await queue.dequeue({ types: ['prereq'] });
36
46
  expect(dequeued?.id).toBe(prereq.id);
37
47
  await queue.complete(dequeued);
38
- await queue.processPendingFanIn();
48
+ await waitForStatus(dependent.id, TaskStatus.Completed);
39
49
  const updatedDependent = await queue.getTask(dependent.id);
40
50
  expect(updatedDependent?.status).toBe(TaskStatus.Completed);
41
51
  });
@@ -50,7 +60,7 @@ describe('Queue Dependencies & Tree Tests', () => {
50
60
  // 3. Complete prereq
51
61
  const dequeued = await queue.dequeue({ types: ['prereq'] });
52
62
  await queue.complete(dequeued);
53
- await queue.processPendingFanIn();
63
+ await waitForStatus(dependent.id, TaskStatus.Pending);
54
64
  // 5. Dependent should be Pending (ready to run)
55
65
  const updatedDependent = await queue.getTask(dependent.id);
56
66
  expect(updatedDependent?.status).toBe(TaskStatus.Pending);
@@ -67,16 +77,8 @@ describe('Queue Dependencies & Tree Tests', () => {
67
77
  const dequeued = await queue.dequeue({ types: ['prereq'] });
68
78
  // Fail fatally
69
79
  await queue.fail(dequeued, new Error('boom'), { fatal: true });
70
- // Trigger resolution
71
- await queue.processPendingFanIn();
72
- let updatedDependent;
73
- for (let i = 0; i < 20; i++) {
74
- await timeout(100);
75
- updatedDependent = await queue.getTask(dependent.id);
76
- if (updatedDependent?.status === TaskStatus.Dead)
77
- break;
78
- await queue.processPendingFanIn(); // Retry processing if it didn't catch it yet
79
- }
80
+ await waitForStatus(dependent.id, TaskStatus.Dead);
81
+ const updatedDependent = await queue.getTask(dependent.id);
80
82
  expect(updatedDependent?.status).toBe(TaskStatus.Dead);
81
83
  expect(updatedDependent?.error?.code).toBe('DependencyFailed');
82
84
  });
@@ -90,7 +92,7 @@ describe('Queue Dependencies & Tree Tests', () => {
90
92
  await queue.enqueue('t1', {}, { tags: ['tag-1'] });
91
93
  const d1 = await queue.dequeue({ types: ['t1'] });
92
94
  await queue.complete(d1);
93
- await queue.processPendingFanIn();
95
+ await waitForStatus(dependent.id, TaskStatus.Pending);
94
96
  const updated = await queue.getTask(dependent.id);
95
97
  expect(updated?.status).toBe(TaskStatus.Pending);
96
98
  });
@@ -134,11 +134,11 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
134
134
  const queueProvider = injector.resolve(TaskQueueProvider);
135
135
  const queueName = `pg-test-queue-${Date.now()}-${Math.random()}`;
136
136
  queue = queueProvider.get(queueName, {
137
- visibilityTimeout: 200, // Short timeout for testing
137
+ visibilityTimeout: 50, // Short timeout for testing
138
138
  retryDelayMinimum: 50,
139
139
  retryDelayGrowth: 1,
140
140
  circuitBreakerThreshold: 2,
141
- circuitBreakerResetTimeout: 200,
141
+ circuitBreakerResetTimeout: 50,
142
142
  });
143
143
  });
144
144
  afterEach(async () => {
@@ -188,11 +188,11 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
188
188
  expect((t3?.data)['foo']).toBe('low');
189
189
  });
190
190
  it('should not dequeue a task scheduled in the future', async () => {
191
- const future = currentTimestamp() + 500;
191
+ const future = currentTimestamp() + 100;
192
192
  await queue.enqueue('foo', { foo: 'future' }, { scheduleTimestamp: future });
193
193
  const task = await queue.dequeue();
194
194
  expect(task).toBeUndefined();
195
- await timeout(600);
195
+ await timeout(150);
196
196
  const taskLater = await queue.dequeue();
197
197
  expect(taskLater).toBeDefined();
198
198
  });
@@ -237,8 +237,8 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
237
237
  ]);
238
238
  await queue.fail((await queue.dequeue()), 'err');
239
239
  await queue.fail((await queue.dequeue()), 'err');
240
- // Breaker is Open. Wait for reset timeout (200ms)
241
- await timeout(250);
240
+ // Breaker is Open. Wait for reset timeout (50ms)
241
+ await timeout(75);
242
242
  const probe = await queue.dequeue();
243
243
  expect(probe).toBeDefined();
244
244
  const secondAttempt = await queue.dequeue();
@@ -249,8 +249,8 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
249
249
  it('should recover "Zombie" tasks (crashed workers)', async () => {
250
250
  const task = await queue.enqueue('foo', { foo: 'zombie' });
251
251
  await queue.dequeue(); // Task is now Running with a token
252
- // processTimeout is 200ms. Wait for it to expire.
253
- await timeout(300);
252
+ // processTimeout is 50ms. Wait for it to expire.
253
+ await timeout(100);
254
254
  await queue.maintenance();
255
255
  const recovered = await queue.getTask(task.id);
256
256
  expect(recovered?.status).toBe(TaskStatus.Pending);
@@ -260,10 +260,10 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
260
260
  it('should fail tasks that exceed Hard Execution Timeout via prune', async () => {
261
261
  // Re-configure queue with very short execution timeout
262
262
  const queueProvider = injector.resolve(TaskQueueProvider);
263
- const shortQueue = queueProvider.get(`prune-test-${Date.now()}`, { maxExecutionTime: 100 });
263
+ const shortQueue = queueProvider.get(`prune-test-${Date.now()}`, { maxExecutionTime: 50 });
264
264
  const task = await shortQueue.enqueue('foo', { foo: 'long-running' });
265
265
  await shortQueue.dequeue();
266
- await timeout(200);
266
+ await timeout(75);
267
267
  await shortQueue.maintenance();
268
268
  const updated = await shortQueue.getTask(task.id);
269
269
  expect(updated?.status).toBe(TaskStatus.Dead);
@@ -274,7 +274,7 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
274
274
  const task = await queue.enqueue('foo', { foo: 'work' });
275
275
  const dequeued = await queue.dequeue();
276
276
  const initialLock = dequeued.visibilityDeadline;
277
- await timeout(50);
277
+ await timeout(20);
278
278
  const touched = await queue.touch(dequeued);
279
279
  expect(touched?.visibilityDeadline > initialLock).toBe(true);
280
280
  });
@@ -282,8 +282,8 @@ describe('PostgresQueue (Distributed Task Orchestration)', () => {
282
282
  await queue.enqueue('foo', { foo: 'work' });
283
283
  const dequeued = await queue.dequeue();
284
284
  expect(dequeued).toBeDefined();
285
- // processTimeout is 200ms. Wait for it to expire.
286
- await timeout(300);
285
+ // processTimeout is 50ms. Wait for it to expire.
286
+ await timeout(100);
287
287
  await queue.maintenance();
288
288
  await queue.dequeue(); // Stolen by another worker (tries=2)
289
289
  // Original worker tries to touch
@@ -14,7 +14,7 @@ describe('Worker & Base Class Tests', () => {
14
14
  const queueProvider = injector.resolve(TaskQueueProvider);
15
15
  const queueName = `worker-queue-${Date.now()}-${Math.random()}`;
16
16
  queue = queueProvider.get(queueName, {
17
- visibilityTimeout: 500, // Short visibility for testing lease loss
17
+ visibilityTimeout: 200, // Short visibility for testing lease loss
18
18
  });
19
19
  token = new CancellationToken();
20
20
  });
@@ -36,10 +36,10 @@ describe('Worker & Base Class Tests', () => {
36
36
  return TaskProcessResult.Complete();
37
37
  });
38
38
  // Wait until 2 tasks are processed
39
- for (let i = 0; i < 20; i++) {
39
+ for (let i = 0; i < 50; i++) {
40
40
  if (processed.length === 2)
41
41
  break;
42
- await timeout(100);
42
+ await timeout(20);
43
43
  }
44
44
  token.set(); // Stop worker
45
45
  expect(processed).toContain(1);
@@ -55,7 +55,7 @@ describe('Worker & Base Class Tests', () => {
55
55
  queue.process({ cancellationSignal: token }, async () => {
56
56
  throw new Error('worker error');
57
57
  });
58
- await timeout(200);
58
+ await timeout(50);
59
59
  token.set();
60
60
  const updated = await queue.getTask(task.id);
61
61
  expect(updated?.status).toBe(TaskStatus.Pending); // Should retry
@@ -66,12 +66,12 @@ describe('Worker & Base Class Tests', () => {
66
66
  const task = await queue.enqueue('long', {});
67
67
  let executed = false;
68
68
  queue.process({ cancellationSignal: token }, async (_context) => {
69
- // Simulate long work > visibilityTimeout (500ms)
70
- await timeout(700);
69
+ // Simulate long work > visibilityTimeout (200ms)
70
+ await timeout(300);
71
71
  executed = true;
72
72
  return TaskProcessResult.Complete();
73
73
  });
74
- await timeout(1000);
74
+ await timeout(500);
75
75
  token.set();
76
76
  expect(executed).toBe(true);
77
77
  const updated = await queue.getTask(task.id);
@@ -91,10 +91,10 @@ describe('Worker & Base Class Tests', () => {
91
91
  }
92
92
  return TaskProcessResult.Complete();
93
93
  });
94
- for (let i = 0; i < 20; i++) {
94
+ for (let i = 0; i < 50; i++) {
95
95
  if (processed.size === 2)
96
96
  break;
97
- await timeout(100);
97
+ await timeout(20);
98
98
  }
99
99
  token.set();
100
100
  const uFail = await queue.getTask(tFail.id);
@@ -129,10 +129,10 @@ describe('Worker & Base Class Tests', () => {
129
129
  executed = true;
130
130
  return TaskProcessResult.Complete();
131
131
  });
132
- for (let i = 0; i < 20; i++) {
132
+ for (let i = 0; i < 50; i++) {
133
133
  if (executed)
134
134
  break;
135
- await timeout(100);
135
+ await timeout(20);
136
136
  }
137
137
  token.set();
138
138
  expect(executed).toBe(true);
@@ -159,7 +159,7 @@ describe('Worker & Base Class Tests', () => {
159
159
  if (finalAttemptValues.length === 2)
160
160
  break;
161
161
  testQueue.notify();
162
- await timeout(100);
162
+ await timeout(20);
163
163
  }
164
164
  token.set();
165
165
  expect(finalAttemptValues).toEqual([false, true]);
@@ -2,6 +2,7 @@ import type { PoolConfig } from 'pg';
2
2
  import { type AuthenticationAncillaryService } from '../authentication/server/index.js';
3
3
  import { Injector } from '../injector/index.js';
4
4
  import { LogLevel } from '../logger/index.js';
5
+ import { type S3ObjectStorageProviderConfig } from '../object-storage/s3/index.js';
5
6
  import { Database } from '../orm/server/index.js';
6
7
  import type { Type } from '../types/index.js';
7
8
  export type IntegrationTestOptions = {
@@ -13,6 +14,7 @@ export type IntegrationTestOptions = {
13
14
  baseUrl?: string;
14
15
  port?: number;
15
16
  };
17
+ s3?: Partial<S3ObjectStorageProviderConfig>;
16
18
  logLevels?: Record<string, LogLevel>;
17
19
  modules?: {
18
20
  api?: boolean;
@@ -98,7 +98,11 @@ export async function setupIntegrationTest(options = {}) {
98
98
  }
99
99
  if (options.modules?.authentication) {
100
100
  configureAuthenticationServer({
101
- serviceOptions: { secret: 'test-secret' },
101
+ serviceOptions: {
102
+ secret: 'test-secret',
103
+ hashIterations: 10,
104
+ signingSecretsDerivationIterations: 10,
105
+ },
102
106
  authenticationAncillaryService: options.authenticationAncillaryService,
103
107
  injector,
104
108
  });
@@ -121,13 +125,15 @@ export async function setupIntegrationTest(options = {}) {
121
125
  await runInInjectionContext(injector, migrateDocumentManagementSchema);
122
126
  }
123
127
  if (options.modules?.objectStorage) {
128
+ const bucketPerModule = options.s3?.bucketPerModule ?? configParser.boolean('S3_BUCKET_PER_MODULE', true);
124
129
  configureS3ObjectStorage({
125
- endpoint: configParser.string('S3_ENDPOINT', 'http://127.0.0.1:9000'),
126
- accessKey: configParser.string('S3_ACCESS_KEY', 'tstdl-dev'),
127
- secretKey: configParser.string('S3_SECRET_KEY', 'tstdl-dev'),
128
- bucket: configParser.string('S3_BUCKET', 'test-bucket'),
129
- region: configParser.string('S3_REGION', 'us-east-1'),
130
- forcePathStyle: configParser.boolean('S3_FORCE_PATH_STYLE', true),
130
+ endpoint: options.s3?.endpoint ?? configParser.string('S3_ENDPOINT', 'http://127.0.0.1:9000'),
131
+ accessKey: options.s3?.accessKey ?? configParser.string('S3_ACCESS_KEY', 'tstdl-dev'),
132
+ secretKey: options.s3?.secretKey ?? configParser.string('S3_SECRET_KEY', 'tstdl-dev'),
133
+ bucket: bucketPerModule ? undefined : (options.s3?.bucket ?? configParser.string('S3_BUCKET', 'test-bucket')),
134
+ bucketPerModule,
135
+ region: options.s3?.region ?? configParser.string('S3_REGION', 'us-east-1'),
136
+ forcePathStyle: options.s3?.forcePathStyle ?? configParser.boolean('S3_FORCE_PATH_STYLE', true),
131
137
  injector,
132
138
  });
133
139
  }