bunsane 0.2.9 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/CHANGELOG.md +266 -0
  2. package/config/cache.config.ts +12 -2
  3. package/core/App.ts +390 -66
  4. package/core/ApplicationLifecycle.ts +68 -4
  5. package/core/Entity.ts +407 -256
  6. package/core/EntityHookManager.ts +88 -21
  7. package/core/EntityManager.ts +12 -3
  8. package/core/Logger.ts +4 -0
  9. package/core/RequestContext.ts +4 -1
  10. package/core/SchedulerManager.ts +92 -9
  11. package/core/cache/CacheFactory.ts +3 -1
  12. package/core/cache/CacheManager.ts +54 -17
  13. package/core/cache/RedisCache.ts +38 -3
  14. package/core/decorators/EntityHooks.ts +24 -12
  15. package/core/middleware/RateLimit.ts +105 -0
  16. package/core/middleware/index.ts +1 -0
  17. package/core/remote/CircuitBreaker.ts +115 -0
  18. package/core/remote/OutboxWorker.ts +183 -0
  19. package/core/remote/RemoteManager.ts +400 -0
  20. package/core/remote/RpcCaller.ts +310 -0
  21. package/core/remote/StreamConsumer.ts +535 -0
  22. package/core/remote/decorators.ts +121 -0
  23. package/core/remote/health.ts +139 -0
  24. package/core/remote/index.ts +37 -0
  25. package/core/remote/metrics.ts +99 -0
  26. package/core/remote/outboxSchema.ts +41 -0
  27. package/core/remote/types.ts +151 -0
  28. package/core/scheduler/DistributedLock.ts +324 -266
  29. package/gql/builders/ResolverBuilder.ts +4 -4
  30. package/gql/complexityLimit.ts +95 -0
  31. package/gql/index.ts +15 -3
  32. package/gql/visitors/ResolverGeneratorVisitor.ts +16 -2
  33. package/package.json +1 -1
  34. package/query/ComponentInclusionNode.ts +13 -6
  35. package/query/OrNode.ts +2 -4
  36. package/query/Query.ts +30 -3
  37. package/query/SqlIdentifier.ts +105 -0
  38. package/query/builders/FullTextSearchBuilder.ts +19 -6
  39. package/service/ServiceRegistry.ts +21 -8
  40. package/storage/LocalStorageProvider.ts +12 -3
  41. package/storage/S3StorageProvider.ts +6 -6
  42. package/tests/e2e/http.test.ts +6 -2
  43. package/tests/helpers/MockRedisClient.ts +113 -0
  44. package/tests/helpers/MockRedisStreamServer.ts +448 -0
  45. package/tests/integration/entity/Entity.saveTimeout.test.ts +110 -0
  46. package/tests/integration/remote/dlq.test.ts +175 -0
  47. package/tests/integration/remote/event-dispatch.test.ts +114 -0
  48. package/tests/integration/remote/outbox.test.ts +130 -0
  49. package/tests/integration/remote/rpc.test.ts +177 -0
  50. package/tests/unit/remote/CircuitBreaker.test.ts +159 -0
  51. package/tests/unit/remote/RemoteError.test.ts +55 -0
  52. package/tests/unit/remote/decorators.test.ts +195 -0
  53. package/tests/unit/remote/metrics.test.ts +115 -0
  54. package/tests/unit/remote/mockRedisStreamServer.test.ts +104 -0
  55. package/tests/unit/storage/S3StorageProvider.test.ts +6 -10
  56. package/upload/FileValidator.ts +9 -6
@@ -0,0 +1,113 @@
1
+ /**
2
+ * ioredis-shaped client backed by a MockRedisStreamServer.
3
+ *
4
+ * Cast the returned instance to `Redis` (from "ioredis") when passing into
5
+ * the remote subsystem via `redisFactory`. Only methods the remote layer
6
+ * touches are implemented; others throw on use.
7
+ */
8
+
9
+ import type { MockRedisStreamServer } from "./MockRedisStreamServer";
10
+
11
+ export class MockRedisClient {
12
+ private server: MockRedisStreamServer;
13
+ private connected = true;
14
+ private listeners = new Map<string, Array<(...args: any[]) => void>>();
15
+
16
+ constructor(server: MockRedisStreamServer) {
17
+ this.server = server;
18
+ }
19
+
20
+ on(event: string, listener: (...args: any[]) => void): this {
21
+ const arr = this.listeners.get(event) ?? [];
22
+ arr.push(listener);
23
+ this.listeners.set(event, arr);
24
+ return this;
25
+ }
26
+
27
+ async xadd(key: string, ...args: any[]): Promise<string | null> {
28
+ this.ensureConnected();
29
+ try {
30
+ return this.server.xadd(key, ...args);
31
+ } catch (err: any) {
32
+ throw err;
33
+ }
34
+ }
35
+
36
+ async xgroup(...args: any[]): Promise<string> {
37
+ this.ensureConnected();
38
+ const [op, key, group, id, mk] = args;
39
+ return this.server.xgroup(op, key, group, id, mk);
40
+ }
41
+
42
+ async xreadgroup(...args: any[]): Promise<any> {
43
+ this.ensureConnected();
44
+ return this.server.xreadgroup(...args);
45
+ }
46
+
47
+ async xread(...args: any[]): Promise<any> {
48
+ this.ensureConnected();
49
+ return this.server.xread(...args);
50
+ }
51
+
52
+ async xack(key: string, group: string, msgId: string): Promise<number> {
53
+ this.ensureConnected();
54
+ return this.server.xack(key, group, msgId);
55
+ }
56
+
57
+ async xpending(...args: any[]): Promise<any> {
58
+ this.ensureConnected();
59
+ const [key, group, ...rest] = args;
60
+ return this.server.xpending(key, group, ...rest);
61
+ }
62
+
63
+ async xautoclaim(...args: any[]): Promise<any> {
64
+ this.ensureConnected();
65
+ return this.server.xautoclaim(
66
+ args[0],
67
+ args[1],
68
+ args[2],
69
+ Number(args[3]),
70
+ args[4],
71
+ ...args.slice(5)
72
+ );
73
+ }
74
+
75
+ async xlen(key: string): Promise<number> {
76
+ this.ensureConnected();
77
+ return this.server.xlen(key);
78
+ }
79
+
80
+ async xrange(...args: any[]): Promise<any> {
81
+ this.ensureConnected();
82
+ return this.server.xrange(
83
+ args[0],
84
+ args[1],
85
+ args[2],
86
+ ...args.slice(3)
87
+ );
88
+ }
89
+
90
+ async ping(): Promise<string> {
91
+ this.ensureConnected();
92
+ return this.server.ping();
93
+ }
94
+
95
+ disconnect(): void {
96
+ this.connected = false;
97
+ }
98
+
99
+ async quit(): Promise<string> {
100
+ this.connected = false;
101
+ return "OK";
102
+ }
103
+
104
+ private ensureConnected(): void {
105
+ if (!this.connected) {
106
+ throw new Error("Connection is closed");
107
+ }
108
+ }
109
+ }
110
+
111
+ export function createMockRedisFactory(server: MockRedisStreamServer) {
112
+ return (_blocking: boolean) => new MockRedisClient(server);
113
+ }
@@ -0,0 +1,448 @@
1
+ /**
2
+ * In-memory Redis Streams shim for Tier 2 integration tests.
3
+ *
4
+ * Implements only the commands the remote subsystem issues:
5
+ * xadd, xreadgroup, xread, xack, xgroup CREATE, xpending,
6
+ * xautoclaim, xlen, xrange, ping
7
+ *
8
+ * Shared server: multiple MockRedisClient instances pointing at the same
9
+ * server simulate separate app processes talking to one Redis.
10
+ */
11
+
12
+ interface StreamEntry {
13
+ id: string;
14
+ fields: string[]; // flat [k,v,k,v,...]
15
+ }
16
+
17
+ interface PelEntry {
18
+ msgId: string;
19
+ consumer: string;
20
+ deliveredAt: number;
21
+ deliveryCount: number;
22
+ }
23
+
24
+ interface ConsumerGroup {
25
+ name: string;
26
+ consumers: Set<string>;
27
+ pel: Map<string, PelEntry>;
28
+ /** Highest ID delivered via ">" — next read starts after this. */
29
+ lastDeliveredId: string;
30
+ }
31
+
32
+ interface Stream {
33
+ key: string;
34
+ entries: StreamEntry[];
35
+ groups: Map<string, ConsumerGroup>;
36
+ lastGeneratedTs: number;
37
+ seqWithinMs: number;
38
+ }
39
+
40
+ const MIN_ID = "0-0";
41
+
42
+ function parseId(id: string): [number, number] {
43
+ const dash = id.indexOf("-");
44
+ if (dash < 0) return [Number(id), 0];
45
+ return [Number(id.slice(0, dash)), Number(id.slice(dash + 1))];
46
+ }
47
+
48
+ function idLess(a: string, b: string): boolean {
49
+ const [at, as] = parseId(a);
50
+ const [bt, bs] = parseId(b);
51
+ if (at !== bt) return at < bt;
52
+ return as < bs;
53
+ }
54
+
55
+ function idGreater(a: string, b: string): boolean {
56
+ const [at, as] = parseId(a);
57
+ const [bt, bs] = parseId(b);
58
+ if (at !== bt) return at > bt;
59
+ return as > bs;
60
+ }
61
+
62
+ export class MockRedisStreamServer {
63
+ private streams = new Map<string, Stream>();
64
+ /** Fault injection for tests that need to simulate XADD failures. */
65
+ public xaddShouldFail = false;
66
+
67
+ private sleep(ms: number): Promise<void> {
68
+ return new Promise((r) => setTimeout(r, ms));
69
+ }
70
+
71
+ private getOrCreateStream(key: string): Stream {
72
+ let s = this.streams.get(key);
73
+ if (!s) {
74
+ s = {
75
+ key,
76
+ entries: [],
77
+ groups: new Map(),
78
+ lastGeneratedTs: 0,
79
+ seqWithinMs: 0,
80
+ };
81
+ this.streams.set(key, s);
82
+ }
83
+ return s;
84
+ }
85
+
86
+ private generateId(stream: Stream): string {
87
+ const now = Date.now();
88
+ if (now === stream.lastGeneratedTs) {
89
+ stream.seqWithinMs++;
90
+ } else {
91
+ stream.lastGeneratedTs = now;
92
+ stream.seqWithinMs = 0;
93
+ }
94
+ return `${now}-${stream.seqWithinMs}`;
95
+ }
96
+
97
+ /**
98
+ * XADD key [MAXLEN [~] N] * field value [field value ...]
99
+ * Returns the generated id.
100
+ */
101
+ xadd(key: string, ...args: any[]): string {
102
+ if (this.xaddShouldFail) {
103
+ throw new Error("MOCK_XADD_FAIL");
104
+ }
105
+ const stream = this.getOrCreateStream(key);
106
+
107
+ // Parse leading options
108
+ let i = 0;
109
+ let maxLen: number | null = null;
110
+ if (args[i] === "MAXLEN") {
111
+ i++;
112
+ if (args[i] === "~" || args[i] === "=") i++;
113
+ maxLen = Number(args[i]);
114
+ i++;
115
+ }
116
+
117
+ // Expect "*"
118
+ if (args[i] !== "*") {
119
+ throw new Error(`MockRedis xadd: only "*" auto-id supported, got ${args[i]}`);
120
+ }
121
+ i++;
122
+
123
+ const fields: string[] = [];
124
+ for (; i < args.length; i++) {
125
+ fields.push(String(args[i]));
126
+ }
127
+
128
+ const id = this.generateId(stream);
129
+ stream.entries.push({ id, fields });
130
+
131
+ if (maxLen !== null && stream.entries.length > maxLen) {
132
+ stream.entries.splice(0, stream.entries.length - maxLen);
133
+ }
134
+
135
+ return id;
136
+ }
137
+
138
+ /**
139
+ * XGROUP CREATE stream group id [MKSTREAM]
140
+ * id "$" = start from latest, "0" / "0-0" = start from beginning.
141
+ */
142
+ xgroup(op: string, key: string, groupName: string, startId: string, mkstream?: string): string {
143
+ if (op !== "CREATE") {
144
+ throw new Error(`MockRedis xgroup: op "${op}" not supported`);
145
+ }
146
+ const hasStream = this.streams.has(key);
147
+ if (!hasStream && mkstream !== "MKSTREAM") {
148
+ throw new Error("ERR no such key");
149
+ }
150
+ const stream = this.getOrCreateStream(key);
151
+ if (stream.groups.has(groupName)) {
152
+ const err = new Error(
153
+ `BUSYGROUP Consumer Group name already exists`
154
+ );
155
+ throw err;
156
+ }
157
+ const lastDeliveredId =
158
+ startId === "$"
159
+ ? stream.entries.length > 0
160
+ ? stream.entries[stream.entries.length - 1]!.id
161
+ : MIN_ID
162
+ : startId === "0" || startId === "0-0"
163
+ ? MIN_ID
164
+ : startId;
165
+ stream.groups.set(groupName, {
166
+ name: groupName,
167
+ consumers: new Set(),
168
+ pel: new Map(),
169
+ lastDeliveredId,
170
+ });
171
+ return "OK";
172
+ }
173
+
174
+ /**
175
+ * XREADGROUP GROUP g consumer [COUNT n] [BLOCK ms] STREAMS s ">"
176
+ * Returns [[streamKey, [[id, fields], ...]]] or null on timeout.
177
+ */
178
+ async xreadgroup(...args: any[]): Promise<any> {
179
+ let i = 0;
180
+ if (args[i] !== "GROUP") throw new Error("expected GROUP");
181
+ i++;
182
+ const groupName = String(args[i++]);
183
+ const consumer = String(args[i++]);
184
+ let count = Infinity;
185
+ let blockMs = 0;
186
+ while (args[i] !== "STREAMS") {
187
+ const opt = String(args[i++]).toUpperCase();
188
+ if (opt === "COUNT") count = Number(args[i++]);
189
+ else if (opt === "BLOCK") blockMs = Number(args[i++]);
190
+ else throw new Error(`unknown XREADGROUP opt ${opt}`);
191
+ }
192
+ i++; // skip STREAMS
193
+ const streams: string[] = [];
194
+ const ids: string[] = [];
195
+ const remaining = args.slice(i);
196
+ const half = remaining.length / 2;
197
+ for (let k = 0; k < half; k++) {
198
+ streams.push(String(remaining[k]));
199
+ ids.push(String(remaining[k + half]));
200
+ }
201
+
202
+ const deadline = Date.now() + blockMs;
203
+ while (true) {
204
+ const result = this.readGroupOnce(groupName, consumer, count, streams, ids);
205
+ if (result) return result;
206
+ if (Date.now() >= deadline) return null;
207
+ await this.sleep(10);
208
+ }
209
+ }
210
+
211
+ private readGroupOnce(
212
+ groupName: string,
213
+ consumer: string,
214
+ count: number,
215
+ streams: string[],
216
+ ids: string[]
217
+ ): any[] | null {
218
+ const out: any[] = [];
219
+ for (let s = 0; s < streams.length; s++) {
220
+ const streamKey = streams[s]!;
221
+ const id = ids[s]!;
222
+ const stream = this.streams.get(streamKey);
223
+ if (!stream) continue;
224
+ const group = stream.groups.get(groupName);
225
+ if (!group) continue;
226
+ group.consumers.add(consumer);
227
+
228
+ let newEntries: StreamEntry[];
229
+ if (id === ">") {
230
+ // New messages only
231
+ newEntries = stream.entries.filter((e) =>
232
+ idGreater(e.id, group.lastDeliveredId)
233
+ );
234
+ if (newEntries.length > count) {
235
+ newEntries = newEntries.slice(0, count);
236
+ }
237
+ for (const entry of newEntries) {
238
+ group.lastDeliveredId = entry.id;
239
+ const existing = group.pel.get(entry.id);
240
+ if (existing) {
241
+ existing.deliveryCount++;
242
+ existing.deliveredAt = Date.now();
243
+ existing.consumer = consumer;
244
+ } else {
245
+ group.pel.set(entry.id, {
246
+ msgId: entry.id,
247
+ consumer,
248
+ deliveredAt: Date.now(),
249
+ deliveryCount: 1,
250
+ });
251
+ }
252
+ }
253
+ } else {
254
+ // Re-read this consumer's PEL
255
+ newEntries = stream.entries.filter((e) => {
256
+ const p = group.pel.get(e.id);
257
+ return p && p.consumer === consumer && idGreater(e.id, id);
258
+ });
259
+ if (newEntries.length > count) {
260
+ newEntries = newEntries.slice(0, count);
261
+ }
262
+ }
263
+
264
+ if (newEntries.length > 0) {
265
+ out.push([
266
+ streamKey,
267
+ newEntries.map((e) => [e.id, e.fields]),
268
+ ]);
269
+ }
270
+ }
271
+ return out.length > 0 ? out : null;
272
+ }
273
+
274
+ /**
275
+ * XREAD [COUNT n] [BLOCK ms] STREAMS s id
276
+ */
277
+ async xread(...args: any[]): Promise<any> {
278
+ let i = 0;
279
+ let count = Infinity;
280
+ let blockMs = 0;
281
+ while (args[i] !== "STREAMS") {
282
+ const opt = String(args[i++]).toUpperCase();
283
+ if (opt === "COUNT") count = Number(args[i++]);
284
+ else if (opt === "BLOCK") blockMs = Number(args[i++]);
285
+ else throw new Error(`unknown XREAD opt ${opt}`);
286
+ }
287
+ i++;
288
+ const remaining = args.slice(i);
289
+ const half = remaining.length / 2;
290
+ const streams: string[] = [];
291
+ const ids: string[] = [];
292
+ for (let k = 0; k < half; k++) {
293
+ streams.push(String(remaining[k]));
294
+ ids.push(String(remaining[k + half]));
295
+ }
296
+
297
+ // Resolve "$" to the current last id per stream once, up front.
298
+ // Subsequent polls compare against that snapshot so new entries get
299
+ // delivered exactly once.
300
+ const resolvedIds = ids.map((id, k) => {
301
+ if (id !== "$") return id;
302
+ const stream = this.streams.get(streams[k]!);
303
+ if (!stream || stream.entries.length === 0) return MIN_ID;
304
+ return stream.entries[stream.entries.length - 1]!.id;
305
+ });
306
+
307
+ const deadline = Date.now() + blockMs;
308
+ while (true) {
309
+ const out: any[] = [];
310
+ for (let s = 0; s < streams.length; s++) {
311
+ const streamKey = streams[s]!;
312
+ const afterId = resolvedIds[s]!;
313
+ const stream = this.streams.get(streamKey);
314
+ if (!stream) continue;
315
+ const matching = stream.entries
316
+ .filter((e) => idGreater(e.id, afterId))
317
+ .slice(0, count);
318
+ if (matching.length > 0) {
319
+ out.push([
320
+ streamKey,
321
+ matching.map((e) => [e.id, e.fields]),
322
+ ]);
323
+ }
324
+ }
325
+ if (out.length > 0) return out;
326
+ if (Date.now() >= deadline) return null;
327
+ await this.sleep(10);
328
+ }
329
+ }
330
+
331
+ xack(key: string, groupName: string, msgId: string): number {
332
+ const stream = this.streams.get(key);
333
+ if (!stream) return 0;
334
+ const group = stream.groups.get(groupName);
335
+ if (!group) return 0;
336
+ return group.pel.delete(msgId) ? 1 : 0;
337
+ }
338
+
339
+ /**
340
+ * Two forms:
341
+ * XPENDING key group -> summary
342
+ * XPENDING key group minId maxId count [consumer] -> detail
343
+ */
344
+ xpending(key: string, groupName: string, ...args: any[]): any {
345
+ const stream = this.streams.get(key);
346
+ if (!stream) return [0, null, null, null];
347
+ const group = stream.groups.get(groupName);
348
+ if (!group) return [0, null, null, null];
349
+
350
+ if (args.length === 0) {
351
+ // Summary
352
+ const ids = Array.from(group.pel.keys()).sort((a, b) =>
353
+ idLess(a, b) ? -1 : idGreater(a, b) ? 1 : 0
354
+ );
355
+ if (ids.length === 0) return [0, null, null, null];
356
+ const byConsumer = new Map<string, number>();
357
+ for (const p of group.pel.values()) {
358
+ byConsumer.set(
359
+ p.consumer,
360
+ (byConsumer.get(p.consumer) ?? 0) + 1
361
+ );
362
+ }
363
+ return [
364
+ ids.length,
365
+ ids[0],
366
+ ids[ids.length - 1],
367
+ Array.from(byConsumer.entries()).map(([c, n]) => [c, String(n)]),
368
+ ];
369
+ }
370
+
371
+ const [minId, maxId, _count] = args;
372
+ const out: any[] = [];
373
+ for (const p of group.pel.values()) {
374
+ if (idLess(p.msgId, minId) || idGreater(p.msgId, maxId)) continue;
375
+ out.push([
376
+ p.msgId,
377
+ p.consumer,
378
+ Date.now() - p.deliveredAt,
379
+ p.deliveryCount,
380
+ ]);
381
+ }
382
+ return out;
383
+ }
384
+
385
+ /**
386
+ * XAUTOCLAIM stream group consumer idleMs cursor [COUNT n]
387
+ * Returns [nextCursor, entries]
388
+ */
389
+ xautoclaim(
390
+ key: string,
391
+ groupName: string,
392
+ consumer: string,
393
+ idleMs: number,
394
+ cursor: string,
395
+ ..._rest: any[]
396
+ ): any {
397
+ const stream = this.streams.get(key);
398
+ if (!stream) return ["0-0", []];
399
+ const group = stream.groups.get(groupName);
400
+ if (!group) return ["0-0", []];
401
+
402
+ const now = Date.now();
403
+ const claimed: any[] = [];
404
+ for (const p of group.pel.values()) {
405
+ if (now - p.deliveredAt < idleMs) continue;
406
+ if (idLess(p.msgId, cursor) && cursor !== "0-0") continue;
407
+ p.consumer = consumer;
408
+ p.deliveryCount++;
409
+ p.deliveredAt = now;
410
+ const entry = stream.entries.find((e) => e.id === p.msgId);
411
+ if (entry) claimed.push([entry.id, entry.fields]);
412
+ }
413
+ return ["0-0", claimed];
414
+ }
415
+
416
+ xlen(key: string): number {
417
+ return this.streams.get(key)?.entries.length ?? 0;
418
+ }
419
+
420
+ xrange(key: string, start: string, end: string, ..._rest: any[]): any[] {
421
+ const stream = this.streams.get(key);
422
+ if (!stream) return [];
423
+ const lo = start === "-" ? MIN_ID : start;
424
+ const hi = end === "+" ? "9999999999999-9999" : end;
425
+ return stream.entries
426
+ .filter(
427
+ (e) =>
428
+ !idLess(e.id, lo) && !idGreater(e.id, hi)
429
+ )
430
+ .map((e) => [e.id, e.fields]);
431
+ }
432
+
433
+ ping(): string {
434
+ return "PONG";
435
+ }
436
+
437
+ /** Helper for tests: total PEL entries across a group. */
438
+ getPelSize(streamKey: string, groupName: string): number {
439
+ return (
440
+ this.streams.get(streamKey)?.groups.get(groupName)?.pel.size ?? 0
441
+ );
442
+ }
443
+
444
+ /** Helper for tests: raw stream entry count ignoring groups. */
445
+ getStreamLength(key: string): number {
446
+ return this.xlen(key);
447
+ }
448
+ }
@@ -0,0 +1,110 @@
1
+ /**
2
+ * Integration tests for Entity.save timeout and cancellation behavior.
3
+ *
4
+ * Regression coverage for the production incident where Entity.save's wall-
5
+ * clock timeout rejected the outer Promise but left the underlying Bun SQL
6
+ * transaction mid-flight. Under pgbouncer transaction-mode pooling this
7
+ * leaked backend PostgreSQL sessions into `idle in transaction` state,
8
+ * exhausting the pool.
9
+ *
10
+ * These tests prove the invariants the fix must uphold:
11
+ * 1. An aborted save leaves no partial rows — Bun SQL's transaction callback
12
+ * throws, auto-ROLLBACK fires, backend connection is released.
13
+ * 2. The connection pool stays healthy after repeated aborts — subsequent
14
+ * saves on fresh entities still succeed.
15
+ * 3. A save with no abort still commits normally.
16
+ *
17
+ * The wall-clock DB_QUERY_TIMEOUT path is module-cached at import time so it
18
+ * is not exercised here directly. Manual verification on a real Postgres +
19
+ * pgbouncer stack (with query_wait_timeout short enough to fire) should
20
+ * confirm pg_stat_activity shows no `idle in transaction` backends after
21
+ * this test suite runs. See the handoff doc (2026-04-18) for the repro steps.
22
+ */
23
+ import { describe, test, expect, beforeAll } from 'bun:test';
24
+ import { Entity } from '../../../core/Entity';
25
+ import db from '../../../database';
26
+ import { TestUser } from '../../fixtures/components';
27
+ import { createTestContext, ensureComponentsRegistered } from '../../utils';
28
+
29
describe('Entity.save timeout and cancellation', () => {
  // Tracker-backed context: entities created via ctx.tracker are cleaned
  // up by the shared test utilities after the suite.
  const ctx = createTestContext();

  beforeAll(async () => {
    await ensureComponentsRegistered(TestUser);
  });

  test('aborted doSave does not leave partial rows (transaction rolls back)', async () => {
    const entity = ctx.tracker.create();
    entity.add(TestUser, { name: 'aborted', email: 'a@example.com', age: 1 });

    const controller = new AbortController();
    // Abort immediately — the first in-flight query will be cancelled,
    // the transaction callback throws, Bun SQL issues ROLLBACK.
    // queueMicrotask fires after doSave's first await yields, so the
    // signal trips while the transaction is genuinely mid-flight.
    queueMicrotask(() => controller.abort(new Error('simulated save timeout')));

    // Deliberately NOT awaited here: we want the Promise itself so the
    // rejection can be asserted below.
    const result = db.transaction(async (trx) => {
      await entity.doSave(trx, controller.signal);
    });

    await expect(result).rejects.toBeDefined();

    // Entity must NOT exist — rollback invariant.
    const rows = await db`SELECT id FROM entities WHERE id = ${entity.id}`;
    expect(rows.length).toBe(0);
  });

  test('connection pool stays healthy after multiple aborted saves', async () => {
    // Repeatedly abort saves — if connections leaked, subsequent saves
    // would eventually block on pool acquire.
    // NOTE(review): Entity.Create() bypasses ctx.tracker here — these
    // entities never persist (every save is aborted), so presumably no
    // teardown is needed; confirm the tracker isn't required for cleanup.
    for (let i = 0; i < 8; i++) {
      const entity = Entity.Create();
      entity.add(TestUser, { name: `aborted-${i}`, email: `a${i}@e.com`, age: i });

      const controller = new AbortController();
      queueMicrotask(() => controller.abort(new Error('simulated timeout')));

      await db.transaction(async (trx) => {
        await entity.doSave(trx, controller.signal);
      }).catch(() => { /* expected */ });
    }

    // A fresh save must still succeed on the pool that serviced the aborts.
    const healthy = ctx.tracker.create();
    healthy.add(TestUser, { name: 'healthy', email: 'h@e.com', age: 99 });
    await healthy.save();

    expect(healthy._persisted).toBe(true);

    const rows = await db`SELECT id FROM entities WHERE id = ${healthy.id}`;
    expect(rows.length).toBe(1);
  });

  test('doSave without signal behaves normally (backwards compatible)', async () => {
    const entity = ctx.tracker.create();
    entity.add(TestUser, { name: 'no-signal', email: 'n@e.com', age: 5 });

    await db.transaction(async (trx) => {
      await entity.doSave(trx); // no signal passed
    });

    // Commit path unchanged: the row must exist.
    const rows = await db`SELECT id FROM entities WHERE id = ${entity.id}`;
    expect(rows.length).toBe(1);
  });

  test('save() resolves even if post-commit cache work is slow (fire-and-forget)', async () => {
    // Cache handler is queued via queueMicrotask; save() must resolve as
    // soon as the DB transaction commits. We assert save resolves quickly
    // even though handleCacheAfterSave is awaited separately.
    const entity = ctx.tracker.create();
    entity.add(TestUser, { name: 'fast', email: 'f@e.com', age: 10 });

    const start = performance.now();
    await entity.save();
    const elapsed = performance.now() - start;

    expect(entity._persisted).toBe(true);
    // Generous bound — if cache were blocking save, timings under load
    // could stretch past the budget. This just guards gross regressions.
    expect(elapsed).toBeLessThan(5000);
  });
});