bunsane 0.2.9 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +266 -0
- package/config/cache.config.ts +12 -2
- package/core/App.ts +390 -66
- package/core/ApplicationLifecycle.ts +68 -4
- package/core/Entity.ts +407 -256
- package/core/EntityHookManager.ts +88 -21
- package/core/EntityManager.ts +12 -3
- package/core/Logger.ts +4 -0
- package/core/RequestContext.ts +4 -1
- package/core/SchedulerManager.ts +92 -9
- package/core/cache/CacheFactory.ts +3 -1
- package/core/cache/CacheManager.ts +54 -17
- package/core/cache/RedisCache.ts +38 -3
- package/core/decorators/EntityHooks.ts +24 -12
- package/core/middleware/RateLimit.ts +105 -0
- package/core/middleware/index.ts +1 -0
- package/core/remote/CircuitBreaker.ts +115 -0
- package/core/remote/OutboxWorker.ts +183 -0
- package/core/remote/RemoteManager.ts +400 -0
- package/core/remote/RpcCaller.ts +310 -0
- package/core/remote/StreamConsumer.ts +535 -0
- package/core/remote/decorators.ts +121 -0
- package/core/remote/health.ts +139 -0
- package/core/remote/index.ts +37 -0
- package/core/remote/metrics.ts +99 -0
- package/core/remote/outboxSchema.ts +41 -0
- package/core/remote/types.ts +151 -0
- package/core/scheduler/DistributedLock.ts +324 -266
- package/gql/builders/ResolverBuilder.ts +4 -4
- package/gql/complexityLimit.ts +95 -0
- package/gql/index.ts +15 -3
- package/gql/visitors/ResolverGeneratorVisitor.ts +16 -2
- package/package.json +1 -1
- package/query/ComponentInclusionNode.ts +13 -6
- package/query/OrNode.ts +2 -4
- package/query/Query.ts +30 -3
- package/query/SqlIdentifier.ts +105 -0
- package/query/builders/FullTextSearchBuilder.ts +19 -6
- package/service/ServiceRegistry.ts +21 -8
- package/storage/LocalStorageProvider.ts +12 -3
- package/storage/S3StorageProvider.ts +6 -6
- package/tests/e2e/http.test.ts +6 -2
- package/tests/helpers/MockRedisClient.ts +113 -0
- package/tests/helpers/MockRedisStreamServer.ts +448 -0
- package/tests/integration/entity/Entity.saveTimeout.test.ts +110 -0
- package/tests/integration/remote/dlq.test.ts +175 -0
- package/tests/integration/remote/event-dispatch.test.ts +114 -0
- package/tests/integration/remote/outbox.test.ts +130 -0
- package/tests/integration/remote/rpc.test.ts +177 -0
- package/tests/unit/remote/CircuitBreaker.test.ts +159 -0
- package/tests/unit/remote/RemoteError.test.ts +55 -0
- package/tests/unit/remote/decorators.test.ts +195 -0
- package/tests/unit/remote/metrics.test.ts +115 -0
- package/tests/unit/remote/mockRedisStreamServer.test.ts +104 -0
- package/tests/unit/storage/S3StorageProvider.test.ts +6 -10
- package/upload/FileValidator.ts +9 -6
|
@@ -114,6 +114,10 @@ export function ComponentTargetHook(
|
|
|
114
114
|
};
|
|
115
115
|
}
|
|
116
116
|
|
|
117
|
+
/** Per-instance registry of hook IDs created by registerDecoratedHooks.
 * Used by unregisterDecoratedHooks to undo registration (H-HOOK-3).
 * Keyed by the service instance itself, so a WeakMap entry disappears
 * automatically once the instance is garbage-collected. */
const REGISTERED_IDS = new WeakMap<object, string[]>();
|
|
120
|
+
|
|
117
121
|
/**
|
|
118
122
|
* Register all decorated hooks for a service class
|
|
119
123
|
* Call this method after instantiating a service to register its decorated hooks
|
|
@@ -121,17 +125,18 @@ export function ComponentTargetHook(
|
|
|
121
125
|
*/
|
|
122
126
|
export function registerDecoratedHooks(serviceInstance: any): void {
|
|
123
127
|
const constructor = serviceInstance.constructor;
|
|
128
|
+
const ids: string[] = REGISTERED_IDS.get(serviceInstance) ?? [];
|
|
124
129
|
|
|
125
130
|
// Register entity hooks
|
|
126
131
|
if (constructor.__entityHooks) {
|
|
127
132
|
for (const hookInfo of constructor.__entityHooks) {
|
|
128
133
|
const hookMethod = serviceInstance[hookInfo.methodName].bind(serviceInstance);
|
|
129
134
|
|
|
130
|
-
EntityHookManager.registerEntityHook(
|
|
135
|
+
ids.push(EntityHookManager.registerEntityHook(
|
|
131
136
|
hookInfo.eventType,
|
|
132
137
|
hookMethod,
|
|
133
138
|
hookInfo.options
|
|
134
|
-
);
|
|
139
|
+
));
|
|
135
140
|
}
|
|
136
141
|
}
|
|
137
142
|
|
|
@@ -140,11 +145,11 @@ export function registerDecoratedHooks(serviceInstance: any): void {
|
|
|
140
145
|
for (const hookInfo of constructor.__componentHooks) {
|
|
141
146
|
const hookMethod = serviceInstance[hookInfo.methodName].bind(serviceInstance);
|
|
142
147
|
|
|
143
|
-
EntityHookManager.registerComponentHook(
|
|
148
|
+
ids.push(EntityHookManager.registerComponentHook(
|
|
144
149
|
hookInfo.eventType,
|
|
145
150
|
hookMethod,
|
|
146
151
|
hookInfo.options
|
|
147
|
-
);
|
|
152
|
+
));
|
|
148
153
|
}
|
|
149
154
|
}
|
|
150
155
|
|
|
@@ -153,14 +158,14 @@ export function registerDecoratedHooks(serviceInstance: any): void {
|
|
|
153
158
|
for (const hookInfo of constructor.__componentTargetHooks) {
|
|
154
159
|
const hookMethod = serviceInstance[hookInfo.methodName].bind(serviceInstance);
|
|
155
160
|
|
|
156
|
-
EntityHookManager.registerEntityHook(
|
|
161
|
+
ids.push(EntityHookManager.registerEntityHook(
|
|
157
162
|
hookInfo.eventType,
|
|
158
163
|
hookMethod,
|
|
159
164
|
{
|
|
160
165
|
...hookInfo.options,
|
|
161
166
|
componentTarget: hookInfo.componentTarget
|
|
162
167
|
}
|
|
163
|
-
);
|
|
168
|
+
));
|
|
164
169
|
}
|
|
165
170
|
}
|
|
166
171
|
|
|
@@ -169,19 +174,26 @@ export function registerDecoratedHooks(serviceInstance: any): void {
|
|
|
169
174
|
for (const hookInfo of constructor.__lifecycleHooks) {
|
|
170
175
|
const hookMethod = serviceInstance[hookInfo.methodName].bind(serviceInstance);
|
|
171
176
|
|
|
172
|
-
EntityHookManager.registerLifecycleHook(
|
|
177
|
+
ids.push(EntityHookManager.registerLifecycleHook(
|
|
173
178
|
hookMethod,
|
|
174
179
|
hookInfo.options
|
|
175
|
-
);
|
|
180
|
+
));
|
|
176
181
|
}
|
|
177
182
|
}
|
|
183
|
+
|
|
184
|
+
REGISTERED_IDS.set(serviceInstance, ids);
|
|
178
185
|
}
|
|
179
186
|
|
|
180
187
|
/**
|
|
181
|
-
* Unregister all decorated hooks for a service
|
|
182
|
-
* Call
|
|
183
|
-
*
|
|
188
|
+
* Unregister all decorated hooks for a service instance.
|
|
189
|
+
* Call during teardown (service destruction, test isolation) to prevent
|
|
190
|
+
* hook leaks across repeated instantiations (H-HOOK-3).
|
|
184
191
|
*/
|
|
185
192
|
export function unregisterDecoratedHooks(serviceInstance: any): void {
|
|
186
|
-
|
|
193
|
+
const ids = REGISTERED_IDS.get(serviceInstance);
|
|
194
|
+
if (!ids) return;
|
|
195
|
+
for (const id of ids) {
|
|
196
|
+
EntityHookManager.removeHook(id);
|
|
197
|
+
}
|
|
198
|
+
REGISTERED_IDS.delete(serviceInstance);
|
|
187
199
|
}
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import type { Middleware } from '../Middleware';
|
|
2
|
+
import { logger as MainLogger } from '../Logger';
|
|
3
|
+
|
|
4
|
+
const logger = MainLogger.child({ scope: 'RateLimit' });
|
|
5
|
+
|
|
6
|
+
export type RateLimitOptions = {
|
|
7
|
+
/** Maximum requests in the window. Default: 100 */
|
|
8
|
+
max?: number;
|
|
9
|
+
/** Window length in milliseconds. Default: 60_000 (1 min) */
|
|
10
|
+
windowMs?: number;
|
|
11
|
+
/** Only apply to paths matching this prefix list. Default: all */
|
|
12
|
+
pathPrefixes?: string[];
|
|
13
|
+
/** Extract client key (override default: X-Forwarded-For → remote). */
|
|
14
|
+
keyExtractor?: (req: Request) => string;
|
|
15
|
+
/** Response status for rejection. Default: 429 */
|
|
16
|
+
status?: number;
|
|
17
|
+
/** Trust X-Forwarded-For header. Default: false */
|
|
18
|
+
trustProxy?: boolean;
|
|
19
|
+
};
|
|
20
|
+
|
|
21
|
+
type Bucket = {
|
|
22
|
+
count: number;
|
|
23
|
+
resetAt: number;
|
|
24
|
+
};
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* In-memory token-bucket rate limiter. Per-instance only — for multi-instance
|
|
28
|
+
* deployments use a shared Redis-backed limiter. Sweeps expired buckets on
|
|
29
|
+
* each increment to keep memory bounded.
|
|
30
|
+
*/
|
|
31
|
+
export function rateLimit(options: RateLimitOptions = {}): Middleware {
|
|
32
|
+
const max = options.max ?? 100;
|
|
33
|
+
const windowMs = options.windowMs ?? 60_000;
|
|
34
|
+
const pathPrefixes = options.pathPrefixes;
|
|
35
|
+
const status = options.status ?? 429;
|
|
36
|
+
const trustProxy = options.trustProxy ?? false;
|
|
37
|
+
const keyExtractor = options.keyExtractor ?? ((req: Request) => {
|
|
38
|
+
if (trustProxy) {
|
|
39
|
+
const xff = req.headers.get('x-forwarded-for');
|
|
40
|
+
if (xff) return xff.split(',')[0]!.trim();
|
|
41
|
+
}
|
|
42
|
+
const realIp = req.headers.get('x-real-ip');
|
|
43
|
+
if (realIp) return realIp;
|
|
44
|
+
return 'anonymous';
|
|
45
|
+
});
|
|
46
|
+
|
|
47
|
+
const buckets = new Map<string, Bucket>();
|
|
48
|
+
let lastSweep = Date.now();
|
|
49
|
+
|
|
50
|
+
return async (req, next) => {
|
|
51
|
+
if (pathPrefixes && pathPrefixes.length > 0) {
|
|
52
|
+
const url = new URL(req.url);
|
|
53
|
+
const match = pathPrefixes.some((p) => url.pathname.startsWith(p));
|
|
54
|
+
if (!match) return next();
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
const now = Date.now();
|
|
58
|
+
const key = keyExtractor(req);
|
|
59
|
+
|
|
60
|
+
if (now - lastSweep > windowMs) {
|
|
61
|
+
for (const [k, v] of buckets) {
|
|
62
|
+
if (v.resetAt <= now) buckets.delete(k);
|
|
63
|
+
}
|
|
64
|
+
lastSweep = now;
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
let bucket = buckets.get(key);
|
|
68
|
+
if (!bucket || bucket.resetAt <= now) {
|
|
69
|
+
bucket = { count: 0, resetAt: now + windowMs };
|
|
70
|
+
buckets.set(key, bucket);
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
bucket.count++;
|
|
74
|
+
const remaining = Math.max(0, max - bucket.count);
|
|
75
|
+
const retryAfterSec = Math.ceil((bucket.resetAt - now) / 1000);
|
|
76
|
+
|
|
77
|
+
if (bucket.count > max) {
|
|
78
|
+
logger.warn({ key, path: new URL(req.url).pathname, count: bucket.count, max }, 'rate limit exceeded');
|
|
79
|
+
return new Response(
|
|
80
|
+
JSON.stringify({ error: 'Too many requests', retryAfter: retryAfterSec }),
|
|
81
|
+
{
|
|
82
|
+
status,
|
|
83
|
+
headers: {
|
|
84
|
+
'Content-Type': 'application/json',
|
|
85
|
+
'Retry-After': String(retryAfterSec),
|
|
86
|
+
'X-RateLimit-Limit': String(max),
|
|
87
|
+
'X-RateLimit-Remaining': '0',
|
|
88
|
+
'X-RateLimit-Reset': String(Math.floor(bucket.resetAt / 1000)),
|
|
89
|
+
},
|
|
90
|
+
},
|
|
91
|
+
);
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
const response = await next();
|
|
95
|
+
const newHeaders = new Headers(response.headers);
|
|
96
|
+
newHeaders.set('X-RateLimit-Limit', String(max));
|
|
97
|
+
newHeaders.set('X-RateLimit-Remaining', String(remaining));
|
|
98
|
+
newHeaders.set('X-RateLimit-Reset', String(Math.floor(bucket.resetAt / 1000)));
|
|
99
|
+
return new Response(response.body, {
|
|
100
|
+
status: response.status,
|
|
101
|
+
statusText: response.statusText,
|
|
102
|
+
headers: newHeaders,
|
|
103
|
+
});
|
|
104
|
+
};
|
|
105
|
+
}
|
package/core/middleware/index.ts
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
1
1
|
export { securityHeaders, type SecurityHeadersOptions } from './SecurityHeaders';
|
|
2
2
|
export { requestId, getRequestId, requestStore } from './RequestId';
|
|
3
3
|
export { accessLog, type AccessLogOptions } from './AccessLog';
|
|
4
|
+
export { rateLimit, type RateLimitOptions } from './RateLimit';
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Remote Communication: CircuitBreaker
|
|
3
|
+
*
|
|
4
|
+
* Three-state breaker: closed -> open -> half-open -> closed.
|
|
5
|
+
*
|
|
6
|
+
* closed: pass through; increment failure count on error; trip to open at N.
|
|
7
|
+
* open: reject immediately (fail-fast) until reset timeout elapses.
|
|
8
|
+
* half: one trial call allowed; success -> closed, failure -> open again.
|
|
9
|
+
*
|
|
10
|
+
* Wraps Redis publish operations so a sustained Redis outage does not stall
|
|
11
|
+
* callers waiting for command timeouts on every request.
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
export type CircuitState = "closed" | "open" | "half-open";
|
|
15
|
+
|
|
16
|
+
export interface CircuitBreakerConfig {
|
|
17
|
+
/** Consecutive failures before opening (default 5) */
|
|
18
|
+
threshold?: number;
|
|
19
|
+
/** ms after opening before a half-open trial is allowed (default 30000) */
|
|
20
|
+
resetTimeoutMs?: number;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
export class CircuitOpenError extends Error {
|
|
24
|
+
public readonly code = "CIRCUIT_OPEN";
|
|
25
|
+
constructor(message = "Circuit breaker is open") {
|
|
26
|
+
super(message);
|
|
27
|
+
this.name = "CircuitOpenError";
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
export class CircuitBreaker {
|
|
32
|
+
private state: CircuitState = "closed";
|
|
33
|
+
private failures = 0;
|
|
34
|
+
private openedAt = 0;
|
|
35
|
+
private threshold: number;
|
|
36
|
+
private resetTimeoutMs: number;
|
|
37
|
+
|
|
38
|
+
/** Hooks for metrics. */
|
|
39
|
+
public onTrip?: () => void;
|
|
40
|
+
public onReject?: () => void;
|
|
41
|
+
|
|
42
|
+
constructor(config: CircuitBreakerConfig = {}) {
|
|
43
|
+
this.threshold = config.threshold ?? 5;
|
|
44
|
+
this.resetTimeoutMs = config.resetTimeoutMs ?? 30_000;
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
getState(): CircuitState {
|
|
48
|
+
// Lazy transition from open -> half-open when reset window elapses.
|
|
49
|
+
if (
|
|
50
|
+
this.state === "open" &&
|
|
51
|
+
Date.now() - this.openedAt >= this.resetTimeoutMs
|
|
52
|
+
) {
|
|
53
|
+
this.state = "half-open";
|
|
54
|
+
}
|
|
55
|
+
return this.state;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
async exec<T>(fn: () => Promise<T>): Promise<T> {
|
|
59
|
+
const state = this.getState();
|
|
60
|
+
if (state === "open") {
|
|
61
|
+
this.onReject?.();
|
|
62
|
+
throw new CircuitOpenError();
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
try {
|
|
66
|
+
const result = await fn();
|
|
67
|
+
this.recordSuccess();
|
|
68
|
+
return result;
|
|
69
|
+
} catch (err) {
|
|
70
|
+
this.recordFailure();
|
|
71
|
+
throw err;
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
recordSuccess(): void {
|
|
76
|
+
const current = this.getState();
|
|
77
|
+
if (current === "half-open") {
|
|
78
|
+
this.state = "closed";
|
|
79
|
+
}
|
|
80
|
+
this.failures = 0;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
recordFailure(): void {
|
|
84
|
+
// Force lazy open->half-open transition before deciding what to do.
|
|
85
|
+
const current = this.getState();
|
|
86
|
+
this.failures++;
|
|
87
|
+
if (current === "half-open") {
|
|
88
|
+
// Trial failed — back to open.
|
|
89
|
+
this.state = "open";
|
|
90
|
+
this.openedAt = Date.now();
|
|
91
|
+
this.onTrip?.();
|
|
92
|
+
return;
|
|
93
|
+
}
|
|
94
|
+
if (current === "closed" && this.failures >= this.threshold) {
|
|
95
|
+
this.state = "open";
|
|
96
|
+
this.openedAt = Date.now();
|
|
97
|
+
this.onTrip?.();
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
/** Force reset (useful for tests or manual recovery). */
|
|
102
|
+
reset(): void {
|
|
103
|
+
this.state = "closed";
|
|
104
|
+
this.failures = 0;
|
|
105
|
+
this.openedAt = 0;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
getStats() {
|
|
109
|
+
return {
|
|
110
|
+
state: this.getState(),
|
|
111
|
+
failures: this.failures,
|
|
112
|
+
openedAt: this.openedAt,
|
|
113
|
+
};
|
|
114
|
+
}
|
|
115
|
+
}
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
/**
 * Remote Communication: OutboxWorker
 *
 * Polls `remote_outbox` for unpublished rows, publishes each to Redis, and
 * marks the row published. Uses `FOR UPDATE SKIP LOCKED` so multiple
 * instances can run workers concurrently without double-publishing:
 * each row is claimed by exactly one worker per batch.
 *
 * At-least-once semantics: if the worker crashes after XADD but before the
 * UPDATE commits, the row stays pending and will be republished. Consumers
 * must be idempotent — enforce this at the handler level (e.g., dedup on
 * `ctx.messageId` or domain-level idempotency keys).
 */

import type Redis from "ioredis";
import { sql as sqlHelper, type SQL } from "bun";
import { logger } from "../Logger";
import type { RemoteMetrics } from "./metrics";

const loggerInstance = logger.child({ scope: "OutboxWorker" });

export interface OutboxWorkerConfig {
  /** Name of this app, stamped into every published envelope. */
  sourceApp: string;
  /** Stream key prefix; the full stream is `${streamPrefix}${row.target}`. */
  streamPrefix: string;
  /** Delay between polling ticks, in ms. */
  pollIntervalMs: number;
  /** Maximum rows claimed per tick. */
  batchSize: number;
  /** Emit per-batch debug logs when true. */
  enableLogging: boolean;
}

/** Shape of a `remote_outbox` row as selected by processBatch. */
interface OutboxRow {
  id: string;
  target: string;
  event: string;
  data: unknown;
  created_at: Date;
}

export class OutboxWorker {
  private db: SQL;
  private publisher: Redis;
  private config: OutboxWorkerConfig;
  // True between start() and stop(); gates scheduling and ticking.
  private running = false;
  // Pending poll timer; cleared on stop().
  // NOTE(review): the timer is not unref()'d, so an idle worker keeps the
  // process alive until stop() is called — confirm this is intended.
  private timer: ReturnType<typeof setTimeout> | null = null;
  // In-flight tick, awaited by stop() for graceful shutdown.
  private currentTick: Promise<void> | null = null;
  private metrics?: RemoteMetrics;

  constructor(
    db: SQL,
    publisher: Redis,
    config: OutboxWorkerConfig,
    metrics?: RemoteMetrics
  ) {
    this.db = db;
    this.publisher = publisher;
    this.config = config;
    this.metrics = metrics;
  }

  /** Begin polling. Idempotent; the first tick fires immediately. */
  async start(): Promise<void> {
    if (this.running) return;
    this.running = true;
    this.scheduleNext(0);
    loggerInstance.info(
      `OutboxWorker started pollMs=${this.config.pollIntervalMs} batch=${this.config.batchSize}`
    );
  }

  /** Stop polling and wait for any in-flight tick to finish. Idempotent. */
  async stop(): Promise<void> {
    if (!this.running) return;
    this.running = false;
    if (this.timer) {
      clearTimeout(this.timer);
      this.timer = null;
    }
    if (this.currentTick) {
      // Tick errors are already logged inside tick(); ignore here.
      await this.currentTick.catch(() => {});
    }
    loggerInstance.info("OutboxWorker stopped");
  }

  /**
   * Force an immediate tick. Used during shutdown to flush any
   * committed-but-unpublished rows before the process exits.
   *
   * NOTE(review): tick() bails when `running` is false, so flush() is a
   * no-op after stop() — confirm shutdown flushes BEFORE stopping.
   */
  async flush(): Promise<void> {
    await this.tick();
  }

  // Arm the next poll. Re-armed only from the previous tick's finally,
  // so scheduled ticks never overlap each other (a concurrent flush()
  // remains safe thanks to SKIP LOCKED in processBatch).
  private scheduleNext(delayMs: number): void {
    if (!this.running) return;
    this.timer = setTimeout(() => {
      this.currentTick = this.tick().finally(() => {
        this.currentTick = null;
        this.scheduleNext(this.config.pollIntervalMs);
      });
    }, delayMs);
  }

  // One poll cycle: claim + publish + mark. Never throws — errors are
  // logged so the polling loop keeps running.
  private async tick(): Promise<void> {
    if (!this.running) return;
    try {
      await this.processBatch();
    } catch (error: any) {
      loggerInstance.error(
        { err: error, msg: "OutboxWorker tick error" }
      );
    }
  }

  // Claim up to batchSize pending rows inside one transaction, XADD each
  // to its target stream, then bulk-mark the successful ones published.
  // Failed publishes stay pending and are retried on a later tick.
  private async processBatch(): Promise<void> {
    const db = this.db as any;
    await db.begin(async (trx: any) => {
      const rows: OutboxRow[] = await trx`
        SELECT id, target, event, data, created_at
        FROM remote_outbox
        WHERE published_at IS NULL
        ORDER BY created_at
        LIMIT ${this.config.batchSize}
        FOR UPDATE SKIP LOCKED
      `;

      if (rows.length === 0) return;

      this.metrics?.outboxClaimed(rows.length);
      if (this.config.enableLogging) {
        loggerInstance.debug(`Claimed ${rows.length} outbox rows`);
      }

      // Publish concurrently rather than serially. Each xadd is bounded
      // by the publisher client's `commandTimeout`; with serial awaits a
      // batch of N slow rows would hold PG row locks for N × timeout.
      // Parallel keeps worst-case lock hold ≈ single-xadd timeout.
      // (H-DB-1 partial — full fix requires a claim-via-column design
      // so Redis latency no longer sits inside a PG transaction at all.)
      const publishResults = await Promise.allSettled(
        rows.map((row) => {
          const stream = `${this.config.streamPrefix}${row.target}`;
          const envelope = JSON.stringify({
            kind: "event",
            sourceApp: this.config.sourceApp,
            event: row.event,
            data: row.data,
            emittedAt: row.created_at.getTime(),
          });
          return this.publisher.xadd(stream, "*", "data", envelope);
        })
      );

      const successIds: string[] = [];
      for (let i = 0; i < publishResults.length; i++) {
        const r = publishResults[i];
        const row = rows[i]!;
        if (r!.status === "fulfilled") {
          successIds.push(row.id);
        } else {
          this.metrics?.outboxPublishFailed();
          loggerInstance.error({
            err: r!.reason,
            outboxId: row.id,
            target: row.target,
            event: row.event,
            msg: "Outbox XADD failed — row will retry next tick",
          });
          // Leave row unpublished; SKIP LOCKED releases on tx end
          // so next tick (or another instance) picks it up.
        }
      }

      if (successIds.length > 0) {
        // Single bulk UPDATE instead of N round-trips holding row
        // locks (H-DB-3). Previously each success fired its own
        // UPDATE statement serially. Uses Bun SQL's `sql(...)` helper
        // for the IN-list so ids are parameterised individually.
        await trx`
          UPDATE remote_outbox
          SET published_at = NOW()
          WHERE id IN ${sqlHelper(successIds)}
        `;
        this.metrics?.outboxPublished(successIds.length);
      }
    });
  }
}
|