@crossdelta/cloudevents 0.1.7 → 0.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +230 -48
- package/dist/src/domain/handler-factory.js +2 -2
- package/dist/src/transports/nats/base-message-processor.d.ts +44 -0
- package/dist/src/transports/nats/base-message-processor.js +107 -0
- package/dist/src/transports/nats/index.d.ts +3 -0
- package/dist/src/transports/nats/index.js +3 -0
- package/dist/src/transports/nats/jetstream-consumer.d.ts +72 -0
- package/dist/src/transports/nats/jetstream-consumer.js +187 -0
- package/dist/src/transports/nats/jetstream-message-processor.d.ts +9 -0
- package/dist/src/transports/nats/jetstream-message-processor.js +32 -0
- package/dist/src/transports/nats/nats-message-processor.d.ts +4 -15
- package/dist/src/transports/nats/nats-message-processor.js +15 -78
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -1,15 +1,38 @@
|
|
|
1
1
|
# @crossdelta/cloudevents
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
[](https://www.npmjs.com/package/@crossdelta/cloudevents)
|
|
4
|
+
[](https://opensource.org/licenses/MIT)
|
|
5
|
+
[](https://www.typescriptlang.org/)
|
|
4
6
|
|
|
5
|
-
|
|
7
|
+
A TypeScript toolkit for [CloudEvents](https://cloudevents.io/) over [NATS](https://nats.io/).
|
|
6
8
|
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
-
|
|
12
|
-
|
|
9
|
+
Publish events from one service, consume them in another — with automatic handler discovery, type-safe validation, and guaranteed delivery via JetStream.
|
|
10
|
+
|
|
11
|
+
```
|
|
12
|
+
┌─────────────────┐ NATS ┌─────────────────┐
|
|
13
|
+
│ orders-service │ ──── publish ───► │ JetStream │
|
|
14
|
+
└─────────────────┘ CloudEvent │ (persistent) │
|
|
15
|
+
└────────┬────────┘
|
|
16
|
+
│
|
|
17
|
+
┌───────────────────────┼───────────────────────┐
|
|
18
|
+
▼ ▼ ▼
|
|
19
|
+
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
|
|
20
|
+
│ notifications │ │ billing │ │ analytics │
|
|
21
|
+
│ service │ │ service │ │ service │
|
|
22
|
+
└─────────────────┘ └─────────────────┘ └─────────────────┘
|
|
23
|
+
consume consume consume
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
## Why this library?
|
|
27
|
+
|
|
28
|
+
Event-driven microservices are hard: messages get lost when services restart, handlers are scattered across files, validation is inconsistent.
|
|
29
|
+
|
|
30
|
+
| Feature | Benefit |
|
|
31
|
+
|---------|---------|
|
|
32
|
+
| 🔍 **Auto-discovery** | Drop a `*.event.ts` file, it's registered automatically |
|
|
33
|
+
| 🛡️ **Type-safe handlers** | Zod schemas ensure runtime validation matches TypeScript |
|
|
34
|
+
| 🔄 **JetStream persistence** | Messages survive restarts, get retried on failure |
|
|
35
|
+
| 🏥 **DLQ-safe processing** | Invalid messages are quarantined, not lost |
|
|
13
36
|
|
|
14
37
|
## Installation
|
|
15
38
|
|
|
@@ -17,79 +40,238 @@ CloudEvents toolkit for TypeScript. Handler discovery, DLQ-safe processing, NATS
|
|
|
17
40
|
bun add @crossdelta/cloudevents
|
|
18
41
|
```
|
|
19
42
|
|
|
20
|
-
|
|
43
|
+
## Getting Started
|
|
21
44
|
|
|
22
|
-
|
|
45
|
+
A minimal example: `orders-service` publishes an event, `notifications-service` consumes it.
|
|
23
46
|
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
```
|
|
47
|
+
### 1. Publish an Event (orders-service)
|
|
48
|
+
|
|
49
|
+
```typescript
|
|
50
|
+
// orders-service/src/index.ts
|
|
51
|
+
import { publishNatsEvent } from '@crossdelta/cloudevents'
|
|
27
52
|
|
|
28
|
-
|
|
53
|
+
// When an order is created...
|
|
54
|
+
await publishNatsEvent({
|
|
55
|
+
type: 'com.acme.orders.created',
|
|
56
|
+
source: '/orders-service',
|
|
57
|
+
data: {
|
|
58
|
+
orderId: 'ord_123',
|
|
59
|
+
customerId: 'cust_456',
|
|
60
|
+
total: 99.99,
|
|
61
|
+
},
|
|
62
|
+
})
|
|
63
|
+
```
|
|
29
64
|
|
|
30
|
-
### Define a
|
|
65
|
+
### 2. Define a Handler (notifications-service)
|
|
31
66
|
|
|
32
67
|
```typescript
|
|
68
|
+
// notifications-service/src/handlers/order-created.event.ts
|
|
33
69
|
import { z } from 'zod'
|
|
34
|
-
import {
|
|
70
|
+
import { handleEvent } from '@crossdelta/cloudevents'
|
|
35
71
|
|
|
36
|
-
|
|
37
|
-
type:
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
72
|
+
export default handleEvent({
|
|
73
|
+
type: 'com.acme.orders.created',
|
|
74
|
+
schema: z.object({
|
|
75
|
+
orderId: z.string(),
|
|
76
|
+
customerId: z.string(),
|
|
77
|
+
total: z.number(),
|
|
41
78
|
}),
|
|
79
|
+
async handle(data) {
|
|
80
|
+
console.log(`📧 Sending confirmation for order ${data.orderId}`)
|
|
81
|
+
// Send email, push notification, etc.
|
|
82
|
+
},
|
|
42
83
|
})
|
|
84
|
+
```
|
|
43
85
|
|
|
44
|
-
|
|
45
|
-
|
|
86
|
+
### 3. Start Consuming (notifications-service)
|
|
87
|
+
|
|
88
|
+
```typescript
|
|
89
|
+
// notifications-service/src/index.ts
|
|
90
|
+
import { consumeJetStreamEvents } from '@crossdelta/cloudevents'
|
|
91
|
+
|
|
92
|
+
await consumeJetStreamEvents({
|
|
93
|
+
stream: 'ORDERS',
|
|
94
|
+
subjects: ['com.acme.orders.>'],
|
|
95
|
+
consumer: 'notifications-service',
|
|
96
|
+
discover: './src/handlers/**/*.event.ts', // Auto-discovers handlers
|
|
46
97
|
})
|
|
98
|
+
|
|
99
|
+
console.log('🎧 Listening for order events...')
|
|
47
100
|
```
|
|
48
101
|
|
|
49
|
-
|
|
102
|
+
**That's it.** The handler is discovered automatically, events are persisted in JetStream, and failed messages are retried.
|
|
103
|
+
|
|
104
|
+
## Consuming Events
|
|
105
|
+
|
|
106
|
+
### NATS Core (Fire & Forget)
|
|
107
|
+
|
|
108
|
+
For high-throughput scenarios where occasional message loss is acceptable:
|
|
50
109
|
|
|
51
110
|
```typescript
|
|
52
|
-
import {
|
|
53
|
-
import { cloudEvents } from '@crossdelta/cloudevents'
|
|
111
|
+
import { consumeNatsEvents } from '@crossdelta/cloudevents'
|
|
54
112
|
|
|
55
|
-
|
|
113
|
+
await consumeNatsEvents({
|
|
114
|
+
subject: 'telemetry.>',
|
|
115
|
+
discover: './src/handlers/**/*.event.ts',
|
|
116
|
+
})
|
|
117
|
+
```
|
|
56
118
|
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
119
|
+
### NATS JetStream (Guaranteed Delivery)
|
|
120
|
+
|
|
121
|
+
For critical business events that must not be lost:
|
|
122
|
+
|
|
123
|
+
```typescript
|
|
124
|
+
import { consumeJetStreamEvents } from '@crossdelta/cloudevents'
|
|
125
|
+
|
|
126
|
+
await consumeJetStreamEvents({
|
|
127
|
+
// Stream configuration
|
|
128
|
+
stream: 'ORDERS',
|
|
129
|
+
subjects: ['orders.>'],
|
|
130
|
+
|
|
131
|
+
// Consumer configuration
|
|
132
|
+
consumer: 'billing-service',
|
|
133
|
+
discover: './src/handlers/**/*.event.ts',
|
|
134
|
+
|
|
135
|
+
// Optional: Stream settings
|
|
136
|
+
streamConfig: {
|
|
137
|
+
maxAge: 7 * 24 * 60 * 60 * 1000, // 7 days retention (milliseconds)
|
|
138
|
+
maxBytes: 1024 * 1024 * 1024, // 1 GB max
|
|
139
|
+
replicas: 3, // For HA clusters
|
|
140
|
+
},
|
|
141
|
+
|
|
142
|
+
// Optional: Consumer settings
|
|
143
|
+
ackWait: 30_000, // 30s to process before retry
|
|
144
|
+
maxDeliver: 5, // Max retry attempts
|
|
145
|
+
startFrom: 'all', // 'new' | 'all' | 'last' | Date
|
|
146
|
+
})
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
## Handler Patterns
|
|
150
|
+
|
|
151
|
+
### Data-Only Schema (Recommended)
|
|
152
|
+
|
|
153
|
+
The simplest pattern — just validate the `data` field:
|
|
154
|
+
|
|
155
|
+
```typescript
|
|
156
|
+
export default handleEvent({
|
|
157
|
+
type: 'com.example.users.created',
|
|
158
|
+
schema: z.object({
|
|
159
|
+
userId: z.string().uuid(),
|
|
160
|
+
email: z.string().email(),
|
|
161
|
+
}),
|
|
162
|
+
async handle(data) {
|
|
163
|
+
// data is typed as { userId: string, email: string }
|
|
164
|
+
},
|
|
165
|
+
})
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
### Full CloudEvent Schema
|
|
169
|
+
|
|
170
|
+
When you need access to CloudEvent metadata:
|
|
171
|
+
|
|
172
|
+
```typescript
|
|
173
|
+
export default handleEvent({
|
|
174
|
+
type: 'com.example.orders.shipped',
|
|
175
|
+
schema: z.object({
|
|
176
|
+
data: z.object({
|
|
177
|
+
orderId: z.string(),
|
|
178
|
+
trackingNumber: z.string(),
|
|
179
|
+
}),
|
|
180
|
+
source: z.string(),
|
|
181
|
+
subject: z.string().optional(),
|
|
182
|
+
}),
|
|
183
|
+
async handle(event) {
|
|
184
|
+
// event.data, event.source, event.subject all available
|
|
185
|
+
},
|
|
186
|
+
})
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
### Conditional Matching
|
|
190
|
+
|
|
191
|
+
Process only specific events:
|
|
192
|
+
|
|
193
|
+
```typescript
|
|
194
|
+
export default handleEvent({
|
|
195
|
+
type: 'com.example.orders.*',
|
|
196
|
+
schema: OrderSchema,
|
|
197
|
+
match: (event) => event.data.region === 'EU',
|
|
198
|
+
async handle(data) {
|
|
199
|
+
// Only EU orders
|
|
200
|
+
},
|
|
201
|
+
})
|
|
60
202
|
```
|
|
61
203
|
|
|
62
204
|
## Publishing
|
|
63
205
|
|
|
206
|
+
### To NATS
|
|
207
|
+
|
|
64
208
|
```typescript
|
|
65
|
-
import { publishNatsEvent } from '@crossdelta/cloudevents'
|
|
209
|
+
import { publishNatsEvent, publishNatsRawEvent } from '@crossdelta/cloudevents'
|
|
66
210
|
|
|
67
|
-
|
|
68
|
-
|
|
211
|
+
// Structured CloudEvent
|
|
212
|
+
await publishNatsEvent({
|
|
213
|
+
type: 'com.example.orders.created',
|
|
214
|
+
source: '/orders-service',
|
|
215
|
+
subject: 'order-123',
|
|
216
|
+
data: { orderId: '123', total: 99.99 },
|
|
217
|
+
})
|
|
218
|
+
|
|
219
|
+
// Raw data (auto-wrapped in CloudEvent)
|
|
220
|
+
await publishNatsRawEvent('orders.created', {
|
|
221
|
+
orderId: '123',
|
|
222
|
+
total: 99.99,
|
|
69
223
|
})
|
|
70
224
|
```
|
|
71
225
|
|
|
72
|
-
|
|
226
|
+
### To Google Pub/Sub
|
|
73
227
|
|
|
74
228
|
```typescript
|
|
75
|
-
import {
|
|
229
|
+
import { publishEvent } from '@crossdelta/cloudevents'
|
|
76
230
|
|
|
77
|
-
await
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
discover: 'dist/events/*.event.js',
|
|
81
|
-
consumerName: 'orders-api',
|
|
231
|
+
await publishEvent('orders-topic', {
|
|
232
|
+
type: 'com.example.orders.created',
|
|
233
|
+
data: { orderId: '123' },
|
|
82
234
|
})
|
|
83
235
|
```
|
|
84
236
|
|
|
85
|
-
##
|
|
237
|
+
## Hono Middleware
|
|
238
|
+
|
|
239
|
+
For HTTP-based event ingestion:
|
|
240
|
+
|
|
241
|
+
```typescript
|
|
242
|
+
import { Hono } from 'hono'
|
|
243
|
+
import { cloudEvents } from '@crossdelta/cloudevents'
|
|
244
|
+
|
|
245
|
+
const app = new Hono()
|
|
246
|
+
|
|
247
|
+
app.use('/events', cloudEvents({
|
|
248
|
+
discover: 'src/handlers/**/*.event.ts',
|
|
249
|
+
dlqEnabled: true,
|
|
250
|
+
}))
|
|
251
|
+
```
|
|
252
|
+
|
|
253
|
+
## API Reference
|
|
86
254
|
|
|
87
255
|
| Function | Description |
|
|
88
256
|
|----------|-------------|
|
|
89
|
-
| `
|
|
90
|
-
| `
|
|
91
|
-
| `
|
|
92
|
-
| `publishNatsEvent`
|
|
93
|
-
| `
|
|
94
|
-
| `
|
|
95
|
-
| `clearHandlerCache()` | Reset discovery cache |
|
|
257
|
+
| `handleEvent(config)` | Create a discoverable event handler |
|
|
258
|
+
| `consumeJetStreamEvents(options)` | Subscribe with guaranteed delivery |
|
|
259
|
+
| `consumeNatsEvents(options)` | Subscribe with fire-and-forget |
|
|
260
|
+
| `publishNatsEvent(event)` | Publish structured CloudEvent to NATS |
|
|
261
|
+
| `publishNatsRawEvent(subject, data)` | Publish raw data to NATS |
|
|
262
|
+
| `cloudEvents(options)` | Hono middleware for HTTP ingestion |
|
|
263
|
+
| `clearHandlerCache()` | Reset handler discovery cache |
|
|
264
|
+
|
|
265
|
+
## Why JetStream?
|
|
266
|
+
|
|
267
|
+
| Scenario | Core NATS | JetStream |
|
|
268
|
+
|----------|-----------|-----------|
|
|
269
|
+
| Service restarts | ❌ Messages lost | ✅ Messages replayed |
|
|
270
|
+
| Handler crashes | ❌ Message lost | ✅ Auto-retry with backoff |
|
|
271
|
+
| Multiple consumers | ❌ All receive same msg | ✅ Load balanced |
|
|
272
|
+
| Message history | ❌ None | ✅ Configurable retention |
|
|
273
|
+
| Exactly-once | ❌ At-most-once | ✅ With deduplication |
|
|
274
|
+
|
|
275
|
+
## License
|
|
276
|
+
|
|
277
|
+
MIT
|
|
@@ -36,11 +36,11 @@ export function handleEvent(schemaOrOptions, handler, eventType) {
|
|
|
36
36
|
finalEventType = 'unknown.event';
|
|
37
37
|
}
|
|
38
38
|
// Create handler class with proper naming (e.g., "orderboss.orders.created" → "OrdersCreatedHandler")
|
|
39
|
-
const handlerName = finalEventType
|
|
39
|
+
const handlerName = `${finalEventType
|
|
40
40
|
.split('.')
|
|
41
41
|
.slice(-2) // Take last 2 segments (e.g., ["orders", "created"])
|
|
42
42
|
.map(s => s.charAt(0).toUpperCase() + s.slice(1)) // Capitalize
|
|
43
|
-
.join('')
|
|
43
|
+
.join('')}Handler`;
|
|
44
44
|
const HandlerClass = class extends Object {
|
|
45
45
|
static __eventarcMetadata = {
|
|
46
46
|
schema,
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
/**
 * Shared message processing logic for NATS Core and JetStream consumers.
 * Abstracts CloudEvent parsing, handler matching, validation, and execution.
 */
import type { CloudEventV1 } from 'cloudevents';
import type { EnrichedEvent } from '../../domain';
import { createProcessingContext, type DlqOptions } from '../../processing/dlq-safe';
import type { ProcessedHandler } from '../../processing/handler-cache';
/** Minimal logger contract required by the processor (console-compatible). */
export interface LoggerLike {
    info(message: string, meta?: unknown): void;
    warn(message: string, meta?: unknown): void;
    error(message: string, meta?: unknown): void;
}
/** Dependencies injected into {@link createBaseMessageProcessor}. */
export interface BaseMessageProcessorDeps {
    /** Consumer name, used as a `[name]` prefix in log messages. */
    name: string;
    /** When true, failing messages are quarantined/forwarded instead of retried. */
    dlqEnabled: boolean;
    /** Dead-letter-queue publishing options. */
    options: DlqOptions;
    /** Discovered handlers, matched against incoming events by type. */
    processedHandlers: ProcessedHandler[];
    /** Decodes raw message bytes into a JSON string. */
    decode: (data: Uint8Array) => string;
    logger: LoggerLike;
}
export type ProcessingContext = ReturnType<typeof createProcessingContext>;
/** Discriminated union describing the outcome of parsing a raw message. */
export type ParseResult<T> = {
    ok: true;
    cloudEvent: CloudEventV1<unknown>;
    enriched: EnrichedEvent<unknown>;
} | {
    ok: false;
    error: unknown;
    context: ProcessingContext;
    rawMessage: T;
};
/**
 * Creates shared message processing utilities.
 * Boolean results encode the ack decision: true = acknowledge, false = retry.
 */
export declare function createBaseMessageProcessor(deps: BaseMessageProcessorDeps): {
    toEnrichedEvent: (ce: CloudEventV1<unknown>) => EnrichedEvent<unknown>;
    createContext: (event: EnrichedEvent<unknown>, ce?: CloudEventV1<unknown>) => import("../../processing").ProcessingContext;
    parseCloudEvent: (data: Uint8Array) => CloudEventV1<unknown>;
    findHandler: (event: EnrichedEvent<unknown>) => ProcessedHandler | undefined;
    processEvent: (cloudEvent: CloudEventV1<unknown>, enriched: EnrichedEvent<unknown>) => Promise<boolean>;
    handleParseError: (error: unknown, context: ProcessingContext, redeliveryCount?: number) => Promise<boolean>;
    handleUnhandledError: (error: unknown, context: ProcessingContext, ackFn?: () => void) => Promise<void>;
};
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared message processing logic for NATS Core and JetStream consumers.
|
|
3
|
+
* Abstracts CloudEvent parsing, handler matching, validation, and execution.
|
|
4
|
+
*/
|
|
5
|
+
import { createProcessingContext, publishRecoverableError, quarantineMessage, } from '../../processing/dlq-safe';
|
|
6
|
+
import { throwValidationError, validateEventData } from '../../processing/validation';
|
|
7
|
+
/**
 * Creates shared message processing utilities.
 *
 * Implements the pipeline common to the NATS Core and JetStream consumers:
 * enrich the CloudEvent, find a matching handler, validate the payload, run
 * the handler, and route failures either to the DLQ (when `dlqEnabled`) or
 * back to the broker for redelivery.
 *
 * Boolean results encode the ack decision: true = acknowledge, false = retry.
 */
export function createBaseMessageProcessor(deps) {
    const { name, dlqEnabled, options, processedHandlers, decode, logger } = deps;
    // Shape a raw CloudEvent into the enriched event passed to handlers.
    const toEnrichedEvent = (ce) => ({
        eventType: ce.type,
        source: ce.source,
        subject: ce.subject,
        // Fall back to "now" when the producer omitted the event time.
        time: ce.time ?? new Date().toISOString(),
        messageId: ce.id,
        data: ce.data,
    });
    const createContext = (event, ce) => createProcessingContext(event.eventType, event.data, event, ce);
    // Decode bytes and parse as JSON. Throws on malformed payloads; callers
    // are expected to route that through handleParseError.
    const parseCloudEvent = (data) => {
        return JSON.parse(decode(data));
    };
    // First handler whose type matches and whose optional `match` predicate accepts the event.
    const findHandler = (event) => processedHandlers.find((handler) => handler.type === event.eventType && (!handler.match || handler.match(event)));
    const handleMissingHandler = async (context, eventType) => {
        logger.warn(`[${name}] no handler for event type: ${eventType}`);
        if (dlqEnabled) {
            await quarantineMessage(context, 'no_handler', options, new Error(`No handler for event type ${eventType}`));
        }
        // Ack - no point in retrying if there's no handler
        return { handled: true, shouldAck: true };
    };
    const handleValidationFailure = async (validationResult, handler, context) => {
        if (dlqEnabled) {
            // Quarantine invalid payloads instead of retrying them forever.
            await quarantineMessage(context, 'validation_error', options, validationResult.error);
            return { handled: true, shouldAck: true };
        }
        if (validationResult.shouldSkip) {
            return { handled: true, shouldAck: true };
        }
        // Throw to trigger retry
        throwValidationError(handler.name, validationResult.error);
        return { handled: true, shouldAck: false }; // Never reached
    };
    const executeHandler = async (handler, enriched, context) => {
        try {
            await handler.handle(enriched.data, enriched);
            return { success: true };
        }
        catch (error) {
            if (dlqEnabled) {
                await publishRecoverableError(context, error, options);
                return { success: true }; // Ack after publishing to error topic
            }
            throw error; // Will trigger retry
        }
    };
    /**
     * Process a parsed CloudEvent message
     * @returns true if message was handled successfully (should ack), false for retry (should nak)
     */
    const processEvent = async (cloudEvent, enriched) => {
        const context = createContext(enriched, cloudEvent);
        const handler = findHandler(enriched);
        if (!handler) {
            const result = await handleMissingHandler(context, enriched.eventType);
            return result.shouldAck;
        }
        const validationResult = validateEventData(handler, enriched.data);
        if ('error' in validationResult) {
            const result = await handleValidationFailure(validationResult, handler, context);
            return result.shouldAck;
        }
        const result = await executeHandler(handler, enriched, context);
        return result.success;
    };
    // Parse failures: quarantine (DLQ mode) or give up after a fixed number of
    // redeliveries to avoid an infinite redelivery loop.
    const handleParseError = async (error, context, redeliveryCount = 0) => {
        logger.error(`[${name}] failed to parse CloudEvent (attempt ${redeliveryCount + 1})`, error);
        if (dlqEnabled) {
            await quarantineMessage(context, 'parse_error', options, error);
            return true; // Ack after quarantine
        }
        // After max retries, ack to prevent infinite loop
        return redeliveryCount >= 2;
    };
    // Last-resort path for errors thrown outside processEvent. Quarantine
    // failures are logged but never rethrown, so the consumer loop survives.
    const handleUnhandledError = async (error, context, ackFn) => {
        logger.error(`[${name}] unhandled processing error`, error);
        if (dlqEnabled) {
            try {
                await quarantineMessage(context, 'unhandled_error', options, error);
                ackFn?.();
            }
            catch (quarantineError) {
                logger.error(`[${name}] failed to quarantine unhandled error`, quarantineError);
            }
        }
    };
    return {
        toEnrichedEvent,
        createContext,
        parseCloudEvent,
        findHandler,
        processEvent,
        handleParseError,
        handleUnhandledError,
    };
}
|
|
@@ -1,2 +1,5 @@
|
|
|
1
|
+
export * from './base-message-processor';
|
|
2
|
+
export * from './jetstream-consumer';
|
|
3
|
+
export { createJetStreamMessageProcessor, type JetStreamMessageProcessor, type JetStreamMessageProcessorDeps, } from './jetstream-message-processor';
|
|
1
4
|
export * from './nats-consumer';
|
|
2
5
|
export * from './nats-message-processor';
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import { type ConsumerMessages } from 'nats';
import type { CloudEventsOptions } from '../../middlewares/cloudevents-middleware';
/**
 * Stream configuration options.
 * Only applied when the stream is auto-created; an existing stream keeps its
 * settings (only missing subjects are appended).
 */
export interface StreamConfig {
    /** Maximum age of messages in the stream (ms). @default 7 days */
    maxAge?: number;
    /** Maximum size of the stream in bytes. @default 1GB */
    maxBytes?: number;
    /** Number of replicas. @default 1 */
    replicas?: number;
}
/**
 * JetStream consumer configuration
 */
export interface JetStreamConsumerOptions extends Pick<CloudEventsOptions, 'quarantineTopic' | 'errorTopic' | 'projectId' | 'source'> {
    /** NATS server URL. Defaults to NATS_URL env or nats://localhost:4222 */
    servers?: string;
    /** JetStream stream name. Will be auto-created if it doesn't exist */
    stream: string;
    /** Subjects to bind to the stream (e.g., ['orders.>', 'payments.>']) */
    subjects: string[];
    /** Durable consumer name. Required for persistence across restarts */
    consumer: string;
    /** Glob pattern to discover event handlers */
    discover: string;
    /**
     * Where to start consuming from on first subscription.
     * @default 'new' - Only new messages
     * Options: 'all' | 'new' | 'last' | Date
     */
    startFrom?: 'all' | 'new' | 'last' | Date;
    /**
     * Max number of messages to buffer for processing
     * @default 100
     */
    maxMessages?: number;
    /**
     * Ack wait timeout in milliseconds before message is redelivered
     * @default 30000 (30 seconds)
     */
    ackWait?: number;
    /**
     * Max redelivery attempts before message goes to DLQ
     * @default 3
     */
    maxDeliver?: number;
    /** Stream configuration (only used when auto-creating) */
    streamConfig?: StreamConfig;
}
/**
 * Consume CloudEvents from NATS JetStream with persistence and guaranteed delivery.
 *
 * Features:
 * - Automatic stream and consumer creation
 * - Durable subscriptions (survive restarts)
 * - Automatic acknowledgments on successful processing
 * - Configurable retry with max redelivery
 * - Dead letter queue support
 *
 * @example
 * ```typescript
 * await consumeJetStreamEvents({
 *   stream: 'ORDERS',
 *   subjects: ['orders.>'],
 *   consumer: 'notifications',
 *   discover: './src/handlers/**\/*.event.ts',
 * })
 * ```
 */
export declare function consumeJetStreamEvents(options: JetStreamConsumerOptions): Promise<ConsumerMessages>;
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
import { AckPolicy, connect, DeliverPolicy, ReplayPolicy, RetentionPolicy, StorageType, StringCodec, } from 'nats';
|
|
2
|
+
import { discoverHandlers } from '../../domain';
|
|
3
|
+
import { logger } from '../../infrastructure/logging';
|
|
4
|
+
import { processHandler } from '../../processing/handler-cache';
|
|
5
|
+
import { createJetStreamMessageProcessor } from './jetstream-message-processor';
|
|
6
|
+
const sc = StringCodec();
|
|
7
|
+
// The live-consumer registry lives on globalThis so that it survives
// hot-reloads of this module.
const JETSTREAM_REGISTRY_KEY = '__crossdelta_jetstream_consumers__';
/** Returns the process-wide JetStream consumer registry, creating it lazily. */
function getJetStreamRegistry() {
    globalThis[JETSTREAM_REGISTRY_KEY] ??= new Map();
    return globalThis[JETSTREAM_REGISTRY_KEY];
}
|
|
16
|
+
// Default stream configuration.
// NOTE: maxAge is in milliseconds here; it is converted to nanoseconds
// (JetStream's native unit) when the stream is created.
const DEFAULT_STREAM_CONFIG = {
    maxAge: 7 * 24 * 60 * 60 * 1000, // 7 days in ms
    maxBytes: 1024 * 1024 * 1024, // 1 GB
    replicas: 1,
};
/**
 * Ensures stream exists with the given configuration.
 *
 * If the stream already exists, only missing subjects are appended; its other
 * settings are left untouched. If `streams.info` throws (stream missing), a
 * new file-backed, limits-retention stream is created.
 */
async function ensureStream(jsm, name, subjects, config = {}) {
    const streamConfig = { ...DEFAULT_STREAM_CONFIG, ...config };
    try {
        const stream = await jsm.streams.info(name);
        // Update subjects if needed
        const existingSubjects = new Set(stream.config.subjects);
        const newSubjects = subjects.filter((s) => !existingSubjects.has(s));
        if (newSubjects.length > 0) {
            await jsm.streams.update(name, {
                subjects: [...stream.config.subjects, ...newSubjects],
            });
            logger.info(`[jetstream] updated stream ${name} with subjects: ${newSubjects.join(', ')}`);
        }
    }
    catch {
        // Stream doesn't exist, create it
        await jsm.streams.add({
            name,
            subjects,
            retention: RetentionPolicy.Limits,
            storage: StorageType.File,
            max_age: streamConfig.maxAge * 1_000_000, // Convert ms to nanoseconds
            max_bytes: streamConfig.maxBytes,
            num_replicas: streamConfig.replicas,
        });
        logger.info(`[jetstream] created stream ${name} with subjects: ${subjects.join(', ')}`);
    }
}
|
|
53
|
+
/**
 * Ensures durable consumer exists.
 *
 * Maps `options.startFrom` onto a JetStream deliver policy ('all' | 'last',
 * default 'new'); a Date value switches to StartTime delivery at that
 * timestamp. An already-existing durable consumer is left untouched.
 */
async function ensureConsumer(jsm, streamName, consumerName, options) {
    // Deliver policy for the non-Date startFrom variants.
    const deliverPolicy = (() => {
        switch (options.startFrom) {
            case 'all':
                return DeliverPolicy.All;
            case 'last':
                return DeliverPolicy.Last;
            default:
                return DeliverPolicy.New;
        }
    })();
    const optStartTime = options.startFrom instanceof Date ? options.startFrom : undefined;
    try {
        await jsm.consumers.info(streamName, consumerName);
        // Consumer exists, no update needed for durable consumers
    }
    catch {
        // Consumer doesn't exist, create it
        await jsm.consumers.add(streamName, {
            durable_name: consumerName,
            ack_policy: AckPolicy.Explicit,
            deliver_policy: optStartTime ? DeliverPolicy.StartTime : deliverPolicy,
            opt_start_time: optStartTime?.toISOString(),
            replay_policy: ReplayPolicy.Instant,
            ack_wait: (options.ackWait ?? 30_000) * 1_000_000, // Convert ms to nanoseconds
            max_deliver: options.maxDeliver ?? 3,
        });
        logger.info(`[jetstream] created durable consumer ${consumerName} on stream ${streamName}`);
    }
}
|
|
86
|
+
/**
 * Cleanup function to close a JetStream consumer.
 * Aborts the processing loop, closes the message iterator, drains the NATS
 * connection, and removes the registry entry. No-op when nothing is registered.
 */
async function cleanupJetStreamConsumer(name) {
    const registry = getJetStreamRegistry();
    const entry = registry.get(name);
    if (!entry) {
        return;
    }
    logger.info(`[${name}] cleaning up JetStream consumer...`);
    entry.abortController.abort();
    await entry.messages.close();
    await entry.connection.drain();
    registry.delete(name);
}
|
|
100
|
+
/**
 * Consume CloudEvents from NATS JetStream with persistence and guaranteed delivery.
 *
 * Features:
 * - Automatic stream and consumer creation
 * - Durable subscriptions (survive restarts)
 * - Automatic acknowledgments on successful processing
 * - Configurable retry with max redelivery
 * - Dead letter queue support
 *
 * @example
 * ```typescript
 * await consumeJetStreamEvents({
 *   stream: 'ORDERS',
 *   subjects: ['orders.>'],
 *   consumer: 'notifications',
 *   discover: './src/handlers/**\/*.event.ts',
 * })
 * ```
 */
export async function consumeJetStreamEvents(options) {
    const servers = options.servers ?? process.env.NATS_URL ?? 'nats://localhost:4222';
    const name = options.consumer;
    // Cleanup existing consumer (handles hot-reload)
    await cleanupJetStreamConsumer(name);
    // 1) Discover handlers from the glob pattern and drop any that fail processing
    const handlerConstructors = await discoverHandlers(options.discover);
    const processedHandlers = handlerConstructors
        .map(processHandler)
        .filter((h) => h !== null);
    const handlerNames = processedHandlers.map((h) => h.name).join(', ');
    logger.info(`[${name}] discovered ${processedHandlers.length} handler(s): ${handlerNames}`);
    // 2) Connect to NATS
    const nc = await connect({ servers });
    logger.info(`[${name}] connected to NATS: ${servers}`);
    // 3) Setup JetStream
    const jsm = await nc.jetstreamManager();
    const js = nc.jetstream();
    // 4) Ensure stream exists
    await ensureStream(jsm, options.stream, options.subjects, options.streamConfig);
    // 5) Ensure durable consumer exists
    await ensureConsumer(jsm, options.stream, name, options);
    // 6) Get consumer and start consuming
    const consumer = await js.consumers.get(options.stream, name);
    const messages = await consumer.consume({
        max_messages: options.maxMessages ?? 100,
    });
    logger.info(`[${name}] consuming from stream ${options.stream}`);
    // Track for cleanup (the abort signal stops the processing loop below)
    const abortController = new AbortController();
    getJetStreamRegistry().set(name, { messages, connection: nc, abortController });
    // DLQ behavior is enabled when either destination topic is configured
    const dlqEnabled = Boolean(options.quarantineTopic || options.errorTopic);
    const { handleMessage, handleUnhandledProcessingError } = createJetStreamMessageProcessor({
        name,
        dlqEnabled,
        options,
        processedHandlers,
        decode: (data) => sc.decode(data),
        logger,
    });
    // Process messages: ack on success, nak for retry, and leave unacked
    // (redelivered after ack_wait) on unhandled errors.
    const processMessages = async () => {
        for await (const msg of messages) {
            if (abortController.signal.aborted)
                break;
            try {
                const success = await handleMessage(msg);
                if (success) {
                    msg.ack();
                }
                else {
                    // Handler returned false, negative ack for retry
                    msg.nak();
                }
            }
            catch (error) {
                await handleUnhandledProcessingError(msg, error);
                // Don't ack - message will be redelivered after ack_wait
            }
        }
    };
    // Fire-and-forget loop; a crash is only logged when not shutting down.
    processMessages().catch((err) => {
        if (!abortController.signal.aborted) {
            logger.error(`[${name}] message processing loop crashed`, err);
        }
    });
    return messages;
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import type { JsMsg } from 'nats';
import { type BaseMessageProcessorDeps } from './base-message-processor';
/** The JetStream processor reuses the shared base-processor dependencies. */
export type JetStreamMessageProcessorDeps = BaseMessageProcessorDeps;
export interface JetStreamMessageProcessor {
    /** Returns true if message was handled successfully (should ack), false for retry (should nak) */
    handleMessage(msg: JsMsg): Promise<boolean>;
    /** Last-resort error path; when DLQ is enabled the message is quarantined and acked. */
    handleUnhandledProcessingError(msg: JsMsg, error: unknown): Promise<void>;
}
export declare const createJetStreamMessageProcessor: (deps: JetStreamMessageProcessorDeps) => JetStreamMessageProcessor;
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import { createProcessingContext } from '../../processing/dlq-safe';
|
|
2
|
+
import { createBaseMessageProcessor } from './base-message-processor';
|
|
3
|
+
export const createJetStreamMessageProcessor = (deps) => {
|
|
4
|
+
const { decode } = deps;
|
|
5
|
+
const base = createBaseMessageProcessor(deps);
|
|
6
|
+
const toUnknownContext = (msg) => ({
|
|
7
|
+
eventType: 'unknown',
|
|
8
|
+
source: `jetstream://${msg.info.stream}`,
|
|
9
|
+
subject: msg.subject,
|
|
10
|
+
time: new Date().toISOString(),
|
|
11
|
+
messageId: `${msg.info.stream}:${msg.seq}`,
|
|
12
|
+
data: decode(msg.data),
|
|
13
|
+
});
|
|
14
|
+
const handleMessage = async (msg) => {
|
|
15
|
+
try {
|
|
16
|
+
const cloudEvent = base.parseCloudEvent(msg.data);
|
|
17
|
+
const enriched = base.toEnrichedEvent(cloudEvent);
|
|
18
|
+
return base.processEvent(cloudEvent, enriched);
|
|
19
|
+
}
|
|
20
|
+
catch (error) {
|
|
21
|
+
const unknownCtx = toUnknownContext(msg);
|
|
22
|
+
const context = createProcessingContext('unknown', decode(msg.data), unknownCtx, undefined);
|
|
23
|
+
return base.handleParseError(error, context, msg.info.deliveryCount);
|
|
24
|
+
}
|
|
25
|
+
};
|
|
26
|
+
const handleUnhandledProcessingError = async (msg, error) => {
|
|
27
|
+
const unknownCtx = toUnknownContext(msg);
|
|
28
|
+
const context = createProcessingContext('unknown', decode(msg.data), unknownCtx, undefined);
|
|
29
|
+
await base.handleUnhandledError(error, context, () => msg.ack());
|
|
30
|
+
};
|
|
31
|
+
return { handleMessage, handleUnhandledProcessingError };
|
|
32
|
+
};
|
|
@@ -1,22 +1,11 @@
|
|
|
1
1
|
import type { Msg } from 'nats';
|
|
2
|
-
import { type
|
|
3
|
-
|
|
4
|
-
export interface
|
|
5
|
-
info(message: string, meta?: unknown): void;
|
|
6
|
-
warn(message: string, meta?: unknown): void;
|
|
7
|
-
error(message: string, meta?: unknown): void;
|
|
8
|
-
}
|
|
9
|
-
export interface NatsMessageProcessorDeps {
|
|
10
|
-
name: string;
|
|
2
|
+
import { type BaseMessageProcessorDeps, type LoggerLike } from './base-message-processor';
|
|
3
|
+
export type { LoggerLike };
|
|
4
|
+
export interface NatsMessageProcessorDeps extends BaseMessageProcessorDeps {
|
|
11
5
|
subject: string;
|
|
12
|
-
dlqEnabled: boolean;
|
|
13
|
-
options: DlqOptions;
|
|
14
|
-
processedHandlers: ProcessedHandler[];
|
|
15
|
-
decode: (data: Uint8Array) => string;
|
|
16
|
-
logger: LoggerLike;
|
|
17
6
|
}
|
|
18
7
|
export interface NatsMessageProcessor {
|
|
19
8
|
handleMessage(msg: Msg): Promise<void>;
|
|
20
9
|
handleUnhandledProcessingError(msg: Msg, error: unknown): Promise<void>;
|
|
21
10
|
}
|
|
22
|
-
export declare const createNatsMessageProcessor: (
|
|
11
|
+
export declare const createNatsMessageProcessor: (deps: NatsMessageProcessorDeps) => NatsMessageProcessor;
|
|
@@ -1,14 +1,8 @@
|
|
|
1
|
-
import { createProcessingContext
|
|
2
|
-
import {
|
|
3
|
-
export const createNatsMessageProcessor = (
|
|
4
|
-
const
|
|
5
|
-
|
|
6
|
-
source: ce.source,
|
|
7
|
-
subject: ce.subject,
|
|
8
|
-
time: ce.time ?? new Date().toISOString(),
|
|
9
|
-
messageId: ce.id,
|
|
10
|
-
data: ce.data,
|
|
11
|
-
});
|
|
1
|
+
import { createProcessingContext } from '../../processing/dlq-safe';
|
|
2
|
+
import { createBaseMessageProcessor } from './base-message-processor';
|
|
3
|
+
export const createNatsMessageProcessor = (deps) => {
|
|
4
|
+
const { subject, decode } = deps;
|
|
5
|
+
const base = createBaseMessageProcessor(deps);
|
|
12
6
|
const toUnknownContext = (msg) => ({
|
|
13
7
|
eventType: 'unknown',
|
|
14
8
|
source: `nats://${subject}`,
|
|
@@ -17,79 +11,22 @@ export const createNatsMessageProcessor = ({ name, subject, dlqEnabled, options,
|
|
|
17
11
|
messageId: msg.headers?.get('Nats-Msg-Id') ?? 'unknown',
|
|
18
12
|
data: decode(msg.data),
|
|
19
13
|
});
|
|
20
|
-
const
|
|
21
|
-
const safeParseMessage = (msg) => {
|
|
22
|
-
try {
|
|
23
|
-
const cloudEvent = JSON.parse(decode(msg.data));
|
|
24
|
-
return { ok: true, cloudEvent, enriched: toEnrichedEvent(cloudEvent) };
|
|
25
|
-
}
|
|
26
|
-
catch (error) {
|
|
27
|
-
return { ok: false, error, context: createContext(toUnknownContext(msg)) };
|
|
28
|
-
}
|
|
29
|
-
};
|
|
30
|
-
const findHandler = (event) => processedHandlers.find((handler) => handler.type === event.eventType && (!handler.match || handler.match(event)));
|
|
31
|
-
const handleParseFailure = async ({ context, error }) => {
|
|
32
|
-
logger.error(`[${name}] failed to parse CloudEvent payload`, error);
|
|
33
|
-
if (!dlqEnabled)
|
|
34
|
-
return;
|
|
35
|
-
await quarantineMessage(context, 'parse_error', options, error);
|
|
36
|
-
};
|
|
37
|
-
const handleMissingHandler = async (context, eventType) => {
|
|
38
|
-
logger.warn(`[${name}] no handler for event type ${eventType}`);
|
|
39
|
-
if (!dlqEnabled)
|
|
40
|
-
return;
|
|
41
|
-
await quarantineMessage(context, 'no_handler', options, new Error(`No handler for event type ${eventType}`));
|
|
42
|
-
};
|
|
43
|
-
const handleValidationFailure = async (validationResult, handler, context) => {
|
|
44
|
-
if (dlqEnabled) {
|
|
45
|
-
await quarantineMessage(context, 'validation_error', options, validationResult.error);
|
|
46
|
-
return;
|
|
47
|
-
}
|
|
48
|
-
if (validationResult.shouldSkip)
|
|
49
|
-
return;
|
|
50
|
-
throwValidationError(handler.name, validationResult.error);
|
|
51
|
-
};
|
|
52
|
-
const executeHandler = async (handler, enriched, context) => {
|
|
14
|
+
const handleMessage = async (msg) => {
|
|
53
15
|
try {
|
|
54
|
-
|
|
16
|
+
const cloudEvent = base.parseCloudEvent(msg.data);
|
|
17
|
+
const enriched = base.toEnrichedEvent(cloudEvent);
|
|
18
|
+
await base.processEvent(cloudEvent, enriched);
|
|
55
19
|
}
|
|
56
20
|
catch (error) {
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
await
|
|
60
|
-
}
|
|
61
|
-
};
|
|
62
|
-
const handleMessage = async (msg) => {
|
|
63
|
-
const parseResult = safeParseMessage(msg);
|
|
64
|
-
if (!parseResult.ok) {
|
|
65
|
-
await handleParseFailure(parseResult);
|
|
66
|
-
return;
|
|
67
|
-
}
|
|
68
|
-
const { cloudEvent, enriched } = parseResult;
|
|
69
|
-
const processingContext = createContext(enriched, cloudEvent);
|
|
70
|
-
const handler = findHandler(enriched);
|
|
71
|
-
if (!handler) {
|
|
72
|
-
await handleMissingHandler(processingContext, enriched.eventType);
|
|
73
|
-
return;
|
|
21
|
+
const unknownCtx = toUnknownContext(msg);
|
|
22
|
+
const context = createProcessingContext('unknown', decode(msg.data), unknownCtx, undefined);
|
|
23
|
+
await base.handleParseError(error, context);
|
|
74
24
|
}
|
|
75
|
-
const validationResult = validateEventData(handler, enriched.data);
|
|
76
|
-
if ('error' in validationResult) {
|
|
77
|
-
await handleValidationFailure(validationResult, handler, processingContext);
|
|
78
|
-
return;
|
|
79
|
-
}
|
|
80
|
-
await executeHandler(handler, enriched, processingContext);
|
|
81
25
|
};
|
|
82
26
|
const handleUnhandledProcessingError = async (msg, error) => {
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
}
|
|
87
|
-
try {
|
|
88
|
-
await quarantineMessage(createContext(toUnknownContext(msg)), 'unhandled_error', options, error);
|
|
89
|
-
}
|
|
90
|
-
catch (quarantineError) {
|
|
91
|
-
logger.error(`[${name}] failed to quarantine unhandled error`, quarantineError);
|
|
92
|
-
}
|
|
27
|
+
const unknownCtx = toUnknownContext(msg);
|
|
28
|
+
const context = createProcessingContext('unknown', decode(msg.data), unknownCtx, undefined);
|
|
29
|
+
await base.handleUnhandledError(error, context);
|
|
93
30
|
};
|
|
94
31
|
return { handleMessage, handleUnhandledProcessingError };
|
|
95
32
|
};
|
package/package.json
CHANGED