@drarzter/kafka-client 0.5.5 → 0.5.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +33 -17
- package/dist/chunk-TD2AE774.mjs +1231 -0
- package/dist/chunk-TD2AE774.mjs.map +1 -0
- package/dist/core.d.mts +14 -41
- package/dist/core.d.ts +14 -41
- package/dist/core.js +652 -511
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/index.d.mts +17 -23
- package/dist/index.d.ts +17 -23
- package/dist/index.js +692 -569
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +41 -59
- package/dist/index.mjs.map +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/otel.js +9 -4
- package/dist/otel.js.map +1 -1
- package/dist/otel.mjs +9 -4
- package/dist/otel.mjs.map +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/dist/testing.js +6 -6
- package/dist/testing.js.map +1 -1
- package/dist/testing.mjs +6 -6
- package/dist/testing.mjs.map +1 -1
- package/dist/{envelope-BpyKN_WL.d.mts → types-DwERZ6ql.d.mts} +99 -83
- package/dist/{envelope-BpyKN_WL.d.ts → types-DwERZ6ql.d.ts} +99 -83
- package/package.json +1 -1
- package/dist/chunk-Z3O5GTS7.mjs +0 -1090
- package/dist/chunk-Z3O5GTS7.mjs.map +0 -1
@@ -0,0 +1,1231 @@
// src/client/kafka.client/index.ts
import { KafkaJS } from "@confluentinc/kafka-javascript";

// src/client/message/envelope.ts
import { AsyncLocalStorage } from "async_hooks";
import { randomUUID } from "crypto";
var HEADER_EVENT_ID = "x-event-id";
var HEADER_CORRELATION_ID = "x-correlation-id";
var HEADER_TIMESTAMP = "x-timestamp";
var HEADER_SCHEMA_VERSION = "x-schema-version";
var HEADER_TRACEPARENT = "traceparent";
var envelopeStorage = new AsyncLocalStorage();
function getEnvelopeContext() {
  return envelopeStorage.getStore();
}
function runWithEnvelopeContext(ctx, fn) {
  return envelopeStorage.run(ctx, fn);
}
function buildEnvelopeHeaders(options = {}) {
  const ctx = getEnvelopeContext();
  const correlationId = options.correlationId ?? ctx?.correlationId ?? randomUUID();
  const eventId = options.eventId ?? randomUUID();
  const timestamp = (/* @__PURE__ */ new Date()).toISOString();
  const schemaVersion = String(options.schemaVersion ?? 1);
  const envelope = {
    [HEADER_EVENT_ID]: eventId,
    [HEADER_CORRELATION_ID]: correlationId,
    [HEADER_TIMESTAMP]: timestamp,
    [HEADER_SCHEMA_VERSION]: schemaVersion
  };
  if (ctx?.traceparent) {
    envelope[HEADER_TRACEPARENT] = ctx.traceparent;
  }
  return { ...envelope, ...options.headers };
}
function decodeHeaders(raw) {
  if (!raw) return {};
  const result = {};
  for (const [key, value] of Object.entries(raw)) {
    if (value === void 0) continue;
    if (Array.isArray(value)) {
      result[key] = value.map((v) => Buffer.isBuffer(v) ? v.toString() : v).join(",");
    } else {
      result[key] = Buffer.isBuffer(value) ? value.toString() : value;
    }
  }
  return result;
}
function extractEnvelope(payload, headers, topic2, partition, offset) {
  return {
    payload,
    topic: topic2,
    partition,
    offset,
    eventId: headers[HEADER_EVENT_ID] ?? randomUUID(),
    correlationId: headers[HEADER_CORRELATION_ID] ?? randomUUID(),
    timestamp: headers[HEADER_TIMESTAMP] ?? (/* @__PURE__ */ new Date()).toISOString(),
    schemaVersion: Number(headers[HEADER_SCHEMA_VERSION] ?? 1),
    traceparent: headers[HEADER_TRACEPARENT],
    headers
  };
}

// src/client/errors.ts
var KafkaProcessingError = class extends Error {
  constructor(message, topic2, originalMessage, options) {
    super(message, options);
    this.topic = topic2;
    this.originalMessage = originalMessage;
    this.name = "KafkaProcessingError";
    if (options?.cause) this.cause = options.cause;
  }
};
var KafkaValidationError = class extends Error {
  constructor(topic2, originalMessage, options) {
    super(`Schema validation failed for topic "${topic2}"`, options);
    this.topic = topic2;
    this.originalMessage = originalMessage;
    this.name = "KafkaValidationError";
    if (options?.cause) this.cause = options.cause;
  }
};
var KafkaRetryExhaustedError = class extends KafkaProcessingError {
  constructor(topic2, originalMessage, attempts, options) {
    super(
      `Message processing failed after ${attempts} attempts on topic "${topic2}"`,
      topic2,
      originalMessage,
      options
    );
    this.attempts = attempts;
    this.name = "KafkaRetryExhaustedError";
  }
};

// src/client/kafka.client/producer-ops.ts
function resolveTopicName(topicOrDescriptor) {
  if (typeof topicOrDescriptor === "string") return topicOrDescriptor;
  if (topicOrDescriptor && typeof topicOrDescriptor === "object" && "__topic" in topicOrDescriptor) {
    return topicOrDescriptor.__topic;
  }
  return String(topicOrDescriptor);
}
function registerSchema(topicOrDesc, schemaRegistry) {
  if (topicOrDesc?.__schema) {
    const topic2 = resolveTopicName(topicOrDesc);
    schemaRegistry.set(topic2, topicOrDesc.__schema);
  }
}
async function validateMessage(topicOrDesc, message, deps) {
  const topicName = resolveTopicName(topicOrDesc);
  if (topicOrDesc?.__schema) {
    try {
      return await topicOrDesc.__schema.parse(message);
    } catch (error) {
      throw new KafkaValidationError(topicName, message, {
        cause: error instanceof Error ? error : new Error(String(error))
      });
    }
  }
  if (deps.strictSchemasEnabled && typeof topicOrDesc === "string") {
    const schema = deps.schemaRegistry.get(topicOrDesc);
    if (schema) {
      try {
        return await schema.parse(message);
      } catch (error) {
        throw new KafkaValidationError(topicName, message, {
          cause: error instanceof Error ? error : new Error(String(error))
        });
      }
    }
  }
  return message;
}
async function buildSendPayload(topicOrDesc, messages, deps) {
  const topic2 = resolveTopicName(topicOrDesc);
  const builtMessages = await Promise.all(
    messages.map(async (m) => {
      const envelopeHeaders = buildEnvelopeHeaders({
        correlationId: m.correlationId,
        schemaVersion: m.schemaVersion,
        eventId: m.eventId,
        headers: m.headers
      });
      for (const inst of deps.instrumentation) {
        inst.beforeSend?.(topic2, envelopeHeaders);
      }
      return {
        value: JSON.stringify(
          await validateMessage(topicOrDesc, m.value, deps)
        ),
        key: m.key ?? null,
        headers: envelopeHeaders
      };
    })
  );
  return { topic: topic2, messages: builtMessages };
}

// src/client/kafka.client/consumer-ops.ts
function getOrCreateConsumer(groupId, fromBeginning, autoCommit, deps) {
  const { consumers, consumerCreationOptions, kafka, onRebalance, logger } = deps;
  if (consumers.has(groupId)) {
    const prev = consumerCreationOptions.get(groupId);
    if (prev.fromBeginning !== fromBeginning || prev.autoCommit !== autoCommit) {
      logger.warn(
        `Consumer group "${groupId}" already exists with options (fromBeginning: ${prev.fromBeginning}, autoCommit: ${prev.autoCommit}) \u2014 new options (fromBeginning: ${fromBeginning}, autoCommit: ${autoCommit}) ignored. Use a different groupId to apply different options.`
      );
    }
    return consumers.get(groupId);
  }
  consumerCreationOptions.set(groupId, { fromBeginning, autoCommit });
  const config = {
    kafkaJS: { groupId, fromBeginning, autoCommit }
  };
  if (onRebalance) {
    const cb = onRebalance;
    config["rebalance_cb"] = (err, assignment) => {
      const type = err.code === -175 ? "assign" : "revoke";
      try {
        cb(
          type,
          assignment.map((p) => ({ topic: p.topic, partition: p.partition }))
        );
      } catch (e) {
        logger.warn(`onRebalance callback threw: ${e.message}`);
      }
    };
  }
  const consumer = kafka.consumer(config);
  consumers.set(groupId, consumer);
  return consumer;
}
function buildSchemaMap(topics, schemaRegistry, optionSchemas) {
  const schemaMap = /* @__PURE__ */ new Map();
  for (const t of topics) {
    if (t?.__schema) {
      const name = resolveTopicName(t);
      schemaMap.set(name, t.__schema);
      schemaRegistry.set(name, t.__schema);
    }
  }
  if (optionSchemas) {
    for (const [k, v] of optionSchemas) {
      schemaMap.set(k, v);
      schemaRegistry.set(k, v);
    }
  }
  return schemaMap;
}

// src/client/consumer/pipeline.ts
function toError(error) {
  return error instanceof Error ? error : new Error(String(error));
}
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
function parseJsonMessage(raw, topic2, logger) {
  try {
    return JSON.parse(raw);
  } catch (error) {
    logger.error(
      `Failed to parse message from topic ${topic2}:`,
      toError(error).stack
    );
    return null;
  }
}
async function validateWithSchema(message, raw, topic2, schemaMap, interceptors, dlq, deps) {
  const schema = schemaMap.get(topic2);
  if (!schema) return message;
  try {
    return await schema.parse(message);
  } catch (error) {
    const err = toError(error);
    const validationError = new KafkaValidationError(topic2, message, {
      cause: err
    });
    deps.logger.error(
      `Schema validation failed for topic ${topic2}:`,
      err.message
    );
    if (dlq) {
      await sendToDlq(topic2, raw, deps, {
        error: validationError,
        attempt: 0,
        originalHeaders: deps.originalHeaders
      });
    } else {
      await deps.onMessageLost?.({
        topic: topic2,
        error: validationError,
        attempt: 0,
        headers: deps.originalHeaders ?? {}
      });
    }
    const errorEnvelope = extractEnvelope(
      message,
      deps.originalHeaders ?? {},
      topic2,
      -1,
      ""
    );
    for (const interceptor of interceptors) {
      await interceptor.onError?.(errorEnvelope, validationError);
    }
    return null;
  }
}
async function sendToDlq(topic2, rawMessage, deps, meta) {
  const dlqTopic = `${topic2}.dlq`;
  const headers = {
    ...meta?.originalHeaders ?? {},
    "x-dlq-original-topic": topic2,
    "x-dlq-failed-at": (/* @__PURE__ */ new Date()).toISOString(),
    "x-dlq-error-message": meta?.error.message ?? "unknown",
    "x-dlq-error-stack": meta?.error.stack?.slice(0, 2e3) ?? "",
    "x-dlq-attempt-count": String(meta?.attempt ?? 0)
  };
  try {
    await deps.producer.send({
      topic: dlqTopic,
      messages: [{ value: rawMessage, headers }]
    });
    deps.logger.warn(`Message sent to DLQ: ${dlqTopic}`);
  } catch (error) {
    deps.logger.error(
      `Failed to send message to DLQ ${dlqTopic}:`,
      toError(error).stack
    );
  }
}
var RETRY_HEADER_ATTEMPT = "x-retry-attempt";
var RETRY_HEADER_AFTER = "x-retry-after";
var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
async function sendToRetryTopic(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders, deps) {
  const retryTopic = `${originalTopic}.retry.${attempt}`;
  const {
    [RETRY_HEADER_ATTEMPT]: _a,
    [RETRY_HEADER_AFTER]: _b,
    [RETRY_HEADER_MAX_RETRIES]: _c,
    [RETRY_HEADER_ORIGINAL_TOPIC]: _d,
    ...userHeaders
  } = originalHeaders;
  const headers = {
    ...userHeaders,
    [RETRY_HEADER_ATTEMPT]: String(attempt),
    [RETRY_HEADER_AFTER]: String(Date.now() + delayMs),
    [RETRY_HEADER_MAX_RETRIES]: String(maxRetries),
    [RETRY_HEADER_ORIGINAL_TOPIC]: originalTopic
  };
  try {
    for (const raw of rawMessages) {
      await deps.producer.send({
        topic: retryTopic,
        messages: [{ value: raw, headers }]
      });
    }
    deps.logger.warn(
      `Message queued in retry topic ${retryTopic} (attempt ${attempt}/${maxRetries})`
    );
  } catch (error) {
    deps.logger.error(
      `Failed to send message to retry topic ${retryTopic}:`,
      toError(error).stack
    );
  }
}
async function broadcastToInterceptors(envelopes, interceptors, cb) {
  for (const env of envelopes) {
    for (const interceptor of interceptors) {
      await cb(interceptor, env);
    }
  }
}
async function runHandlerWithPipeline(fn, envelopes, interceptors, instrumentation) {
  const cleanups = [];
  try {
    for (const env of envelopes) {
      for (const inst of instrumentation) {
        const cleanup = inst.beforeConsume?.(env);
        if (typeof cleanup === "function") cleanups.push(cleanup);
      }
    }
    for (const env of envelopes) {
      for (const interceptor of interceptors) {
        await interceptor.before?.(env);
      }
    }
    await fn();
    for (const env of envelopes) {
      for (const interceptor of interceptors) {
        await interceptor.after?.(env);
      }
    }
    for (const cleanup of cleanups) cleanup();
    return null;
  } catch (error) {
    const err = toError(error);
    for (const env of envelopes) {
      for (const inst of instrumentation) {
        inst.onConsumeError?.(env, err);
      }
    }
    for (const cleanup of cleanups) cleanup();
    return err;
  }
}
async function notifyInterceptorsOnError(envelopes, interceptors, error) {
  await broadcastToInterceptors(
    envelopes,
    interceptors,
    (i, env) => i.onError?.(env, error)
  );
}
async function executeWithRetry(fn, ctx, deps) {
  const {
    envelope,
    rawMessages,
    interceptors,
    dlq,
    retry,
    isBatch,
    retryTopics
  } = ctx;
  const maxAttempts = retryTopics ? 1 : retry ? retry.maxRetries + 1 : 1;
  const backoffMs = retry?.backoffMs ?? 1e3;
  const maxBackoffMs = retry?.maxBackoffMs ?? 3e4;
  const envelopes = Array.isArray(envelope) ? envelope : [envelope];
  const topic2 = envelopes[0]?.topic ?? "unknown";
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    const error = await runHandlerWithPipeline(
      fn,
      envelopes,
      interceptors,
      deps.instrumentation
    );
    if (!error) return;
    const isLastAttempt = attempt === maxAttempts;
    const reportedError = isLastAttempt && maxAttempts > 1 ? new KafkaRetryExhaustedError(
      topic2,
      envelopes.map((e) => e.payload),
      maxAttempts,
      { cause: error }
    ) : error;
    await notifyInterceptorsOnError(envelopes, interceptors, reportedError);
    deps.logger.error(
      `Error processing ${isBatch ? "batch" : "message"} from topic ${topic2} (attempt ${attempt}/${maxAttempts}):`,
      error.stack
    );
    if (retryTopics && retry) {
      const cap = Math.min(backoffMs, maxBackoffMs);
      const delay = Math.floor(Math.random() * cap);
      await sendToRetryTopic(
        topic2,
        rawMessages,
        1,
        retry.maxRetries,
        delay,
        envelopes[0]?.headers ?? {},
        deps
      );
    } else if (isLastAttempt) {
      if (dlq) {
        const dlqMeta = {
          error,
          attempt,
          originalHeaders: envelopes[0]?.headers
        };
        for (const raw of rawMessages) {
          await sendToDlq(topic2, raw, deps, dlqMeta);
        }
      } else {
        await deps.onMessageLost?.({
          topic: topic2,
          error,
          attempt,
          headers: envelopes[0]?.headers ?? {}
        });
      }
    } else {
      const cap = Math.min(backoffMs * 2 ** (attempt - 1), maxBackoffMs);
      await sleep(Math.random() * cap);
    }
  }
}

// src/client/kafka.client/message-handler.ts
async function parseSingleMessage(message, topic2, partition, schemaMap, interceptors, dlq, deps) {
  if (!message.value) {
    deps.logger.warn(`Received empty message from topic ${topic2}`);
    return null;
  }
  const raw = message.value.toString();
  const parsed = parseJsonMessage(raw, topic2, deps.logger);
  if (parsed === null) return null;
  const headers = decodeHeaders(message.headers);
  const validated = await validateWithSchema(
    parsed,
    raw,
    topic2,
    schemaMap,
    interceptors,
    dlq,
    { ...deps, originalHeaders: headers }
  );
  if (validated === null) return null;
  return extractEnvelope(validated, headers, topic2, partition, message.offset);
}
async function handleEachMessage(payload, opts, deps) {
  const { topic: topic2, partition, message } = payload;
  const {
    schemaMap,
    handleMessage,
    interceptors,
    dlq,
    retry,
    retryTopics,
    timeoutMs,
    wrapWithTimeout
  } = opts;
  const envelope = await parseSingleMessage(
    message,
    topic2,
    partition,
    schemaMap,
    interceptors,
    dlq,
    deps
  );
  if (envelope === null) return;
  await executeWithRetry(
    () => {
      const fn = () => runWithEnvelopeContext(
        {
          correlationId: envelope.correlationId,
          traceparent: envelope.traceparent
        },
        () => handleMessage(envelope)
      );
      return timeoutMs ? wrapWithTimeout(fn, timeoutMs, topic2) : fn();
    },
    {
      envelope,
      rawMessages: [message.value.toString()],
      interceptors,
      dlq,
      retry,
      retryTopics
    },
    deps
  );
}
async function handleEachBatch(payload, opts, deps) {
  const { batch, heartbeat, resolveOffset, commitOffsetsIfNecessary } = payload;
  const {
    schemaMap,
    handleBatch,
    interceptors,
    dlq,
    retry,
    timeoutMs,
    wrapWithTimeout
  } = opts;
  const envelopes = [];
  const rawMessages = [];
  for (const message of batch.messages) {
    const envelope = await parseSingleMessage(
      message,
      batch.topic,
      batch.partition,
      schemaMap,
      interceptors,
      dlq,
      deps
    );
    if (envelope === null) continue;
    envelopes.push(envelope);
    rawMessages.push(message.value.toString());
  }
  if (envelopes.length === 0) return;
  const meta = {
    partition: batch.partition,
    highWatermark: batch.highWatermark,
    heartbeat,
    resolveOffset,
    commitOffsetsIfNecessary
  };
  await executeWithRetry(
    () => {
      const fn = () => handleBatch(envelopes, meta);
      return timeoutMs ? wrapWithTimeout(fn, timeoutMs, batch.topic) : fn();
    },
    {
      envelope: envelopes,
      rawMessages: batch.messages.filter((m) => m.value).map((m) => m.value.toString()),
      interceptors,
      dlq,
      retry,
      isBatch: true
    },
    deps
  );
}

// src/client/consumer/subscribe-retry.ts
async function subscribeWithRetry(consumer, topics, logger, retryOpts) {
  const maxAttempts = retryOpts?.retries ?? 5;
  const backoffMs = retryOpts?.backoffMs ?? 5e3;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      await consumer.subscribe({ topics });
      return;
    } catch (error) {
      if (attempt === maxAttempts) throw error;
      const msg = toError(error).message;
      logger.warn(
        `Failed to subscribe to [${topics.join(", ")}] (attempt ${attempt}/${maxAttempts}): ${msg}. Retrying in ${backoffMs}ms...`
      );
      await sleep(backoffMs);
    }
  }
}

// src/client/kafka.client/retry-topic.ts
async function waitForPartitionAssignment(consumer, topics, logger, timeoutMs = 1e4) {
  const topicSet = new Set(topics);
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    try {
      const assigned = consumer.assignment();
      if (assigned.some((a) => topicSet.has(a.topic))) return;
    } catch {
    }
    await sleep(200);
  }
  logger.warn(
    `Retry consumer did not receive partition assignments for [${topics.join(", ")}] within ${timeoutMs}ms`
  );
}
async function startLevelConsumer(level, levelTopics, levelGroupId, originalTopics, handleMessage, retry, dlq, interceptors, schemaMap, deps, assignmentTimeoutMs) {
  const {
    logger,
    producer,
    instrumentation,
    onMessageLost,
    ensureTopic,
    getOrCreateConsumer: getOrCreateConsumer2,
    runningConsumers
  } = deps;
  const backoffMs = retry.backoffMs ?? 1e3;
  const maxBackoffMs = retry.maxBackoffMs ?? 3e4;
  const pipelineDeps = { logger, producer, instrumentation, onMessageLost };
  for (const lt of levelTopics) {
    await ensureTopic(lt);
  }
  const consumer = getOrCreateConsumer2(levelGroupId, false, false);
  await consumer.connect();
  await subscribeWithRetry(consumer, levelTopics, logger);
  await consumer.run({
    eachMessage: async ({ topic: levelTopic, partition, message }) => {
      const nextOffset = {
        topic: levelTopic,
        partition,
        offset: (parseInt(message.offset, 10) + 1).toString()
      };
      if (!message.value) {
        await consumer.commitOffsets([nextOffset]);
        return;
      }
      const headers = decodeHeaders(message.headers);
      const retryAfter = parseInt(
        headers[RETRY_HEADER_AFTER] ?? "0",
        10
      );
      const remaining = retryAfter - Date.now();
      if (remaining > 0) {
        consumer.pause([{ topic: levelTopic, partitions: [partition] }]);
        await sleep(remaining);
        consumer.resume([{ topic: levelTopic, partitions: [partition] }]);
      }
      const raw = message.value.toString();
      const parsed = parseJsonMessage(raw, levelTopic, logger);
      if (parsed === null) {
        await consumer.commitOffsets([nextOffset]);
        return;
      }
      const currentMaxRetries = parseInt(
        headers[RETRY_HEADER_MAX_RETRIES] ?? String(retry.maxRetries),
        10
      );
      const originalTopic = headers[RETRY_HEADER_ORIGINAL_TOPIC] ?? levelTopic.replace(/\.retry\.\d+$/, "");
      const validated = await validateWithSchema(
        parsed,
        raw,
        originalTopic,
        schemaMap,
        interceptors,
        dlq,
        { ...pipelineDeps, originalHeaders: headers }
      );
      if (validated === null) {
        await consumer.commitOffsets([nextOffset]);
        return;
      }
      const envelope = extractEnvelope(
        validated,
        headers,
        originalTopic,
        partition,
        message.offset
      );
      const error = await runHandlerWithPipeline(
        () => runWithEnvelopeContext(
          {
            correlationId: envelope.correlationId,
            traceparent: envelope.traceparent
          },
          () => handleMessage(envelope)
        ),
        [envelope],
        interceptors,
        instrumentation
      );
      if (!error) {
        await consumer.commitOffsets([nextOffset]);
        return;
      }
      const exhausted = level >= currentMaxRetries;
      const reportedError = exhausted && currentMaxRetries > 1 ? new KafkaRetryExhaustedError(
        originalTopic,
        [envelope.payload],
        currentMaxRetries,
        { cause: error }
      ) : error;
      await notifyInterceptorsOnError([envelope], interceptors, reportedError);
      logger.error(
        `Retry consumer error for ${originalTopic} (level ${level}/${currentMaxRetries}):`,
        error.stack
      );
      if (!exhausted) {
        const nextLevel = level + 1;
        const cap = Math.min(backoffMs * 2 ** level, maxBackoffMs);
        const delay = Math.floor(Math.random() * cap);
        await sendToRetryTopic(
          originalTopic,
          [raw],
          nextLevel,
          currentMaxRetries,
          delay,
          headers,
          pipelineDeps
        );
      } else if (dlq) {
        await sendToDlq(originalTopic, raw, pipelineDeps, {
          error,
          // +1 to account for the main consumer's initial attempt before routing.
          attempt: level + 1,
          originalHeaders: headers
        });
      } else {
        await onMessageLost?.({
          topic: originalTopic,
          error,
          attempt: level,
          headers
        });
      }
      await consumer.commitOffsets([nextOffset]);
    }
  });
  runningConsumers.set(levelGroupId, "eachMessage");
  await waitForPartitionAssignment(consumer, levelTopics, logger, assignmentTimeoutMs);
  logger.log(
    `Retry level ${level}/${retry.maxRetries} consumer started for: ${originalTopics.join(", ")} (group: ${levelGroupId})`
  );
}
async function startRetryTopicConsumers(originalTopics, originalGroupId, handleMessage, retry, dlq, interceptors, schemaMap, deps, assignmentTimeoutMs) {
  const levelGroupIds = [];
  for (let level = 1; level <= retry.maxRetries; level++) {
    const levelTopics = originalTopics.map((t) => `${t}.retry.${level}`);
    const levelGroupId = `${originalGroupId}-retry.${level}`;
    await startLevelConsumer(
      level,
      levelTopics,
      levelGroupId,
      originalTopics,
      handleMessage,
      retry,
      dlq,
      interceptors,
      schemaMap,
      deps,
      assignmentTimeoutMs
    );
    levelGroupIds.push(levelGroupId);
  }
  return levelGroupIds;
}

// src/client/kafka.client/index.ts
var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = KafkaJS;
var KafkaClient = class {
  kafka;
  producer;
  txProducer;
  consumers = /* @__PURE__ */ new Map();
  admin;
  logger;
  autoCreateTopicsEnabled;
  strictSchemasEnabled;
  numPartitions;
  ensuredTopics = /* @__PURE__ */ new Set();
  defaultGroupId;
  schemaRegistry = /* @__PURE__ */ new Map();
  runningConsumers = /* @__PURE__ */ new Map();
  consumerCreationOptions = /* @__PURE__ */ new Map();
  /** Maps each main consumer groupId to its companion retry level groupIds. */
  companionGroupIds = /* @__PURE__ */ new Map();
  instrumentation;
  onMessageLost;
  onRebalance;
  isAdminConnected = false;
  clientId;
  constructor(clientId, groupId, brokers, options) {
    this.clientId = clientId;
    this.defaultGroupId = groupId;
    this.logger = options?.logger ?? {
      log: (msg) => console.log(`[KafkaClient:${clientId}] ${msg}`),
      warn: (msg, ...args) => console.warn(`[KafkaClient:${clientId}] ${msg}`, ...args),
      error: (msg, ...args) => console.error(`[KafkaClient:${clientId}] ${msg}`, ...args)
    };
    this.autoCreateTopicsEnabled = options?.autoCreateTopics ?? false;
    this.strictSchemasEnabled = options?.strictSchemas ?? true;
    this.numPartitions = options?.numPartitions ?? 1;
    this.instrumentation = options?.instrumentation ?? [];
    this.onMessageLost = options?.onMessageLost;
    this.onRebalance = options?.onRebalance;
    this.kafka = new KafkaClass({
      kafkaJS: {
        clientId: this.clientId,
        brokers,
        logLevel: KafkaLogLevel.ERROR
      }
    });
    this.producer = this.kafka.producer({
      kafkaJS: {
        acks: -1
      }
    });
    this.admin = this.kafka.admin();
  }
  async sendMessage(topicOrDesc, message, options = {}) {
    const payload = await this.preparePayload(topicOrDesc, [
      {
        value: message,
        key: options.key,
        headers: options.headers,
        correlationId: options.correlationId,
        schemaVersion: options.schemaVersion,
        eventId: options.eventId
      }
    ]);
    await this.producer.send(payload);
    this.notifyAfterSend(payload.topic, payload.messages.length);
  }
  async sendBatch(topicOrDesc, messages) {
    const payload = await this.preparePayload(topicOrDesc, messages);
    await this.producer.send(payload);
    this.notifyAfterSend(payload.topic, payload.messages.length);
  }
  /** Execute multiple sends atomically. Commits on success, aborts on error. */
  async transaction(fn) {
    if (!this.txProducer) {
      this.txProducer = this.kafka.producer({
        kafkaJS: {
          acks: -1,
          idempotent: true,
          transactionalId: `${this.clientId}-tx`,
          maxInFlightRequests: 1
        }
      });
      await this.txProducer.connect();
    }
    const tx = await this.txProducer.transaction();
    try {
      const ctx = {
        send: async (topicOrDesc, message, options = {}) => {
          const payload = await this.preparePayload(topicOrDesc, [
            {
              value: message,
              key: options.key,
              headers: options.headers,
              correlationId: options.correlationId,
              schemaVersion: options.schemaVersion,
              eventId: options.eventId
            }
          ]);
          await tx.send(payload);
        },
        sendBatch: async (topicOrDesc, messages) => {
          await tx.send(await this.preparePayload(topicOrDesc, messages));
        }
      };
      await fn(ctx);
      await tx.commit();
    } catch (error) {
      try {
        await tx.abort();
      } catch (abortError) {
        this.logger.error(
          "Failed to abort transaction:",
          toError(abortError).message
        );
      }
      throw error;
    }
  }
  // ── Producer lifecycle ───────────────────────────────────────────
  /** Connect the idempotent producer. Called automatically by `KafkaModule.register()`. */
  async connectProducer() {
    await this.producer.connect();
    this.logger.log("Producer connected");
  }
  async disconnectProducer() {
    await this.producer.disconnect();
    this.logger.log("Producer disconnected");
  }
  async startConsumer(topics, handleMessage, options = {}) {
    if (options.retryTopics && !options.retry) {
      throw new Error(
        "retryTopics requires retry to be configured \u2014 set retry.maxRetries to enable the retry topic chain"
      );
    }
    const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", options);
    const deps = this.messageDeps;
    const timeoutMs = options.handlerTimeoutMs;
    await consumer.run({
      eachMessage: (payload) => handleEachMessage(
        payload,
        {
          schemaMap,
          handleMessage,
          interceptors,
          dlq,
          retry,
          retryTopics: options.retryTopics,
          timeoutMs,
          wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
        },
        deps
      )
    });
    this.runningConsumers.set(gid, "eachMessage");
    if (options.retryTopics && retry) {
      const companions = await startRetryTopicConsumers(
        topicNames,
        gid,
        handleMessage,
        retry,
        dlq,
        interceptors,
        schemaMap,
        this.retryTopicDeps,
        options.retryTopicAssignmentTimeoutMs
      );
      this.companionGroupIds.set(gid, companions);
    }
    return { groupId: gid, stop: () => this.stopConsumer(gid) };
  }
  async startBatchConsumer(topics, handleBatch, options = {}) {
    const { consumer, schemaMap, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", options);
    const deps = this.messageDeps;
    const timeoutMs = options.handlerTimeoutMs;
    await consumer.run({
      eachBatch: (payload) => handleEachBatch(
        payload,
        {
          schemaMap,
          handleBatch,
          interceptors,
          dlq,
          retry,
          timeoutMs,
          wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this)
        },
        deps
      )
    });
    this.runningConsumers.set(gid, "eachBatch");
    return { groupId: gid, stop: () => this.stopConsumer(gid) };
  }
  // ── Consumer lifecycle ───────────────────────────────────────────
  async stopConsumer(groupId) {
    if (groupId !== void 0) {
      const consumer = this.consumers.get(groupId);
      if (!consumer) {
        this.logger.warn(
          `stopConsumer: no active consumer for group "${groupId}"`
        );
        return;
      }
      await consumer.disconnect().catch(() => {
      });
      this.consumers.delete(groupId);
      this.runningConsumers.delete(groupId);
      this.consumerCreationOptions.delete(groupId);
      this.logger.log(`Consumer disconnected: group "${groupId}"`);
      const companions = this.companionGroupIds.get(groupId) ?? [];
      for (const cGroupId of companions) {
        const cConsumer = this.consumers.get(cGroupId);
        if (cConsumer) {
          await cConsumer.disconnect().catch(() => {
          });
          this.consumers.delete(cGroupId);
          this.runningConsumers.delete(cGroupId);
          this.consumerCreationOptions.delete(cGroupId);
          this.logger.log(`Retry consumer disconnected: group "${cGroupId}"`);
        }
      }
      this.companionGroupIds.delete(groupId);
    } else {
      const tasks = Array.from(this.consumers.values()).map(
        (c) => c.disconnect().catch(() => {
        })
      );
      await Promise.allSettled(tasks);
      this.consumers.clear();
      this.runningConsumers.clear();
      this.consumerCreationOptions.clear();
      this.companionGroupIds.clear();
      this.logger.log("All consumers disconnected");
    }
  }
  /**
   * Query consumer group lag per partition.
   * Lag = broker high-watermark − last committed offset.
   * A committed offset of -1 (nothing committed yet) counts as full lag.
   */
  async getConsumerLag(groupId) {
    const gid = groupId ?? this.defaultGroupId;
    if (!this.isAdminConnected) {
      await this.admin.connect();
      this.isAdminConnected = true;
    }
    const committedByTopic = await this.admin.fetchOffsets({ groupId: gid });
    const result = [];
    for (const { topic: topic2, partitions } of committedByTopic) {
      const brokerOffsets = await this.admin.fetchTopicOffsets(topic2);
      for (const { partition, offset } of partitions) {
        const broker = brokerOffsets.find((o) => o.partition === partition);
        if (!broker) continue;
        const committed = parseInt(offset, 10);
        const high = parseInt(broker.high, 10);
        const lag = committed === -1 ? high : Math.max(0, high - committed);
        result.push({ topic: topic2, partition, lag });
      }
    }
    return result;
  }
  /** Check broker connectivity. Never throws — returns a discriminated union. */
  async checkStatus() {
    try {
      if (!this.isAdminConnected) {
        await this.admin.connect();
        this.isAdminConnected = true;
      }
      const topics = await this.admin.listTopics();
      return { status: "up", clientId: this.clientId, topics };
    } catch (error) {
      return {
        status: "down",
        clientId: this.clientId,
        error: error instanceof Error ? error.message : String(error)
      };
    }
  }
  getClientId() {
    return this.clientId;
  }
  /** Gracefully disconnect producer, all consumers, and admin. */
  async disconnect() {
    const tasks = [this.producer.disconnect()];
    if (this.txProducer) {
      tasks.push(this.txProducer.disconnect());
      this.txProducer = void 0;
    }
    for (const consumer of this.consumers.values()) {
      tasks.push(consumer.disconnect());
    }
    if (this.isAdminConnected) {
      tasks.push(this.admin.disconnect());
      this.isAdminConnected = false;
    }
    await Promise.allSettled(tasks);
    this.consumers.clear();
    this.runningConsumers.clear();
    this.consumerCreationOptions.clear();
    this.companionGroupIds.clear();
    this.logger.log("All connections closed");
  }
  // ── Private helpers ──────────────────────────────────────────────
  async preparePayload(topicOrDesc, messages) {
    registerSchema(topicOrDesc, this.schemaRegistry);
    const payload = await buildSendPayload(
      topicOrDesc,
      messages,
      this.producerOpsDeps
    );
    await this.ensureTopic(payload.topic);
    return payload;
  }
  // afterSend is called once per message — symmetric with beforeSend in buildSendPayload.
  notifyAfterSend(topic2, count) {
    for (let i = 0; i < count; i++) {
      for (const inst of this.instrumentation) {
        inst.afterSend?.(topic2);
      }
    }
  }
  /**
   * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
   * The handler itself is not cancelled — the warning is diagnostic only.
   */
  wrapWithTimeoutWarning(fn, timeoutMs, topic2) {
    let timer;
    const promise = fn().finally(() => {
      if (timer !== void 0) clearTimeout(timer);
    });
    timer = setTimeout(() => {
      this.logger.warn(
        `Handler for topic "${topic2}" has not resolved after ${timeoutMs}ms \u2014 possible stuck handler`
      );
    }, timeoutMs);
    return promise;
  }
  async ensureTopic(topic2) {
    if (!this.autoCreateTopicsEnabled || this.ensuredTopics.has(topic2)) return;
    if (!this.isAdminConnected) {
      await this.admin.connect();
      this.isAdminConnected = true;
    }
    await this.admin.createTopics({
      topics: [{ topic: topic2, numPartitions: this.numPartitions }]
    });
    this.ensuredTopics.add(topic2);
  }
  /** Shared consumer setup: groupId check, schema map, connect, subscribe. */
  async setupConsumer(topics, mode, options) {
    const {
      groupId: optGroupId,
      fromBeginning = false,
      retry,
      dlq = false,
      interceptors = [],
      schemas: optionSchemas
    } = options;
    const gid = optGroupId || this.defaultGroupId;
    const existingMode = this.runningConsumers.get(gid);
    const oppositeMode = mode === "eachMessage" ? "eachBatch" : "eachMessage";
    if (existingMode === oppositeMode) {
      throw new Error(
        `Cannot use ${mode} on consumer group "${gid}" \u2014 it is already running with ${oppositeMode}. Use a different groupId for this consumer.`
      );
    }
    const consumer = getOrCreateConsumer(
      gid,
      fromBeginning,
      options.autoCommit ?? true,
      this.consumerOpsDeps
    );
    const schemaMap = buildSchemaMap(
      topics,
      this.schemaRegistry,
      optionSchemas
    );
    const topicNames = topics.map((t) => resolveTopicName(t));
    for (const t of topicNames) {
      await this.ensureTopic(t);
    }
    if (dlq) {
      for (const t of topicNames) {
        await this.ensureTopic(`${t}.dlq`);
      }
    }
    await consumer.connect();
    await subscribeWithRetry(
      consumer,
      topicNames,
      this.logger,
      options.subscribeRetry
    );
    this.logger.log(
      `${mode === "eachBatch" ? "Batch consumer" : "Consumer"} subscribed to topics: ${topicNames.join(", ")}`
    );
    return { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry };
  }
  // ── Deps object getters ──────────────────────────────────────────
  get producerOpsDeps() {
    return {
      schemaRegistry: this.schemaRegistry,
      strictSchemasEnabled: this.strictSchemasEnabled,
      instrumentation: this.instrumentation
    };
  }
  get consumerOpsDeps() {
    return {
      consumers: this.consumers,
      consumerCreationOptions: this.consumerCreationOptions,
      kafka: this.kafka,
      onRebalance: this.onRebalance,
      logger: this.logger
    };
  }
  get messageDeps() {
    return {
      logger: this.logger,
      producer: this.producer,
      instrumentation: this.instrumentation,
      onMessageLost: this.onMessageLost
    };
  }
  get retryTopicDeps() {
    return {
      logger: this.logger,
      producer: this.producer,
      instrumentation: this.instrumentation,
      onMessageLost: this.onMessageLost,
      ensureTopic: (t) => this.ensureTopic(t),
      getOrCreateConsumer: (gid, fb, ac) => getOrCreateConsumer(gid, fb, ac, this.consumerOpsDeps),
      runningConsumers: this.runningConsumers
    };
  }
};

// src/client/message/topic.ts
function topic(name) {
  return {
    /** Provide an explicit message type without a runtime schema. */
    type: () => ({
      __topic: name,
      __type: void 0
    }),
    schema: (schema) => ({
      __topic: name,
      __type: void 0,
      __schema: schema
    })
  };
}

export {
  HEADER_EVENT_ID,
  HEADER_CORRELATION_ID,
  HEADER_TIMESTAMP,
  HEADER_SCHEMA_VERSION,
  HEADER_TRACEPARENT,
  getEnvelopeContext,
  runWithEnvelopeContext,
  buildEnvelopeHeaders,
  decodeHeaders,
  extractEnvelope,
  KafkaProcessingError,
  KafkaValidationError,
  KafkaRetryExhaustedError,
  KafkaClient,
  topic
};
//# sourceMappingURL=chunk-TD2AE774.mjs.map
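
For orientation, a minimal usage sketch of the API this chunk exports (KafkaClient, topic). The broker address, topic name, schema object, and handler below are illustrative assumptions, not part of the package diff; any object exposing a parse() method works as a schema.

// Illustrative sketch only: names and values here are assumptions, not from the diff.
import { KafkaClient, topic } from "@drarzter/kafka-client";

// A topic descriptor with a runtime schema; parse() may be sync or async and
// should return the validated payload or throw.
const orderCreated = topic("orders.created").schema({
  parse: (value) => {
    if (typeof value?.orderId !== "string") throw new Error("orderId is required");
    return value;
  }
});

const client = new KafkaClient("billing-service", "billing-group", ["localhost:9092"], {
  autoCreateTopics: true,
  onMessageLost: ({ topic, error }) => console.error(`lost message on ${topic}`, error)
});

await client.connectProducer();
await client.sendMessage(orderCreated, { orderId: "o-1" }, { key: "o-1" });

await client.startConsumer([orderCreated], async (envelope) => {
  // envelope carries payload plus x-event-id / x-correlation-id / traceparent metadata.
  console.log(envelope.correlationId, envelope.payload);
}, { retry: { maxRetries: 3, backoffMs: 1000 }, dlq: true });

// Later, on shutdown:
await client.disconnect();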