@drarzter/kafka-client 0.6.9 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +231 -1
- package/dist/{chunk-4526Y4PV.mjs → chunk-AMEGMOZH.mjs} +351 -13
- package/dist/chunk-AMEGMOZH.mjs.map +1 -0
- package/dist/core.d.mts +43 -3
- package/dist/core.d.ts +43 -3
- package/dist/core.js +350 -12
- package/dist/core.js.map +1 -1
- package/dist/core.mjs +1 -1
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +350 -12
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/otel.d.mts +1 -1
- package/dist/otel.d.ts +1 -1
- package/dist/testing.d.mts +1 -1
- package/dist/testing.d.ts +1 -1
- package/dist/testing.js +7 -0
- package/dist/testing.js.map +1 -1
- package/dist/testing.mjs +7 -0
- package/dist/testing.mjs.map +1 -1
- package/dist/{types-736Gj0J3.d.mts → types-BEIGjmV6.d.mts} +145 -2
- package/dist/{types-736Gj0J3.d.ts → types-BEIGjmV6.d.ts} +145 -2
- package/package.json +1 -1
- package/dist/chunk-4526Y4PV.mjs.map +0 -1
package/dist/core.mjs
CHANGED
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,6 @@
 export { KafkaClient, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.mjs';
-import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-
-export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as
+import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-BEIGjmV6.mjs';
+export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as CircuitBreakerOptions, h as ConsumerHandle, i as ConsumerInterceptor, D as DeduplicationOptions, j as DlqReason, k as DlqReplayOptions, E as EnvelopeHeaderOptions, l as EventEnvelope, H as HEADER_CORRELATION_ID, m as HEADER_EVENT_ID, n as HEADER_LAMPORT_CLOCK, o as HEADER_SCHEMA_VERSION, p as HEADER_TIMESTAMP, q as HEADER_TRACEPARENT, r as InferSchema, s as KafkaLogger, t as KafkaMetrics, M as MessageHeaders, u as MessageLostContext, R as RetryOptions, v as SchemaParseContext, w as SendOptions, x as SubscribeRetryOptions, y as TTopicMessageMap, z as TopicsFrom, A as TransactionContext, F as TtlExpiredContext, J as buildEnvelopeHeaders, L as decodeHeaders, N as extractEnvelope, O as getEnvelopeContext, P as runWithEnvelopeContext, Q as topic } from './types-BEIGjmV6.mjs';
 import { DynamicModule, OnModuleInit } from '@nestjs/common';
 import { DiscoveryService, ModuleRef } from '@nestjs/core';

package/dist/index.d.ts
CHANGED
@@ -1,6 +1,6 @@
 export { KafkaClient, KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError } from './core.js';
-import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-
-export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as
+import { T as TopicMapConstraint, C as ClientId, G as GroupId, K as KafkaInstrumentation, a as KafkaClientOptions, S as SchemaLike, b as ConsumerOptions, c as TopicDescriptor, I as IKafkaClient, d as KafkaHealthResult } from './types-BEIGjmV6.js';
+export { B as BatchMessageItem, e as BatchMeta, f as BeforeConsumeResult, g as CircuitBreakerOptions, h as ConsumerHandle, i as ConsumerInterceptor, D as DeduplicationOptions, j as DlqReason, k as DlqReplayOptions, E as EnvelopeHeaderOptions, l as EventEnvelope, H as HEADER_CORRELATION_ID, m as HEADER_EVENT_ID, n as HEADER_LAMPORT_CLOCK, o as HEADER_SCHEMA_VERSION, p as HEADER_TIMESTAMP, q as HEADER_TRACEPARENT, r as InferSchema, s as KafkaLogger, t as KafkaMetrics, M as MessageHeaders, u as MessageLostContext, R as RetryOptions, v as SchemaParseContext, w as SendOptions, x as SubscribeRetryOptions, y as TTopicMessageMap, z as TopicsFrom, A as TransactionContext, F as TtlExpiredContext, J as buildEnvelopeHeaders, L as decodeHeaders, N as extractEnvelope, O as getEnvelopeContext, P as runWithEnvelopeContext, Q as topic } from './types-BEIGjmV6.js';
 import { DynamicModule, OnModuleInit } from '@nestjs/common';
 import { DiscoveryService, ModuleRef } from '@nestjs/core';

package/dist/index.js
CHANGED
@@ -394,9 +394,16 @@ var RETRY_HEADER_MAX_RETRIES = "x-retry-max-retries";
 var RETRY_HEADER_ORIGINAL_TOPIC = "x-retry-original-topic";
 function buildRetryTopicPayload(originalTopic, rawMessages, attempt, maxRetries, delayMs, originalHeaders) {
   const retryTopic = `${originalTopic}.retry.${attempt}`;
-  const STRIP = /* @__PURE__ */ new Set([
+  const STRIP = /* @__PURE__ */ new Set([
+    RETRY_HEADER_ATTEMPT,
+    RETRY_HEADER_AFTER,
+    RETRY_HEADER_MAX_RETRIES,
+    RETRY_HEADER_ORIGINAL_TOPIC
+  ]);
   function buildHeaders(hdr) {
-    const userHeaders = Object.fromEntries(
+    const userHeaders = Object.fromEntries(
+      Object.entries(hdr).filter(([k]) => !STRIP.has(k))
+    );
     return {
       ...userHeaders,
       [RETRY_HEADER_ATTEMPT]: String(attempt),
@@ -769,6 +776,31 @@ async function handleEachMessage(payload, opts, deps) {
       return;
     }
   }
+  if (opts.messageTtlMs !== void 0) {
+    const ageMs = Date.now() - new Date(envelope.timestamp).getTime();
+    if (ageMs > opts.messageTtlMs) {
+      deps.logger.warn(
+        `[KafkaClient] TTL expired on ${topic2}: age ${ageMs}ms > ${opts.messageTtlMs}ms`
+      );
+      if (dlq) {
+        await sendToDlq(topic2, message.value.toString(), deps, {
+          error: new Error(`Message TTL expired: age ${ageMs}ms`),
+          attempt: 0,
+          originalHeaders: envelope.headers
+        });
+        deps.onDlq?.(envelope, "ttl-expired");
+      } else {
+        await deps.onTtlExpired?.({
+          topic: topic2,
+          ageMs,
+          messageTtlMs: opts.messageTtlMs,
+          headers: envelope.headers
+        });
+      }
+      await commitOffset?.();
+      return;
+    }
+  }
   await executeWithRetry(
     () => {
       const fn = () => runWithEnvelopeContext(
@@ -871,6 +903,30 @@ async function handleEachBatch(payload, opts, deps) {
         );
         if (isDuplicate) continue;
       }
+      if (opts.messageTtlMs !== void 0) {
+        const ageMs = Date.now() - new Date(envelope.timestamp).getTime();
+        if (ageMs > opts.messageTtlMs) {
+          deps.logger.warn(
+            `[KafkaClient] TTL expired on ${batch.topic}: age ${ageMs}ms > ${opts.messageTtlMs}ms`
+          );
+          if (dlq) {
+            await sendToDlq(batch.topic, message.value.toString(), deps, {
+              error: new Error(`Message TTL expired: age ${ageMs}ms`),
+              attempt: 0,
+              originalHeaders: envelope.headers
+            });
+            deps.onDlq?.(envelope, "ttl-expired");
+          } else {
+            await deps.onTtlExpired?.({
+              topic: batch.topic,
+              ageMs,
+              messageTtlMs: opts.messageTtlMs,
+              headers: envelope.headers
+            });
+          }
+          continue;
+        }
+      }
       envelopes.push(envelope);
       rawMessages.push(message.value.toString());
     }
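Both TTL hunks follow the same shape: compute the envelope's age from its timestamp header and, once it exceeds `messageTtlMs`, either dead-letter the raw message (when a DLQ is configured, with DLQ reason `"ttl-expired"`) or report it through the new `onTtlExpired` callback, then commit/skip so the consumer keeps moving. A minimal usage sketch; only `messageTtlMs`, `onTtlExpired`, and the context shape `{ topic, ageMs, messageTtlMs, headers }` are taken from this diff, while the constructor arguments and topic name are illustrative assumptions:

```ts
// Sketch, not the package's documented API surface.
import { KafkaClient } from '@drarzter/kafka-client';

const kafka = new KafkaClient('example-service', {
  // Fires only when no DLQ is configured for the consumer.
  onTtlExpired: async ({ topic, ageMs, messageTtlMs }) => {
    console.warn(`Skipped stale message on ${topic}: ${ageMs}ms > ${messageTtlMs}ms`);
  },
});

await kafka.startConsumer(
  ['orders.created'], // placeholder topic
  async (envelope) => {
    // Only messages younger than 60s reach the handler.
  },
  { messageTtlMs: 60_000 },
);
```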
@@ -1180,6 +1236,54 @@ async function startRetryTopicConsumers(originalTopics, originalGroupId, handleM
 // src/client/kafka.client/index.ts
 var { Kafka: KafkaClass, logLevel: KafkaLogLevel } = import_kafka_javascript.KafkaJS;
 var _activeTransactionalIds = /* @__PURE__ */ new Set();
+var AsyncQueue = class {
+  constructor(highWaterMark = Infinity, onFull = () => {
+  }, onDrained = () => {
+  }) {
+    this.highWaterMark = highWaterMark;
+    this.onFull = onFull;
+    this.onDrained = onDrained;
+  }
+  items = [];
+  waiting = [];
+  closed = false;
+  error;
+  paused = false;
+  push(item) {
+    if (this.waiting.length > 0) {
+      this.waiting.shift().resolve({ value: item, done: false });
+    } else {
+      this.items.push(item);
+      if (!this.paused && this.items.length >= this.highWaterMark) {
+        this.paused = true;
+        this.onFull();
+      }
+    }
+  }
+  fail(err) {
+    this.closed = true;
+    this.error = err;
+    for (const { reject } of this.waiting.splice(0)) reject(err);
+  }
+  close() {
+    this.closed = true;
+    for (const { resolve } of this.waiting.splice(0))
+      resolve({ value: void 0, done: true });
+  }
+  next() {
+    if (this.error) return Promise.reject(this.error);
+    if (this.items.length > 0) {
+      const value = this.items.shift();
+      if (this.paused && this.items.length <= Math.floor(this.highWaterMark / 2)) {
+        this.paused = false;
+        this.onDrained();
+      }
+      return Promise.resolve({ value, done: false });
+    }
+    if (this.closed) return Promise.resolve({ value: void 0, done: true });
+    return new Promise((resolve, reject) => this.waiting.push({ resolve, reject }));
+  }
+};
 var KafkaClient = class _KafkaClient {
   kafka;
   producer;
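The new `AsyncQueue` is internal plumbing for the `consume()` iterator added further down: `push` hands an item straight to a pending `next()` when one is waiting, otherwise buffers it; crossing `highWaterMark` fires `onFull` (the client pauses the topic's partitions), and draining back to half the mark fires `onDrained` (the client resumes them). A standalone illustration of that contract; the class is not exported, so this is purely descriptive:

```ts
// Descriptive sketch of the internal queue's backpressure contract;
// AsyncQueue is not part of the package's public API.
const queue = new AsyncQueue<string>(
  4,                            // highWaterMark
  () => console.log('paused'),  // onFull → consumer.pause(...)
  () => console.log('resumed'), // onDrained → consumer.resume(...)
);

for (const id of ['a', 'b', 'c', 'd']) queue.push(id); // 4th push logs "paused"
await queue.next(); // buffer: 3, still paused
await queue.next(); // buffer: 2 <= floor(4 / 2), logs "resumed"
```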
@@ -1204,6 +1308,7 @@ var KafkaClient = class _KafkaClient {
   companionGroupIds = /* @__PURE__ */ new Map();
   instrumentation;
   onMessageLost;
+  onTtlExpired;
   onRebalance;
   /** Transactional producer ID — configurable via `KafkaClientOptions.transactionalId`. */
   txId;
@@ -1213,6 +1318,10 @@ var KafkaClient = class _KafkaClient {
   _lamportClock = 0;
   /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
   dedupStates = /* @__PURE__ */ new Map();
+  /** Circuit breaker state per `"${gid}:${topic}:${partition}"` key. */
+  circuitStates = /* @__PURE__ */ new Map();
+  /** Circuit breaker config per groupId, set at startConsumer/startBatchConsumer time. */
+  circuitConfigs = /* @__PURE__ */ new Map();
   isAdminConnected = false;
   inFlightTotal = 0;
   drainResolvers = [];
@@ -1231,6 +1340,7 @@ var KafkaClient = class _KafkaClient {
     this.numPartitions = options?.numPartitions ?? 1;
     this.instrumentation = options?.instrumentation ?? [];
     this.onMessageLost = options?.onMessageLost;
+    this.onTtlExpired = options?.onTtlExpired;
     this.onRebalance = options?.onRebalance;
     this.txId = options?.transactionalId ?? `${clientId}-tx`;
     this.kafka = new KafkaClass({
@@ -1354,7 +1464,9 @@ var KafkaClient = class _KafkaClient {
     }
     const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
     const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachMessage", setupOptions);
-
+    if (options.circuitBreaker)
+      this.circuitConfigs.set(gid, options.circuitBreaker);
+    const deps = this.messageDepsFor(gid);
     const timeoutMs = options.handlerTimeoutMs;
     const deduplication = this.resolveDeduplicationContext(
       gid,
@@ -1380,6 +1492,7 @@ var KafkaClient = class _KafkaClient {
         timeoutMs,
         wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
         deduplication,
+        messageTtlMs: options.messageTtlMs,
         eosMainContext
       },
       deps
@@ -1420,7 +1533,9 @@ var KafkaClient = class _KafkaClient {
     }
     const setupOptions = options.retryTopics ? { ...options, autoCommit: false } : options;
     const { consumer, schemaMap, topicNames, gid, dlq, interceptors, retry } = await this.setupConsumer(topics, "eachBatch", setupOptions);
-
+    if (options.circuitBreaker)
+      this.circuitConfigs.set(gid, options.circuitBreaker);
+    const deps = this.messageDepsFor(gid);
     const timeoutMs = options.handlerTimeoutMs;
     const deduplication = this.resolveDeduplicationContext(
       gid,
@@ -1446,6 +1561,7 @@ var KafkaClient = class _KafkaClient {
         timeoutMs,
         wrapWithTimeout: this.wrapWithTimeoutWarning.bind(this),
         deduplication,
+        messageTtlMs: options.messageTtlMs,
         eosMainContext
       },
       deps
@@ -1482,6 +1598,43 @@ var KafkaClient = class _KafkaClient {
     }
     return { groupId: gid, stop: () => this.stopConsumer(gid) };
   }
+  /**
+   * Consume messages from a topic as an AsyncIterableIterator.
+   * Use with `for await` — breaking out of the loop automatically stops the consumer.
+   *
+   * @example
+   * for await (const envelope of kafka.consume('my.topic')) {
+   *   console.log(envelope.data);
+   * }
+   */
+  consume(topic2, options) {
+    const gid = options?.groupId ?? this.defaultGroupId;
+    const queue = new AsyncQueue(
+      options?.queueHighWaterMark,
+      () => this.pauseTopicAllPartitions(gid, topic2),
+      () => this.resumeTopicAllPartitions(gid, topic2)
+    );
+    const handlePromise = this.startConsumer(
+      [topic2],
+      async (envelope) => {
+        queue.push(envelope);
+      },
+      options
+    );
+    handlePromise.catch((err) => queue.fail(err));
+    return {
+      [Symbol.asyncIterator]() {
+        return this;
+      },
+      next: () => queue.next(),
+      return: async () => {
+        queue.close();
+        const handle = await handlePromise;
+        await handle.stop();
+        return { value: void 0, done: true };
+      }
+    };
+  }
   // ── Consumer lifecycle ───────────────────────────────────────────
   async stopConsumer(groupId) {
     if (groupId !== void 0) {
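`consume()` glues the queue to a regular consumer: the handler just pushes envelopes, `next()` pulls them, a startup failure propagates through `fail()`, and `return` (triggered by `break` or an exception inside the loop) closes the queue and stops the consumer. A usage sketch; topic, group, and handler names are placeholders, while `consume`, `groupId`, and `queueHighWaterMark` come from the diff:

```ts
// Placeholder topic/group names; option names are taken from the diff above.
const messages = kafka.consume('orders.created', {
  groupId: 'order-audit',
  queueHighWaterMark: 500, // pause partitions once 500 envelopes are buffered
});

for await (const envelope of messages) {
  await persistAudit(envelope); // hypothetical handler
  if (shouldStop()) break;      // break → return() → queue.close() + consumer stop
}
```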
@@ -1502,6 +1655,13 @@ var KafkaClient = class _KafkaClient {
     this.runningConsumers.delete(groupId);
     this.consumerCreationOptions.delete(groupId);
     this.dedupStates.delete(groupId);
+    for (const key of [...this.circuitStates.keys()]) {
+      if (key.startsWith(`${groupId}:`)) {
+        clearTimeout(this.circuitStates.get(key).timer);
+        this.circuitStates.delete(key);
+      }
+    }
+    this.circuitConfigs.delete(groupId);
     this.logger.log(`Consumer disconnected: group "${groupId}"`);
     const mainTxId = `${groupId}-main-tx`;
     const mainTxProducer = this.retryTxProducers.get(mainTxId);
@@ -1562,6 +1722,10 @@ var KafkaClient = class _KafkaClient {
     this.companionGroupIds.clear();
     this.retryTxProducers.clear();
     this.dedupStates.clear();
+    for (const state of this.circuitStates.values())
+      clearTimeout(state.timer);
+    this.circuitStates.clear();
+    this.circuitConfigs.clear();
     this.logger.log("All consumers disconnected");
   }
 }
@@ -1591,6 +1755,24 @@ var KafkaClient = class _KafkaClient {
       )
     );
   }
+  /** Pause all assigned partitions of a topic for a consumer group (used for queue backpressure). */
+  pauseTopicAllPartitions(gid, topic2) {
+    const consumer = this.consumers.get(gid);
+    if (!consumer) return;
+    const assignment = consumer.assignment?.() ?? [];
+    const partitions = assignment.filter((a) => a.topic === topic2).map((a) => a.partition);
+    if (partitions.length > 0)
+      consumer.pause(partitions.map((p) => ({ topic: topic2, partitions: [p] })));
+  }
+  /** Resume all assigned partitions of a topic for a consumer group (used for queue backpressure). */
+  resumeTopicAllPartitions(gid, topic2) {
+    const consumer = this.consumers.get(gid);
+    if (!consumer) return;
+    const assignment = consumer.assignment?.() ?? [];
+    const partitions = assignment.filter((a) => a.topic === topic2).map((a) => a.partition);
+    if (partitions.length > 0)
+      consumer.resume(partitions.map((p) => ({ topic: topic2, partitions: [p] })));
+  }
   /** DLQ header keys added by `sendToDlq` — stripped before re-publishing. */
   static DLQ_HEADER_KEYS = /* @__PURE__ */ new Set([
     "x-dlq-original-topic",
@@ -1635,9 +1817,7 @@ var KafkaClient = class _KafkaClient {
       this.consumerCreationOptions.delete(tempGroupId);
     });
   };
-  consumer.connect().then(
-    () => subscribeWithRetry(consumer, [dlqTopic], this.logger)
-  ).then(
+  consumer.connect().then(() => subscribeWithRetry(consumer, [dlqTopic], this.logger)).then(
     () => consumer.run({
       eachMessage: async ({ partition, message }) => {
         if (!message.value) return;
@@ -1703,6 +1883,75 @@ var KafkaClient = class _KafkaClient {
       `Offsets reset to ${position} for group "${gid}" on topic "${topic2}"`
     );
   }
+  /**
+   * Seek specific topic-partition pairs to explicit offsets for a stopped consumer group.
+   * Throws if the group is still running — call `stopConsumer(groupId)` first.
+   * Assignments are grouped by topic and committed via `admin.setOffsets`.
+   */
+  async seekToOffset(groupId, assignments) {
+    const gid = groupId ?? this.defaultGroupId;
+    if (this.runningConsumers.has(gid)) {
+      throw new Error(
+        `seekToOffset: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
+      );
+    }
+    await this.ensureAdminConnected();
+    const byTopic = /* @__PURE__ */ new Map();
+    for (const { topic: topic2, partition, offset } of assignments) {
+      const list = byTopic.get(topic2) ?? [];
+      list.push({ partition, offset });
+      byTopic.set(topic2, list);
+    }
+    for (const [topic2, partitions] of byTopic) {
+      await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions });
+      this.logger.log(
+        `Offsets set for group "${gid}" on "${topic2}": ${JSON.stringify(partitions)}`
+      );
+    }
+  }
+  async seekToTimestamp(groupId, assignments) {
+    const gid = groupId ?? this.defaultGroupId;
+    if (this.runningConsumers.has(gid)) {
+      throw new Error(
+        `seekToTimestamp: consumer group "${gid}" is still running. Call stopConsumer("${gid}") before seeking offsets.`
+      );
+    }
+    await this.ensureAdminConnected();
+    const byTopic = /* @__PURE__ */ new Map();
+    for (const { topic: topic2, partition, timestamp } of assignments) {
+      const list = byTopic.get(topic2) ?? [];
+      list.push({ partition, timestamp });
+      byTopic.set(topic2, list);
+    }
+    for (const [topic2, parts] of byTopic) {
+      const offsets = await Promise.all(
+        parts.map(async ({ partition, timestamp }) => {
+          const results = await this.admin.fetchTopicOffsetsByTime(
+            topic2,
+            timestamp
+          );
+          const found = results.find(
+            (r) => r.partition === partition
+          );
+          return { partition, offset: found?.offset ?? "-1" };
+        })
+      );
+      await this.admin.setOffsets({ groupId: gid, topic: topic2, partitions: offsets });
+      this.logger.log(
+        `Offsets set by timestamp for group "${gid}" on "${topic2}": ${JSON.stringify(offsets)}`
+      );
+    }
+  }
+  getCircuitState(topic2, partition, groupId) {
+    const gid = groupId ?? this.defaultGroupId;
+    const state = this.circuitStates.get(`${gid}:${topic2}:${partition}`);
+    if (!state) return void 0;
+    return {
+      status: state.status,
+      failures: state.window.filter((v) => !v).length,
+      windowSize: state.window.length
+    };
+  }
   /**
    * Query consumer group lag per partition.
    * Lag = broker high-watermark − last committed offset.
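Both seek methods refuse to touch a running group, so the intended flow is stop, seek, then restart. A sketch; group and topic names are placeholders, while the assignment shapes and the `"-1"` fallback come from the diff:

```ts
// Placeholder group/topic names; assignment shapes match the diff above.
await kafka.stopConsumer('order-audit');

// Pin explicit offsets per partition...
await kafka.seekToOffset('order-audit', [
  { topic: 'orders.created', partition: 0, offset: '1200' },
]);

// ...or resolve them from a timestamp; partitions with no matching
// offset fall back to "-1".
await kafka.seekToTimestamp('order-audit', [
  { topic: 'orders.created', partition: 1, timestamp: Date.now() - 3_600_000 },
]);
```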
@@ -1808,6 +2057,9 @@ var KafkaClient = class _KafkaClient {
     this.runningConsumers.clear();
     this.consumerCreationOptions.clear();
     this.companionGroupIds.clear();
+    for (const state of this.circuitStates.values()) clearTimeout(state.timer);
+    this.circuitStates.clear();
+    this.circuitConfigs.clear();
     this.logger.log("All connections closed");
   }
   // ── Graceful shutdown ────────────────────────────────────────────
@@ -1901,11 +2153,66 @@ var KafkaClient = class _KafkaClient {
       inst.onRetry?.(envelope, attempt, maxRetries);
     }
   }
-  notifyDlq(envelope, reason) {
+  notifyDlq(envelope, reason, gid) {
     this.metricsFor(envelope.topic).dlqCount++;
     for (const inst of this.instrumentation) {
       inst.onDlq?.(envelope, reason);
     }
+    if (!gid) return;
+    const cfg = this.circuitConfigs.get(gid);
+    if (!cfg) return;
+    const threshold = cfg.threshold ?? 5;
+    const recoveryMs = cfg.recoveryMs ?? 3e4;
+    const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
+    let state = this.circuitStates.get(stateKey);
+    if (!state) {
+      state = { status: "closed", window: [], successes: 0 };
+      this.circuitStates.set(stateKey, state);
+    }
+    if (state.status === "open") return;
+    const openCircuit = () => {
+      state.status = "open";
+      state.window = [];
+      state.successes = 0;
+      clearTimeout(state.timer);
+      for (const inst of this.instrumentation)
+        inst.onCircuitOpen?.(envelope.topic, envelope.partition);
+      this.pauseConsumer(gid, [
+        { topic: envelope.topic, partitions: [envelope.partition] }
+      ]);
+      state.timer = setTimeout(() => {
+        state.status = "half-open";
+        state.successes = 0;
+        this.logger.log(
+          `[CircuitBreaker] HALF-OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
+        );
+        for (const inst of this.instrumentation)
+          inst.onCircuitHalfOpen?.(envelope.topic, envelope.partition);
+        this.resumeConsumer(gid, [
+          { topic: envelope.topic, partitions: [envelope.partition] }
+        ]);
+      }, recoveryMs);
+    };
+    if (state.status === "half-open") {
+      clearTimeout(state.timer);
+      this.logger.warn(
+        `[CircuitBreaker] OPEN (half-open failure) \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
+      );
+      openCircuit();
+      return;
+    }
+    const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
+    state.window = [...state.window, false];
+    if (state.window.length > windowSize) {
+      state.window = state.window.slice(state.window.length - windowSize);
+    }
+    const failures = state.window.filter((v) => !v).length;
+    if (failures >= threshold) {
+      this.logger.warn(
+        `[CircuitBreaker] OPEN \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition} (${failures}/${state.window.length} failures, threshold=${threshold})`
+      );
+      openCircuit();
+    }
   }
   notifyDuplicate(envelope, strategy) {
     this.metricsFor(envelope.topic).dedupCount++;
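The breaker counts each DLQ send as a failure in a sliding boolean window keyed by `group:topic:partition`. Reaching `threshold` failures opens the circuit and pauses the partition; after `recoveryMs` it goes half-open and resumes, and `halfOpenSuccesses` consecutive successes (tracked in `notifyMessage` below) close it again. A configuration sketch; option names and defaults are read from the diff, while the topic and handler are placeholders:

```ts
// Defaults per the diff: threshold 5, recoveryMs 30 000,
// windowSize max(threshold * 2, 10), halfOpenSuccesses 1.
await kafka.startConsumer(
  ['payments.captured'],  // placeholder topic
  handlePayment,          // hypothetical handler
  {
    circuitBreaker: {
      threshold: 5,         // failures in the window before opening
      windowSize: 10,       // sliding window of recent outcomes
      recoveryMs: 30_000,   // open duration before the half-open probe
      halfOpenSuccesses: 2, // successes needed to close again
    },
  },
);

// Per-partition state is queryable:
kafka.getCircuitState('payments.captured', 0);
// → { status: 'closed' | 'open' | 'half-open', failures, windowSize } | undefined
```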
@@ -1913,11 +2220,40 @@ var KafkaClient = class _KafkaClient {
       inst.onDuplicate?.(envelope, strategy);
     }
   }
-  notifyMessage(envelope) {
+  notifyMessage(envelope, gid) {
     this.metricsFor(envelope.topic).processedCount++;
     for (const inst of this.instrumentation) {
       inst.onMessage?.(envelope);
     }
+    if (!gid) return;
+    const cfg = this.circuitConfigs.get(gid);
+    if (!cfg) return;
+    const stateKey = `${gid}:${envelope.topic}:${envelope.partition}`;
+    const state = this.circuitStates.get(stateKey);
+    if (!state) return;
+    const halfOpenSuccesses = cfg.halfOpenSuccesses ?? 1;
+    if (state.status === "half-open") {
+      state.successes++;
+      if (state.successes >= halfOpenSuccesses) {
+        clearTimeout(state.timer);
+        state.timer = void 0;
+        state.status = "closed";
+        state.window = [];
+        state.successes = 0;
+        this.logger.log(
+          `[CircuitBreaker] CLOSED \u2014 group="${gid}" topic="${envelope.topic}" partition=${envelope.partition}`
+        );
+        for (const inst of this.instrumentation)
+          inst.onCircuitClose?.(envelope.topic, envelope.partition);
+      }
+    } else if (state.status === "closed") {
+      const threshold = cfg.threshold ?? 5;
+      const windowSize = cfg.windowSize ?? Math.max(threshold * 2, 10);
+      state.window = [...state.window, true];
+      if (state.window.length > windowSize) {
+        state.window = state.window.slice(state.window.length - windowSize);
+      }
+    }
   }
   /**
    * Start a timer that logs a warning if `fn` hasn't resolved within `timeoutMs`.
@@ -2138,16 +2474,18 @@ var KafkaClient = class _KafkaClient {
       logger: this.logger
     };
   }
-
+  /** Build MessageHandlerDeps with circuit breaker callbacks bound to the given groupId. */
+  messageDepsFor(gid) {
     return {
       logger: this.logger,
       producer: this.producer,
       instrumentation: this.instrumentation,
       onMessageLost: this.onMessageLost,
+      onTtlExpired: this.onTtlExpired,
       onRetry: this.notifyRetry.bind(this),
-      onDlq: this.notifyDlq
+      onDlq: (envelope, reason) => this.notifyDlq(envelope, reason, gid),
       onDuplicate: this.notifyDuplicate.bind(this),
-      onMessage: this.notifyMessage
+      onMessage: (envelope) => this.notifyMessage(envelope, gid)
    };
  }
  get retryTopicDeps() {