@drarzter/kafka-client 0.7.3 → 0.7.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1866,27 +1866,143 @@ Both suites run in CI on every push to `main` and on pull requests.
1866
1866
 
1867
1867
  ```text
1868
1868
  src/
1869
- ├── client/ # Core0 framework deps
1869
+ ├── index.ts # Full entrypoint — core + NestJS adapter
1870
+ ├── core.ts # Standalone entrypoint (@drarzter/kafka-client/core)
1871
+ ├── otel.ts # OpenTelemetry entrypoint (@drarzter/kafka-client/otel)
1872
+ ├── testing.ts # Testing entrypoint (@drarzter/kafka-client/testing)
1873
+
1874
+ ├── client/ # Core library — zero framework dependencies
1875
+ │ ├── types.ts # All public interfaces: KafkaClientOptions, ConsumerOptions,
1876
+ │ │ # SendOptions, EventEnvelope, ConsumerHandle, BatchMeta,
1877
+ │ │ # KafkaInstrumentation, ConsumerInterceptor, SchemaLike, …
1878
+ │ ├── errors.ts # KafkaProcessingError, KafkaRetryExhaustedError, KafkaValidationError
1879
+ │ │
1880
+ │ ├── message/
1881
+ │ │ ├── envelope.ts # extractEnvelope() — Buffer → EventEnvelope; buildHeaders()
1882
+ │ │ └── topic.ts # topic() builder → TopicDescriptor; global schema registry;
1883
+ │ │ # TopicsFrom<T> utility type
1884
+ │ │
1870
1885
  │ ├── kafka.client/
1871
- │ │ ├── index.ts # KafkaClient class
1872
- │ │ ├── admin/ # AdminOps
1873
- │ │ ├── producer/ # payload builders, schema registry
1874
- │ │ ├── consumer/ # consumer ops, handler, retry-topic, DLQ replay, queue, pipeline
1875
- │ │ └── infra/ # CircuitBreakerManager, InFlightTracker, MetricsManager
1876
- ├── message/ # EventEnvelope, topic(), headers
1877
- ├── __tests__/
1878
- │ │ ├── helpers.ts
1879
- │ │ ├── consumer/ # consumer, batch, retry, dedup, TTL, DLQ replay, …
1880
- │ │ ├── producer/ # producer, transactions, schema, topic
1881
- │ │ ├── admin/ # admin, consumer lag
1882
- │ │ └── infra/ # circuit breaker, errors, instrumentation, OTel, metrics
1883
- └── types.ts, errors.ts,
1884
- ├── nest/ # NestJS adapter — Module, Explorer, decorators, health
1885
- ├── testing/ # Testing utilities — mock client, testcontainer wrapper
1886
- ├── core.ts # Standalone entrypoint (@drarzter/kafka-client/core)
1887
- ├── otel.ts # OpenTelemetry entrypoint (@drarzter/kafka-client/otel)
1888
- ├── testing.ts # Testing entrypoint (@drarzter/kafka-client/testing)
1889
- └── index.ts # Full entrypointcore + NestJS adapter
1886
+ │ │ ├── index.ts # KafkaClient class — public API, producer/consumer lifecycle,
1887
+ │ │ # Lamport clock, ALS correlation ID, graceful shutdown,
1888
+ │ │ # Lamport clock recovery (clockRecovery option)
1889
+ │ │
1890
+ │ │ ├── admin/
1891
+ │ │ └── ops.ts # AdminOps: listConsumerGroups(), describeTopics(),
1892
+ │ │ # deleteRecords(), resetOffsets(), seekToOffset(),
1893
+ │ │ # seekToTimestamp(), getConsumerLag(), ensureTopic()
1894
+ │ │
1895
+ │ │ ├── producer/
1896
+ │ │ │ └── ops.ts # buildPayload() JSON serialise + schema.parse();
1897
+ │ │ │ # sendMessage / sendBatch / transaction / sendTombstone;
1898
+ │ │ │ # schema registry lookup for strictSchemas mode
1899
+ │ │ │
1900
+ │ │ ├── consumer/
1901
+ │ │ │ ├── ops.ts # setupConsumer() librdkafka Consumer factory, rebalance
1902
+ │ │ │ │ # hooks, subscribe with retries, autoCommit config;
1903
+ │ │ │ │ # startConsumer() / startBatchConsumer() orchestration
1904
+ │ │ │ ├── handler.ts # handleEachMessage() / handleEachBatch() top-level
1905
+ │ │ │ │ # eachMessage/eachBatch callbacks wired to pipeline;
1906
+ │ │ │ │ # EOS main-consumer context for retryTopics mode
1907
+ │ │ │ ├── pipeline.ts # executeWithRetry() — dedup → TTL → interceptors →
1908
+ │ │ │ │ # handler → retry/DLQ/lost; sendToDlq(); sendToRetryTopic()
1909
+ │ │ │ ├── retry-topic.ts # startRetryTopicConsumers() — spins up N level consumers;
1910
+ │ │ │ │ # startLevelConsumer() — pause/sleep/resume per partition;
1911
+ │ │ │ │ # EOS routing via sendOffsetsToTransaction
1912
+ │ │ │ ├── subscribe-retry.ts # subscribeWithRetry() — retries consumer.subscribe() when
1913
+ │ │ │ │ # topic doesn't exist yet (subscribeRetry option)
1914
+ │ │ │ ├── dlq-replay.ts # replayDlq() — temp consumer reads DLQ up to high-watermark,
1915
+ │ │ │ │ # strips x-dlq-* headers, re-publishes to original topic
1916
+ │ │ │ └── queue.ts # AsyncQueue — bounded async iterator used by consume();
1917
+ │ │ │ # backpressure via queueHighWaterMark (pause/resume)
1918
+ │ │ │
1919
+ │ │ └── infra/
1920
+ │ │ ├── circuit-breaker.ts # CircuitBreakerManager — per groupId:topic:partition state
1921
+ │ │ │ # machine (CLOSED → OPEN → HALF_OPEN); sliding failure window
1922
+ │ │ ├── metrics-manager.ts # MetricsManager — in-process counters (processed / retry /
1923
+ │ │ │ # dlq / dedup) per topic; getMetrics() / resetMetrics()
1924
+ │ │ └── inflight-tracker.ts # InFlightTracker — tracks running handlers for graceful
1925
+ │ │ # shutdown drain (disconnect waits for all to settle)
1926
+ │ │
1927
+ │ └── __tests__/ # Unit tests — mocked @confluentinc/kafka-javascript
1928
+ │ ├── helpers.ts # buildMockMessage(), mock setup, spy exports (mockSend, …)
1929
+ │ ├── consumer.spec.ts # Legacy top-level consumer tests
1930
+ │ ├── consumer/
1931
+ │ │ ├── consumer.spec.ts # startConsumer() core behaviour
1932
+ │ │ ├── batch-consumer.spec.ts # startBatchConsumer(), BatchMeta, autoCommit
1933
+ │ │ ├── consumer-groups.spec.ts # Multiple groupId, eachMessage/eachBatch conflict guard
1934
+ │ │ ├── consumer-handle.spec.ts # ConsumerHandle.stop()
1935
+ │ │ ├── consume-iterator.spec.ts # consume() iterator, backpressure, break/return
1936
+ │ │ ├── retry.spec.ts # In-process retry, backoff, maxRetries
1937
+ │ │ ├── retry-topic.spec.ts # Retry topic chain, EOS routing, level consumers
1938
+ │ │ ├── deduplication.spec.ts # Lamport clock dedup, strategies (drop/dlq/topic)
1939
+ │ │ ├── interceptors.spec.ts # ConsumerInterceptor before/after/onError hooks
1940
+ │ │ ├── dlq-replay.spec.ts # replayDlq(), dryRun, filter, targetTopic
1941
+ │ │ ├── ttl.spec.ts # messageTtlMs, onTtlExpired, TTL→DLQ routing
1942
+ │ │ ├── message-lost.spec.ts # onMessageLost — handler error, validation, DLQ failure
1943
+ │ │ ├── handler-timeout.spec.ts # handlerTimeoutMs warning
1944
+ │ │ └── rebalance.spec.ts # onRebalance assign/revoke callbacks
1945
+ │ ├── producer/
1946
+ │ │ ├── producer.spec.ts # sendMessage(), sendBatch(), sendTombstone(), compression
1947
+ │ │ ├── transaction.spec.ts # transaction(), tx.send(), tx.sendBatch(), rollback
1948
+ │ │ ├── schema.spec.ts # Schema validation on send/consume, strictSchemas
1949
+ │ │ └── topic.spec.ts # topic() descriptor, TopicsFrom, schema registry
1950
+ │ ├── admin/
1951
+ │ │ ├── admin.spec.ts # listConsumerGroups(), describeTopics(), deleteRecords(),
1952
+ │ │ │ # resetOffsets(), seekToOffset(), seekToTimestamp()
1953
+ │ │ └── consumer-lag.spec.ts # getConsumerLag()
1954
+ │ └── infra/
1955
+ │ ├── circuit-breaker.spec.ts # CircuitBreaker state machine, getCircuitState()
1956
+ │ ├── errors.spec.ts # Error class hierarchy and properties
1957
+ │ ├── instrumentation.spec.ts # KafkaInstrumentation hooks, wrap/cleanup composition
1958
+ │ ├── otel.spec.ts # otelInstrumentation(), traceparent propagation
1959
+ │ ├── metrics-counters.spec.ts # getMetrics(), resetMetrics(), per-topic counters
1960
+ │ └── metrics-observability.spec.ts # onMessage/onRetry/onDlq/onDuplicate hooks
1961
+
1962
+ ├── nest/ # NestJS adapter — depends on @nestjs/common, reflect-metadata
1963
+ │ ├── kafka.module.ts # KafkaModule.register() / registerAsync(); DynamicModule,
1964
+ │ │ # isGlobal, named clients; onModuleInit / onModuleDestroy
1965
+ │ ├── kafka.explorer.ts # Auto-discovers @SubscribeTo() methods across all providers
1966
+ │ │ # at startup and calls startConsumer / startBatchConsumer
1967
+ │ ├── kafka.decorator.ts # @SubscribeTo(topic, options) method decorator;
1968
+ │ │ # @InjectKafkaClient(name?) parameter decorator
1969
+ │ ├── kafka.health.ts # KafkaHealthIndicator.check() — wraps kafka.checkStatus()
1970
+ │ ├── kafka.constants.ts # DI token constants (KAFKA_CLIENT, KAFKA_OPTIONS)
1971
+ │ └── __tests__/
1972
+ │ ├── kafka.decorator.spec.ts # @SubscribeTo / @InjectKafkaClient metadata
1973
+ │ ├── kafka.explorer.spec.ts # Explorer discovery and subscription wiring
1974
+ │ └── kafka.health.spec.ts # KafkaHealthIndicator up/down responses
1975
+
1976
+ ├── testing/ # Testing utilities — no runtime Kafka deps
1977
+ │ ├── index.ts # Re-exports createMockKafkaClient, KafkaTestContainer
1978
+ │ ├── mock-client.ts # createMockKafkaClient<T>() — jest.fn() on every
1979
+ │ │ # IKafkaClient method with sensible defaults
1980
+ │ ├── test-container.ts # KafkaTestContainer — thin @testcontainers/kafka wrapper;
1981
+ │ │ # transaction coordinator warmup, topic pre-creation
1982
+ │ └── __tests__/
1983
+ │ ├── mock-client.spec.ts # Mock client method stubs and overrides
1984
+ │ └── test-container.spec.ts # Container start/stop lifecycle
1985
+
1986
+ ├── integration/ # Integration tests — require Docker (testcontainers)
1987
+ │ ├── global-setup.ts # Start shared Kafka container before all suites
1988
+ │ ├── global-teardown.ts # Stop container after all suites
1989
+ │ ├── helpers.ts # createClient(), waitForMessages(), unique topic names
1990
+ │ ├── basic.integration.spec.ts # Send/receive, headers, batch, fromBeginning
1991
+ │ ├── consumer.integration.spec.ts # startConsumer(), pause/resume, stopConsumer()
1992
+ │ ├── transaction.integration.spec.ts # Atomic sends, rollback on error
1993
+ │ ├── retry.integration.spec.ts # In-process retry, retryTopics chain, DLQ
1994
+ │ ├── deduplication.integration.spec.ts # Lamport clock dedup with real broker
1995
+ │ ├── consumer-lag.integration.spec.ts # getConsumerLag() against real offsets
1996
+ │ ├── consumer-handle.integration.spec.ts # ConsumerHandle.stop() lifecycle
1997
+ │ ├── graceful-shutdown.integration.spec.ts # disconnect() drains in-flight handlers
1998
+ │ ├── schema.integration.spec.ts # Schema validation send+consume round-trip
1999
+ │ ├── otel.integration.spec.ts # OpenTelemetry span propagation end-to-end
2000
+ │ └── chaos.integration.spec.ts # Fault injection — broker restarts, rebalances
2001
+
2002
+ └── __mocks__/
2003
+ └── @confluentinc/
2004
+ └── kafka-javascript.ts # Manual Jest mock — Kafka, Producer, Consumer stubs;
2005
+ # mockSend, mockTxSend, mockCommit, mockSeek, …
1890
2006
  ```
1891
2007
 
1892
2008
  All exported types and methods have JSDoc comments — your IDE will show inline docs and autocomplete.
@@ -1816,6 +1816,8 @@ var KafkaClient = class _KafkaClient {
1816
1816
  txId;
1817
1817
  /** Monotonically increasing Lamport clock stamped on every outgoing message. */
1818
1818
  _lamportClock = 0;
1819
+ /** Topics to scan for the highest Lamport clock value on `connectProducer()`. */
1820
+ clockRecoveryTopics;
1819
1821
  /** Per-groupId deduplication state: `"topic:partition"` → last processed clock. */
1820
1822
  dedupStates = /* @__PURE__ */ new Map();
1821
1823
  circuitBreaker;
@@ -1851,6 +1853,7 @@ var KafkaClient = class _KafkaClient {
1851
1853
  this.onTtlExpired = options?.onTtlExpired;
1852
1854
  this.onRebalance = options?.onRebalance;
1853
1855
  this.txId = options?.transactionalId ?? `${clientId}-tx`;
1856
+ this.clockRecoveryTopics = options?.clockRecovery?.topics ?? [];
1854
1857
  this.kafka = new KafkaClass({
1855
1858
  kafkaJS: {
1856
1859
  clientId: this.clientId,
@@ -1998,8 +2001,109 @@ var KafkaClient = class _KafkaClient {
1998
2001
  */
1999
2002
  async connectProducer() {
2000
2003
  await this.producer.connect();
2004
+ await this.recoverLamportClock(this.clockRecoveryTopics);
2001
2005
  this.logger.log("Producer connected");
2002
2006
  }
2007
+ /**
2008
+ * Recover the Lamport clock from the last message across the given topics.
2009
+ *
2010
+ * For each topic, fetches partition high-watermarks via admin, creates a
2011
+ * short-lived consumer, seeks every non-empty partition to its last offset
2012
+ * (`highWatermark − 1`), reads one message per partition, and extracts the
2013
+ * maximum `x-lamport-clock` header value. On completion `_lamportClock` is
2014
+ * set to that maximum so the next `++_lamportClock` yields a strictly greater
2015
+ * value than any previously sent clock.
2016
+ *
2017
+ * Topics that are empty or missing are silently skipped.
2018
+ */
2019
+ async recoverLamportClock(topics) {
2020
+ if (topics.length === 0) return;
2021
+ this.logger.log(
2022
+ `Clock recovery: scanning ${topics.length} topic(s) for Lamport clock...`
2023
+ );
2024
+ await this.adminOps.ensureConnected();
2025
+ const partitionsToRead = [];
2026
+ for (const t of topics) {
2027
+ let offsets;
2028
+ try {
2029
+ offsets = await this.adminOps.admin.fetchTopicOffsets(t);
2030
+ } catch {
2031
+ this.logger.warn(
2032
+ `Clock recovery: could not fetch offsets for "${t}", skipping`
2033
+ );
2034
+ continue;
2035
+ }
2036
+ for (const { partition, high, low } of offsets) {
2037
+ if (parseInt(high, 10) > parseInt(low, 10)) {
2038
+ partitionsToRead.push({
2039
+ topic: t,
2040
+ partition,
2041
+ lastOffset: String(parseInt(high, 10) - 1)
2042
+ });
2043
+ }
2044
+ }
2045
+ }
2046
+ if (partitionsToRead.length === 0) {
2047
+ this.logger.log(
2048
+ "Clock recovery: all topics empty \u2014 keeping Lamport clock at 0"
2049
+ );
2050
+ return;
2051
+ }
2052
+ const recoveryGroupId = `${this.clientId}-clock-recovery-${Date.now()}`;
2053
+ let maxClock = -1;
2054
+ await new Promise((resolve, reject) => {
2055
+ const consumer = this.kafka.consumer({
2056
+ kafkaJS: { groupId: recoveryGroupId }
2057
+ });
2058
+ const remaining = new Set(
2059
+ partitionsToRead.map((p) => `${p.topic}:${p.partition}`)
2060
+ );
2061
+ const cleanup = () => {
2062
+ consumer.disconnect().catch(() => {
2063
+ });
2064
+ };
2065
+ consumer.connect().then(async () => {
2066
+ const uniqueTopics = [
2067
+ ...new Set(partitionsToRead.map((p) => p.topic))
2068
+ ];
2069
+ await consumer.subscribe({ topics: uniqueTopics });
2070
+ for (const { topic: t, partition, lastOffset } of partitionsToRead) {
2071
+ consumer.seek({ topic: t, partition, offset: lastOffset });
2072
+ }
2073
+ }).then(
2074
+ () => consumer.run({
2075
+ eachMessage: async ({ topic: t, partition, message }) => {
2076
+ const key = `${t}:${partition}`;
2077
+ if (!remaining.has(key)) return;
2078
+ remaining.delete(key);
2079
+ const clockHeader = message.headers?.[HEADER_LAMPORT_CLOCK];
2080
+ if (clockHeader !== void 0) {
2081
+ const raw = Buffer.isBuffer(clockHeader) ? clockHeader.toString() : String(clockHeader);
2082
+ const clock = Number(raw);
2083
+ if (!isNaN(clock) && clock > maxClock) maxClock = clock;
2084
+ }
2085
+ if (remaining.size === 0) {
2086
+ cleanup();
2087
+ resolve();
2088
+ }
2089
+ }
2090
+ })
2091
+ ).catch((err) => {
2092
+ cleanup();
2093
+ reject(err);
2094
+ });
2095
+ });
2096
+ if (maxClock >= 0) {
2097
+ this._lamportClock = maxClock;
2098
+ this.logger.log(
2099
+ `Clock recovery: Lamport clock restored \u2014 next clock will be ${maxClock + 1}`
2100
+ );
2101
+ } else {
2102
+ this.logger.log(
2103
+ "Clock recovery: no x-lamport-clock headers found \u2014 keeping clock at 0"
2104
+ );
2105
+ }
2106
+ }
2003
2107
  /**
2004
2108
  * @internal Not part of `IKafkaClient` — use `disconnect()` for full teardown.
2005
2109
  */
@@ -2658,4 +2762,4 @@ export {
2658
2762
  KafkaClient,
2659
2763
  topic
2660
2764
  };
2661
- //# sourceMappingURL=chunk-XP7LLRGQ.mjs.map
2765
+ //# sourceMappingURL=chunk-BVWRZTMD.mjs.map