kafka-ts 0.0.1-beta.2 → 0.0.1-beta.4

package/src/api/fetch.ts CHANGED
@@ -1,3 +1,4 @@
+import { findCodec } from '../codecs';
 import { createApi } from '../utils/api';
 import { Decoder } from '../utils/decoder';
 import { KafkaTSApiError } from '../utils/error';
@@ -68,7 +69,7 @@ export const FETCH = createApi({
             )
             .writeCompactString(data.rackId)
             .writeUVarInt(0),
-    response: (decoder) => {
+    response: async (decoder) => {
         const result = {
             _tag: decoder.readTagBuffer(),
             throttleTimeMs: decoder.readInt32(),
@@ -88,56 +89,105 @@ export const FETCH = createApi({
                         _tag: abortedTransaction.readTagBuffer(),
                     })),
                     preferredReadReplica: partition.readInt32(),
-                    records: decodeRecords(partition),
+                    records: decodeRecordBatch(partition),
                     _tag: partition.readTagBuffer(),
                 })),
                 _tag: response.readTagBuffer(),
             })),
             _tag2: decoder.readTagBuffer(),
         };
+
         if (result.errorCode) throw new KafkaTSApiError(result.errorCode, null, result);
         result.responses.forEach((response) => {
             response.partitions.forEach((partition) => {
                 if (partition.errorCode) throw new KafkaTSApiError(partition.errorCode, null, result);
             });
         });
-        return result;
+
+        const decompressedResponses = await Promise.all(
+            result.responses.map(async (response) => ({
+                ...response,
+                partitions: await Promise.all(
+                    response.partitions.map(async (partition) => ({
+                        ...partition,
+                        records: await Promise.all(
+                            partition.records.map(async ({ recordsLength, compressedRecords, ...record }) => {
+                                const { decompress } = findCodec(record.compression);
+                                const decompressedRecords = await decompress(compressedRecords);
+                                const decompressedDecoder = new Decoder(
+                                    Buffer.concat([recordsLength, decompressedRecords]),
+                                );
+                                return { ...record, records: decodeRecord(decompressedDecoder) };
+                            }),
+                        ),
+                    })),
+                ),
+            })),
+        );
+
+        return { ...result, responses: decompressedResponses };
     },
 });

-const decodeRecords = (decoder: Decoder) => {
+const decodeRecordBatch = (decoder: Decoder) => {
     const size = decoder.readUVarInt() - 1;
     if (size <= 0) {
         return [];
     }

+    const recordBatchDecoder = new Decoder(decoder.read(size));
+
     const results = [];
-    while (decoder.getBufferLength() > decoder.getOffset() + 49) {
+    while (recordBatchDecoder.getBufferLength() > recordBatchDecoder.getOffset()) {
+        const baseOffset = recordBatchDecoder.readInt64();
+        const batchLength = recordBatchDecoder.readInt32();
+
+        const batchDecoder = new Decoder(recordBatchDecoder.read(batchLength));
+
+        const result = {
+            baseOffset,
+            batchLength,
+            partitionLeaderEpoch: batchDecoder.readInt32(),
+            magic: batchDecoder.readInt8(),
+            crc: batchDecoder.readUInt32(),
+            attributes: batchDecoder.readInt16(),
+            lastOffsetDelta: batchDecoder.readInt32(),
+            baseTimestamp: batchDecoder.readInt64(),
+            maxTimestamp: batchDecoder.readInt64(),
+            producerId: batchDecoder.readInt64(),
+            producerEpoch: batchDecoder.readInt16(),
+            baseSequence: batchDecoder.readInt32(),
+            recordsLength: batchDecoder.read(4),
+            compressedRecords: batchDecoder.read(),
+        };
+
+        const compression = result.attributes & 0x07;
+        const timestampType = (result.attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';
+        const isTransactional = !!((result.attributes & 0x10) >> 4);
+        const isControlBatch = !!((result.attributes & 0x20) >> 5);
+        const hasDeleteHorizonMs = !!((result.attributes & 0x40) >> 6);
+
         results.push({
-            baseOffset: decoder.readInt64(),
-            batchLength: decoder.readInt32(),
-            partitionLeaderEpoch: decoder.readInt32(),
-            magic: decoder.readInt8(),
-            crc: decoder.readUInt32(),
-            attributes: decoder.readInt16(),
-            lastOffsetDelta: decoder.readInt32(),
-            baseTimestamp: decoder.readInt64(),
-            maxTimestamp: decoder.readInt64(),
-            producerId: decoder.readInt64(),
-            producerEpoch: decoder.readInt16(),
-            baseSequence: decoder.readInt32(),
-            records: decoder.readRecords((record) => ({
-                attributes: record.readInt8(),
-                timestampDelta: record.readVarLong(),
-                offsetDelta: record.readVarInt(),
-                key: record.readVarIntBuffer(),
-                value: record.readVarIntBuffer(),
-                headers: record.readCompactArray((header) => ({
-                    key: header.readVarIntBuffer(),
-                    value: header.readVarIntBuffer(),
-                })),
-            })),
+            ...result,
+            compression,
+            timestampType,
+            isTransactional,
+            isControlBatch,
+            hasDeleteHorizonMs,
         });
     }
     return results;
 };
+
+const decodeRecord = (decoder: Decoder) =>
+    decoder.readRecords((record) => ({
+        attributes: record.readInt8(),
+        timestampDelta: record.readVarLong(),
+        offsetDelta: record.readVarInt(),
+        key: record.readVarIntBuffer(),
+        value: record.readVarIntBuffer(),
+        headers: record.readVarIntArray((header) => ({
+            key: header.readVarIntBuffer(),
+            value: header.readVarIntBuffer(),
+        })),
+    }));
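The new flag extraction in `decodeRecordBatch` follows the Kafka record batch (v2) attributes bitfield: the low three bits select the compression codec, bit 3 the timestamp type, bit 4 the transactional flag, bit 5 the control-batch flag, and bit 6 the delete-horizon flag. A standalone restatement of the same bit layout, with a made-up `attributes` value for illustration:

```ts
// Kafka record batch attributes bitfield, as decoded in decodeRecordBatch above.
const attributes = 0b0010001; // example: gzip-compressed, transactional batch

const compression = attributes & 0x07; // 1 -> GZIP (0 none, 2 snappy, 3 lz4, 4 zstd)
const timestampType = attributes & 0x08 ? 'LogAppendTime' : 'CreateTime'; // bit 3
const isTransactional = !!(attributes & 0x10); // bit 4 -> true here
const isControlBatch = !!(attributes & 0x20); // bit 5
const hasDeleteHorizonMs = !!(attributes & 0x40); // bit 6

console.log({ compression, timestampType, isTransactional, isControlBatch, hasDeleteHorizonMs });
// { compression: 1, timestampType: 'CreateTime', isTransactional: true, ... }
```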
@@ -21,7 +21,8 @@ describe.sequential('Request handler', () => {
     let cluster: Cluster;

     beforeAll(async () => {
-        cluster = await kafka.createCluster().connect();
+        cluster = await kafka.createCluster();
+        await cluster.connect();

         const metadataResult = await cluster.sendRequest(API.METADATA, {
             topics: null,
@@ -52,8 +53,8 @@ describe.sequential('Request handler', () => {
             topics: [
                 {
                     name: 'kafka-ts-test-topic',
-                    numPartitions: 1,
-                    replicationFactor: 1,
+                    numPartitions: 10,
+                    replicationFactor: 3,
                     assignments: [],
                     configs: [],
                 },
@@ -89,6 +90,7 @@ describe.sequential('Request handler', () => {
         expect(result).toMatchSnapshot();
     });

+    let partitionIndex = 0;
     let leaderId = 0;

     it('should request metadata for a topic', async () => {
@@ -97,6 +99,7 @@ describe.sequential('Request handler', () => {
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
+        partitionIndex = result.topics[0].partitions[0].partitionIndex;
         leaderId = result.topics[0].partitions[0].leaderId;
         result.controllerId = 0;
         result.topics.forEach((topic) => {
@@ -134,7 +137,7 @@ describe.sequential('Request handler', () => {
                     name: 'kafka-ts-test-topic',
                     partitionData: [
                         {
-                            index: 0,
+                            index: partitionIndex,
                             baseOffset: 0n,
                             partitionLeaderEpoch: 0,
                             attributes: 0,
@@ -180,7 +183,7 @@ describe.sequential('Request handler', () => {
             topicId,
             partitions: [
                 {
-                    partition: 0,
+                    partition: partitionIndex,
                     currentLeaderEpoch: -1,
                     fetchOffset: 0n,
                     lastFetchedEpoch: 0,
package/src/cluster.ts CHANGED
@@ -3,7 +3,8 @@ import { TLSSocketOptions } from 'tls';
 import { API } from './api';
 import { Broker, SASLProvider } from './broker';
 import { SendRequest } from './connection';
-import { ConnectionError, KafkaTSError } from './utils/error';
+import { KafkaTSError } from './utils/error';
+import { log } from './utils/logger';

 type ClusterOptions = {
     clientId: string | null;
@@ -13,73 +14,77 @@ type ClusterOptions = {
 };

 export class Cluster {
-    private seedBroker: Broker;
-    private brokerById: Record<number, Broker> = {};
+    private seedBroker = new Broker({ clientId: null, sasl: null, ssl: null, options: { port: 9092 } });
+    private brokers: { nodeId: number; broker: Broker }[] = [];
+    private brokerMetadata: Record<number, Awaited<ReturnType<(typeof API.METADATA)['response']>>['brokers'][number]> =
+        {};

-    constructor(private options: ClusterOptions) {
-        this.seedBroker = new Broker({
-            clientId: this.options.clientId,
-            sasl: this.options.sasl,
-            ssl: this.options.ssl,
-            options: this.options.bootstrapServers[0],
-        });
-    }
+    constructor(private options: ClusterOptions) {}

     public async connect() {
-        await this.connectSeedBroker();
+        this.seedBroker = await this.findSeedBroker();
+
         const metadata = await this.sendRequest(API.METADATA, {
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
             topics: [],
         });
-
-        this.brokerById = Object.fromEntries(
-            metadata.brokers.map(({ nodeId, ...options }) => [
-                nodeId,
-                new Broker({
-                    clientId: this.options.clientId,
-                    sasl: this.options.sasl,
-                    ssl: this.options.ssl,
-                    options,
-                }),
-            ]),
-        );
-        return this;
+        this.brokerMetadata = Object.fromEntries(metadata.brokers.map((options) => [options.nodeId, options]));
     }

     public async disconnect() {
-        await Promise.all([
-            this.seedBroker.disconnect(),
-            ...Object.values(this.brokerById).map((broker) => broker.disconnect()),
-        ]);
+        await Promise.all(this.brokers.map((x) => x.broker.disconnect()));
     }

+    public setSeedBroker = async (nodeId: number) => {
+        await this.releaseBroker(this.seedBroker);
+        this.seedBroker = await this.acquireBroker(nodeId);
+    };
+
     public sendRequest: SendRequest = (...args) => this.seedBroker.sendRequest(...args);

     public sendRequestToNode =
         (nodeId: number): SendRequest =>
         async (...args) => {
-            const broker = this.brokerById[nodeId];
+            let broker = this.brokers.find((x) => x.nodeId === nodeId)?.broker;
             if (!broker) {
-                throw new ConnectionError(`Broker ${nodeId} is not available`);
+                broker = await this.acquireBroker(nodeId);
             }
-            await broker.ensureConnected();
             return broker.sendRequest(...args);
         };

-    private async connectSeedBroker() {
+    public async acquireBroker(nodeId: number) {
+        const broker = new Broker({
+            clientId: this.options.clientId,
+            sasl: this.options.sasl,
+            ssl: this.options.ssl,
+            options: this.brokerMetadata[nodeId],
+        });
+        this.brokers.push({ nodeId, broker });
+        await broker.connect();
+        return broker;
+    }
+
+    public async releaseBroker(broker: Broker) {
+        await broker.disconnect();
+        this.brokers = this.brokers.filter((x) => x.broker !== broker);
+    };
+
+    private async findSeedBroker() {
         const randomizedBrokers = this.options.bootstrapServers.toSorted(() => Math.random() - 0.5);
         for (const options of randomizedBrokers) {
             try {
-                this.seedBroker = await new Broker({
+                const broker = await new Broker({
                     clientId: this.options.clientId,
                     sasl: this.options.sasl,
                     ssl: this.options.ssl,
                     options,
-                }).connect();
-                return;
+                });
+                await broker.connect();
+                this.brokers.push({ nodeId: -1, broker });
+                return broker;
             } catch (error) {
-                console.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
+                log.warn(`Failed to connect to seed broker ${options.host}:${options.port}`, error);
             }
         }
         throw new KafkaTSError('No seed brokers found');
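Instead of eagerly connecting to every broker listed in the metadata response, the cluster now caches broker metadata and opens connections on demand: `sendRequestToNode` acquires a broker lazily on first use, and `setSeedBroker` lets a consumer group pin its coordinator as the default request target. A sketch of the lifecycle these methods imply (constructor options abbreviated; a `Cluster` would normally come from the package's `createCluster` helper):

```ts
// Sketch of the lazy broker lifecycle (options abbreviated, node IDs made up).
const cluster = new Cluster({
    clientId: 'demo',
    bootstrapServers: [{ host: 'localhost', port: 9092 }],
    sasl: null,
    ssl: null,
});

await cluster.connect(); // finds a reachable seed broker, caches broker metadata
const sendToNode3 = cluster.sendRequestToNode(3); // node 3 is connected on first call, then reused
await cluster.setSeedBroker(3); // releases the old seed broker, pins node 3 (e.g. a group coordinator)
await cluster.disconnect(); // closes every broker acquired along the way
```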
package/src/codecs/gzip.ts ADDED
@@ -0,0 +1,9 @@
+import { gzip, unzip } from 'zlib';
+import { Codec } from './types';
+
+export const GZIP: Codec = {
+    compress: async (data) =>
+        new Promise<Buffer>((resolve, reject) => gzip(data, (err, result) => (err ? reject(err) : resolve(result)))),
+    decompress: async (data) =>
+        new Promise<Buffer>((resolve, reject) => unzip(data, (err, result) => (err ? reject(err) : resolve(result)))),
+};
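The hand-rolled Promise wrappers above behave the same as promisifying Node's callback-style zlib functions. An equivalent formulation (a sketch, not the published code; `GZIP_ALT` is a made-up name):

```ts
import { promisify } from 'util';
import { gzip, unzip } from 'zlib';
import { Codec } from './types';

// util.promisify turns the (data, callback) zlib APIs into Promise-returning functions.
const gzipAsync = promisify(gzip);
const unzipAsync = promisify(unzip);

export const GZIP_ALT: Codec = {
    compress: (data) => gzipAsync(data),
    decompress: (data) => unzipAsync(data),
};
```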
package/src/codecs/index.ts ADDED
@@ -0,0 +1,16 @@
+import { GZIP } from './gzip';
+import { NONE } from './none';
+import { Codec } from './types';
+
+const codecs: Record<number, Codec> = {
+    0: NONE,
+    1: GZIP,
+};
+
+export const findCodec = (type: number) => {
+    const codec = codecs[type];
+    if (!codec) {
+        throw new Error(`Unsupported codec: ${type}`);
+    }
+    return codec;
+};
package/src/codecs/none.ts ADDED
@@ -0,0 +1,6 @@
+import { Codec } from './types';
+
+export const NONE: Codec = {
+    compress: (data: Buffer) => Promise.resolve(data),
+    decompress: (data: Buffer) => Promise.resolve(data),
+};
package/src/codecs/types.ts ADDED
@@ -0,0 +1,4 @@
+export type Codec = {
+    compress: (data: Buffer) => Promise<Buffer>;
+    decompress: (data: Buffer) => Promise<Buffer>;
+};
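Together these files give the fetch path a small codec registry: `findCodec` maps the compression bits of a record batch's attributes to a `Codec`, and unknown codec IDs fail fast. A hypothetical round trip through the registry (the buffer contents and import path are illustrative):

```ts
import { findCodec } from './codecs'; // path assumed relative to src/

const demo = async () => {
    const { compress, decompress } = findCodec(1); // 1 -> GZIP per the registry above
    const original = Buffer.from('hello kafka-ts');
    const restored = await decompress(await compress(original));
    console.log(restored.equals(original)); // true
};

demo();
// findCodec(2) would throw 'Unsupported codec: 2', since only NONE and GZIP
// are registered in this version.
```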
package/src/connection.ts CHANGED
@@ -59,6 +59,7 @@ export class Connection {
         });
     }

+    @trace()
     public disconnect() {
         this.socket.removeAllListeners();
         return new Promise<void>((resolve) => {
@@ -69,7 +70,7 @@ export class Connection {
         });
     }

-    @trace((api, body) => ({ apiName: getApiName(api), body }))
+    @trace((api, body) => ({ message: getApiName(api), body }))
     public async sendRequest<Request, Response>(api: Api<Request, Response>, body: Request): Promise<Response> {
         const correlationId = this.nextCorrelationId();

@@ -90,7 +91,7 @@ export class Connection {
                 reject(error);
             }
         });
-        const response = api.response(responseDecoder);
+        const response = await api.response(responseDecoder);

         assert(
             responseDecoder.getOffset() - 4 === responseSize,
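The `@trace()` decorator comes from the package's internal tracer utility; its implementation is not part of this diff. As a rough sketch of the pattern (a hypothetical stand-in, not kafka-ts's actual code), a legacy TypeScript method decorator that derives a log message from the call's arguments might look like this (requires `"experimentalDecorators": true`):

```ts
// Hypothetical tracing decorator; names and behavior are assumptions.
const trace =
    (mapper?: (...args: any[]) => { message?: string }) =>
    (_target: object, propertyKey: string, descriptor: PropertyDescriptor) => {
        const original = descriptor.value;
        descriptor.value = async function (this: unknown, ...args: unknown[]) {
            const meta = mapper?.(...args) ?? {};
            const start = Date.now();
            try {
                return await original.apply(this, args); // run the wrapped method
            } finally {
                console.debug(meta.message ?? propertyKey, { durationMs: Date.now() - start });
            }
        };
    };
```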
package/src/consumer/consumer-group.ts CHANGED
@@ -30,6 +30,8 @@ export class ConsumerGroup {

     public async join() {
         await this.findCoordinator();
+        await this.options.cluster.setSeedBroker(this.coordinatorId);
+
         await this.joinGroup();
         await this.syncGroup();
         await this.offsetFetch();
@@ -70,7 +72,7 @@ export class ConsumerGroup {
     private async joinGroup(): Promise<void> {
         const { cluster, groupId, groupInstanceId, sessionTimeoutMs, rebalanceTimeoutMs, topics } = this.options;
         try {
-            const response = await cluster.sendRequestToNode(this.coordinatorId)(API.JOIN_GROUP, {
+            const response = await cluster.sendRequest(API.JOIN_GROUP, {
                 groupId,
                 groupInstanceId,
                 memberId: this.memberId,
@@ -113,7 +115,7 @@ export class ConsumerGroup {
             assignments = Object.entries(memberAssignments).map(([memberId, assignment]) => ({ memberId, assignment }));
         }

-        const response = await cluster.sendRequestToNode(this.coordinatorId)(API.SYNC_GROUP, {
+        const response = await cluster.sendRequest(API.SYNC_GROUP, {
             groupId,
             groupInstanceId,
             memberId: this.memberId,
@@ -144,7 +146,7 @@ export class ConsumerGroup {
         };
         if (!request.groups.length) return;

-        const response = await cluster.sendRequestToNode(this.coordinatorId)(API.OFFSET_FETCH, request);
+        const response = await cluster.sendRequest(API.OFFSET_FETCH, request);
         response.groups.forEach((group) => {
             group.topics.forEach((topic) => {
                 topic.partitions
@@ -177,13 +179,13 @@ export class ConsumerGroup {
         if (!request.topics.length) {
             return;
         }
-        await cluster.sendRequestToNode(this.coordinatorId)(API.OFFSET_COMMIT, request);
+        await cluster.sendRequest(API.OFFSET_COMMIT, request);
         offsetManager.flush();
     }

     public async heartbeat() {
         const { cluster, groupId, groupInstanceId } = this.options;
-        await cluster.sendRequestToNode(this.coordinatorId)(API.HEARTBEAT, {
+        await cluster.sendRequest(API.HEARTBEAT, {
             groupId,
             groupInstanceId,
             memberId: this.memberId,
@@ -195,7 +197,7 @@ export class ConsumerGroup {
         const { cluster, groupId, groupInstanceId } = this.options;
         this.stopHeartbeater();
         try {
-            await cluster.sendRequestToNode(this.coordinatorId)(API.LEAVE_GROUP, {
+            await cluster.sendRequest(API.LEAVE_GROUP, {
                 groupId,
                 members: [{ memberId: this.memberId, groupInstanceId, reason: null }],
             });
package/src/consumer/consumer.ts CHANGED
@@ -2,16 +2,19 @@ import { API, API_ERROR } from '../api';
 import { IsolationLevel } from '../api/fetch';
 import { Assignment } from '../api/sync-group';
 import { Cluster } from '../cluster';
-import { distributeAssignmentsToNodes } from '../distributors/assignments-to-replicas';
+import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
 import { Message } from '../types';
 import { delay } from '../utils/delay';
 import { ConnectionError, KafkaTSApiError } from '../utils/error';
-import { defaultRetrier, Retrier } from '../utils/retrier';
+import { log } from '../utils/logger';
+import { createTracer } from '../utils/tracer';
 import { ConsumerGroup } from './consumer-group';
 import { ConsumerMetadata } from './consumer-metadata';
-import { FetchManager, BatchGranularity } from './fetch-manager';
+import { BatchGranularity, FetchManager } from './fetch-manager';
 import { OffsetManager } from './offset-manager';

+const trace = createTracer('Consumer');
+
 export type ConsumerOptions = {
     topics: string[];
     groupId?: string | null;
@@ -26,7 +29,6 @@ export type ConsumerOptions = {
     partitionMaxBytes?: number;
     allowTopicAutoCreation?: boolean;
     fromBeginning?: boolean;
-    retrier?: Retrier;
     batchGranularity?: BatchGranularity;
     concurrency?: number;
 } & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
@@ -57,7 +59,6 @@ export class Consumer {
         isolationLevel: options.isolationLevel ?? IsolationLevel.READ_UNCOMMITTED,
         allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
         fromBeginning: options.fromBeginning ?? false,
-        retrier: options.retrier ?? defaultRetrier,
         batchGranularity: options.batchGranularity ?? 'partition',
         concurrency: options.concurrency ?? 1,
     };
@@ -94,16 +95,17 @@ export class Consumer {
             await this.offsetManager.fetchOffsets({ fromBeginning });
             await this.consumerGroup?.join();
         } catch (error) {
-            console.error(error);
-            console.debug(`Restarting consumer in 1 second...`);
+            log.error('Failed to start consumer', error);
+            log.debug(`Restarting consumer in 1 second...`);
             await delay(1000);

             if (this.stopHook) return (this.stopHook as () => void)();
             return this.close(true).then(() => this.start());
         }
-        setImmediate(() => this.startFetchManager());
+        this.startFetchManager();
     }

+    @trace()
     public async close(force = false): Promise<void> {
         if (!force) {
             await new Promise<void>(async (resolve) => {
@@ -111,22 +113,31 @@ export class Consumer {
                 await this.fetchManager?.stop();
             });
         }
-        await this.consumerGroup
-            ?.leaveGroup()
-            .catch((error) => console.warn(`Failed to leave group: ${error.message}`));
-        await this.cluster.disconnect().catch((error) => console.warn(`Failed to disconnect: ${error.message}`));
+        await this.consumerGroup?.leaveGroup().catch((error) => log.warn(`Failed to leave group: ${error.message}`));
+        await this.cluster.disconnect().catch((error) => log.warn(`Failed to disconnect: ${error.message}`));
     }

     private startFetchManager = async () => {
         const { batchGranularity, concurrency } = this.options;

         while (!this.stopHook) {
+            // TODO: If leader is not available, find another read replica
             const nodeAssignments = Object.entries(
-                distributeAssignmentsToNodes(
-                    this.metadata.getAssignment(),
-                    this.metadata.getTopicPartitionReplicaIds(),
+                distributeMessagesToTopicPartitionLeaders(
+                    Object.entries(this.metadata.getAssignment()).flatMap(([topic, partitions]) =>
+                        partitions.map((partition) => ({ topic, partition })),
+                    ),
+                    this.metadata.getTopicPartitionLeaderIds(),
+                ),
+            ).map(([nodeId, assignment]) => ({
+                nodeId: parseInt(nodeId),
+                assignment: Object.fromEntries(
+                    Object.entries(assignment).map(([topic, partitions]) => [
+                        topic,
+                        Object.keys(partitions).map(Number),
+                    ]),
                 ),
-            ).map(([nodeId, assignment]) => ({ nodeId: parseInt(nodeId), assignment }));
+            }));

             const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
             const numProcessors = Math.min(concurrency, numPartitions);
@@ -145,7 +156,7 @@ export class Consumer {
             await this.fetchManager.start();

             if (!nodeAssignments.length) {
-                console.debug('No partitions assigned. Waiting for reassignment...');
+                log.debug('No partitions assigned. Waiting for reassignment...');
                 await delay(this.options.maxWaitMs);
                 await this.consumerGroup?.handleLastHeartbeat();
             }
@@ -153,11 +164,11 @@ export class Consumer {
             await this.fetchManager.stop();

             if ((error as KafkaTSApiError).errorCode === API_ERROR.REBALANCE_IN_PROGRESS) {
-                console.debug('Rebalance in progress...');
+                log.debug('Rebalance in progress...');
                 continue;
             }
             if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
-                console.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
+                log.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
                 this.close();
                 break;
             }
@@ -165,11 +176,11 @@ export class Consumer {
                 error instanceof ConnectionError ||
                 (error instanceof KafkaTSApiError && error.errorCode === API_ERROR.NOT_COORDINATOR)
             ) {
-                console.debug(`${error.message}. Restarting consumer...`);
+                log.debug(`${error.message}. Restarting consumer...`);
                 this.close().then(() => this.start());
                 break;
             }
-            console.error(error);
+            log.error((error as Error).message, error);
             this.close();
             break;
         }
@@ -177,12 +188,12 @@ export class Consumer {
         this.stopHook?.();
     };

+    @trace()
     private async process(messages: Required<Message>[]) {
         const { options } = this;
-        const { retrier } = options;

         if ('onBatch' in options) {
-            await retrier(() => options.onBatch(messages));
+            await options.onBatch(messages);

             messages.forEach(({ topic, partition, offset }) =>
                 this.offsetManager.resolve(topic, partition, offset + 1n),
@@ -190,7 +201,7 @@ export class Consumer {
         } else if ('onMessage' in options) {
             try {
                 for (const message of messages) {
-                    await retrier(() => options.onMessage(message));
+                    await options.onMessage(message);

                     const { topic, partition, offset } = message;
                     this.offsetManager.resolve(topic, partition, offset + 1n);
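With the `retrier` option removed from `ConsumerOptions`, handlers now run exactly once per delivery and retry policy moves to the caller. A minimal sketch of wrapping a handler with application-level retries (the `withRetries` helper, its parameters, and the commented usage are illustrative, not part of kafka-ts):

```ts
// Hypothetical caller-side retry wrapper, replacing the removed `retrier` option.
const withRetries =
    <Args extends unknown[]>(fn: (...args: Args) => unknown, attempts = 3, backoffMs = 500) =>
    async (...args: Args) => {
        for (let attempt = 1; ; attempt++) {
            try {
                return await fn(...args); // success: stop retrying
            } catch (error) {
                if (attempt >= attempts) throw error; // give up after the last attempt
                await new Promise((resolve) => setTimeout(resolve, backoffMs * attempt)); // linear backoff
            }
        }
    };

// Possible usage with an onMessage handler (consumer creation API assumed):
// kafka.startConsumer({ topics: ['my-topic'], onMessage: withRetries(handleMessage) });
```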