kafka-ts 0.0.14 → 0.0.17-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -38,7 +38,7 @@ const consumer = await kafka.startConsumer({
38
38
  ```typescript
39
39
  export const producer = kafka.createProducer();
40
40
 
41
- await producer.send([{ topic: 'my-topic', partition: 0, key: 'key', value: 'value' }]);
41
+ await producer.send([{ topic: 'my-topic', key: 'key', value: 'value' }]);
42
42
  ```
43
43
 
44
44
  #### Low-level API
@@ -116,6 +116,26 @@ After each batch is processed, the consumer will commit offsets for the processe
116
116
 
117
117
  `concurrency` controls how many aforementioned batches are processed concurrently.
118
118
 
119
+ #### Partitioning
120
+
121
+ By default, messages are partitioned by the message key, or round-robin if the key is null or undefined. The partition can be overridden via the `partition` property in the message. You can also override the default partitioner per producer instance via `kafka.createProducer({ partitioner: customPartitioner })`.
122
+
123
+ A simple example of how to partition messages by the value of the message header `x-partition-key`:
124
+
125
+ ```typescript
126
+ import type { Partitioner } from 'kafka-ts';
127
+ import { defaultPartitioner } from 'kafka-ts';
128
+
129
+ const myPartitioner: Partitioner = (context) => {
130
+ const partition = defaultPartitioner(context);
131
+ return (message) => partition({ ...message, key: message.headers?.['x-partition-key'] });
132
+ };
133
+
134
+ const producer = kafka.createProducer({ partitioner: myPartitioner });
135
+
136
+ await producer.send([{ topic: 'my-topic', value: 'value', headers: { 'x-partition-key': '123' } }]);
137
+ ```
138
+
119
139
  ## Motivation
120
140
 
121
141
  The existing low-level libraries (e.g. node-rdkafka) are bindings on librdkafka, which doesn't give enough control over the consumer logic.
package/dist/cluster.js CHANGED
@@ -27,7 +27,14 @@ class Cluster {
27
27
  if (!this.seedBroker) {
28
28
  return this.connect();
29
29
  }
30
- await Promise.all([this.seedBroker, ...Object.values(this.brokerById)].map((x) => x.ensureConnected()));
30
+ try {
31
+ await Promise.all([this.seedBroker, ...Object.values(this.brokerById)].map((x) => x.ensureConnected()));
32
+ }
33
+ catch {
34
+ logger_1.log.warn('Failed to connect to known brokers, reconnecting...');
35
+ await this.disconnect();
36
+ return this.connect();
37
+ }
31
38
  }
32
39
  async disconnect() {
33
40
  await Promise.all([
@@ -84,7 +84,7 @@ class Connection {
84
84
  disconnect() {
85
85
  this.socket.removeAllListeners();
86
86
  return new Promise((resolve) => {
87
- if (this.socket.pending) {
87
+ if (!this.isConnected()) {
88
88
  return resolve();
89
89
  }
90
90
  this.socket.end(resolve);
@@ -122,6 +122,7 @@ class Connection {
122
122
  }
123
123
  catch (error) {
124
124
  if (error instanceof error_1.KafkaTSApiError) {
125
+ error.apiName = apiName;
125
126
  error.request = body;
126
127
  }
127
128
  throw error;
@@ -42,6 +42,7 @@ export declare class Consumer extends EventEmitter<{
42
42
  start(): Promise<void>;
43
43
  close(force?: boolean): Promise<void>;
44
44
  private startFetchManager;
45
+ private waitForReassignment;
45
46
  private process;
46
47
  private fetch;
47
48
  }
@@ -80,7 +80,7 @@ class Consumer extends events_1.default {
80
80
  this.stopHook = undefined;
81
81
  try {
82
82
  await this.cluster.connect();
83
- await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
83
+ await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
84
84
  this.metadata.setAssignment(this.metadata.getTopicPartitions());
85
85
  await this.offsetManager.fetchOffsets({ fromBeginning });
86
86
  await this.consumerGroup?.init();
@@ -131,15 +131,13 @@ class Consumer extends events_1.default {
131
131
  });
132
132
  await this.fetchManager.start();
133
133
  if (!nodeAssignments.length) {
134
- logger_1.log.debug('No partitions assigned. Waiting for reassignment...', { groupId });
135
- await (0, delay_1.delay)(this.options.maxWaitMs);
136
- this.consumerGroup?.handleLastHeartbeat();
134
+ await this.waitForReassignment();
137
135
  }
138
136
  }
139
137
  catch (error) {
140
138
  await this.fetchManager?.stop();
141
139
  if (error.errorCode === api_1.API_ERROR.REBALANCE_IN_PROGRESS) {
142
- logger_1.log.debug('Rebalance in progress...');
140
+ logger_1.log.debug('Rebalance in progress...', { apiName: error.apiName, groupId });
143
141
  continue;
144
142
  }
145
143
  if (error.errorCode === api_1.API_ERROR.FENCED_INSTANCE_ID) {
@@ -162,6 +160,14 @@ class Consumer extends events_1.default {
162
160
  }
163
161
  this.stopHook?.();
164
162
  }
163
+ async waitForReassignment() {
164
+ const { groupId } = this.options;
165
+ logger_1.log.debug('No partitions assigned. Waiting for reassignment...', { groupId });
166
+ while (!this.stopHook) {
167
+ await (0, delay_1.delay)(1000);
168
+ this.consumerGroup?.handleLastHeartbeat();
169
+ }
170
+ }
165
171
  async process(messages) {
166
172
  const { options } = this;
167
173
  const { retrier } = options;
@@ -19,6 +19,9 @@ export declare class Metadata {
19
19
  topics: string[];
20
20
  allowTopicAutoCreation: boolean;
21
21
  }): Promise<void>;
22
- private fetchMetadata;
22
+ fetchMetadata({ topics, allowTopicAutoCreation, }: {
23
+ topics: string[] | null;
24
+ allowTopicAutoCreation: boolean;
25
+ }): Promise<void>;
23
26
  }
24
27
  export {};
package/dist/metadata.js CHANGED
@@ -104,3 +104,9 @@ __decorate([
104
104
  __metadata("design:paramtypes", [Object]),
105
105
  __metadata("design:returntype", Promise)
106
106
  ], Metadata.prototype, "fetchMetadataIfNecessary", null);
107
+ __decorate([
108
+ trace(),
109
+ __metadata("design:type", Function),
110
+ __metadata("design:paramtypes", [Object]),
111
+ __metadata("design:returntype", Promise)
112
+ ], Metadata.prototype, "fetchMetadata", null);
@@ -76,7 +76,10 @@ class Producer {
76
76
  offsetDelta: index,
77
77
  key: message.key ?? null,
78
78
  value: message.value,
79
- headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({ key, value })),
79
+ headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
80
+ key,
81
+ value,
82
+ })),
80
83
  })),
81
84
  };
82
85
  }),
@@ -95,7 +98,10 @@ class Producer {
95
98
  }));
96
99
  }
97
100
  catch (error) {
98
- if ((error instanceof error_1.KafkaTSApiError) && error.errorCode === api_1.API_ERROR.OUT_OF_ORDER_SEQUENCE_NUMBER) {
101
+ if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.NOT_LEADER_OR_FOLLOWER) {
102
+ await this.metadata.fetchMetadata({ topics, allowTopicAutoCreation });
103
+ }
104
+ if (error instanceof error_1.KafkaTSApiError && error.errorCode === api_1.API_ERROR.OUT_OF_ORDER_SEQUENCE_NUMBER) {
99
105
  await this.initProducerId();
100
106
  }
101
107
  throw error;
@@ -5,6 +5,7 @@ export declare class KafkaTSApiError<T = any> extends KafkaTSError {
5
5
  errorCode: number;
6
6
  errorMessage: string | null;
7
7
  response: T;
8
+ apiName: string | undefined;
8
9
  request: unknown | undefined;
9
10
  constructor(errorCode: number, errorMessage: string | null, response: T);
10
11
  }
@@ -13,6 +13,7 @@ class KafkaTSApiError extends KafkaTSError {
13
13
  errorCode;
14
14
  errorMessage;
15
15
  response;
16
+ apiName;
16
17
  request;
17
18
  constructor(errorCode, errorMessage, response) {
18
19
  const [errorName] = Object.entries(api_1.API_ERROR).find(([, value]) => value === errorCode) ?? ['UNKNOWN'];
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "kafka-ts",
3
- "version": "0.0.14",
3
+ "version": "0.0.17-beta.0",
4
4
  "main": "dist/index.js",
5
5
  "author": "Priit Käärd",
6
6
  "license": "MIT",