kafka-ts 0.0.4 → 0.0.5

package/README.md CHANGED
@@ -85,6 +85,37 @@ See the [examples](./examples) for more detailed examples.
 
  By default, KafkaTS logs using a JSON logger. This can be replaced globally by calling the `setLogger` method (see [src/utils/logger.ts](./src/utils/logger.ts)).
 
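A minimal sketch of a console-backed replacement; the method names (`debug`/`info`/`warn`/`error`) are assumptions based on how the package's own code calls `log`, and the actual `Logger` interface is defined in [src/utils/logger.ts](./src/utils/logger.ts):

```typescript
import { setLogger } from 'kafka-ts';

// Replace the default global JSON logger with console output.
// The logger shape below is illustrative; see src/utils/logger.ts.
setLogger({
    debug: (message: string, ...meta: unknown[]) => console.debug(message, ...meta),
    info: (message: string, ...meta: unknown[]) => console.info(message, ...meta),
    warn: (message: string, ...meta: unknown[]) => console.warn(message, ...meta),
    error: (message: string, ...meta: unknown[]) => console.error(message, ...meta),
});
```
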
+ #### Retries
+
+ By default, KafkaTS retries `onBatch` and `onMessage` using an exponential backoff strategy (see [src/utils/retrier.ts](./src/utils/retrier.ts)). If all retries fail, the offsets of the already-processed messages are committed and the consumer is restarted.
+
+ If you want to skip failed messages or implement a DLQ-like mechanism, you can override the `retrier` option in `startConsumer()` and run your own logic in `onFailure`.
+
+ For example, if you simply want to skip failing messages:
+
+ ```typescript
+ await kafka.startConsumer({
+     // ...
+     retrier: createExponentialBackoffRetrier({ onFailure: () => {} }),
+ });
+ ```
+
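The backoff itself is configurable. A sketch with illustrative values, using the `createExponentialBackoffRetrier` options declared in this release's `retrier.d.ts` (`retries`, `initialDelayMs`, `maxDelayMs`, `multiplier`, `onFailure`):

```typescript
await kafka.startConsumer({
    // ...
    retrier: createExponentialBackoffRetrier({
        retries: 10, // give up after 10 failed attempts (default: 5)
        initialDelayMs: 200, // first retry delay in ms (default: 100)
        maxDelayMs: 5000, // upper bound on the delay (default: 3000)
        multiplier: 2, // delay growth factor per attempt (default: 2)
        onFailure: (error) => {
            // Rethrowing restores the default behavior: commit the
            // processed offsets and restart the consumer.
            throw error;
        },
    }),
});
```
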
+ #### Concurrency control
+
+ Depending on the use case, you might want to control `concurrency` and `batchGranularity`.
+
+ When subscribing to a topic, the consumer group leader distributes all subscribed topic partitions among the consumers in the group. Each consumer then fetches messages only from the partitions assigned to it.
+
+ `batchGranularity` controls how messages are split into batches from a fetch response:
+
+ - **broker** - (default) all messages received from a single Kafka broker will be included in a single batch.
+ - **topic** - all messages received from a single broker and topic will be included in a single batch.
+ - **partition** - a batch will only include messages from a single partition.
+
+ After each batch is processed, the consumer commits offsets for the processed messages. The more granular the batch, the more often offsets are committed.
+
+ `concurrency` controls how many of these batches are processed concurrently.
+
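Putting the two options together, a sketch with illustrative values:

```typescript
await kafka.startConsumer({
    // ...
    batchGranularity: 'partition', // one batch per partition, so offsets are committed most often
    concurrency: 4, // process up to 4 batches at a time
    onBatch: async (messages) => {
        // each invocation receives messages from a single partition
    },
});
```
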
  ## Motivation
 
  The existing low-level libraries (e.g. node-rdkafka) are bindings to librdkafka, which doesn't give enough control over the consumer logic.
@@ -132,7 +163,7 @@ Custom SASL mechanisms can be implemented following the `SASLProvider` interface
  | partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response |
  | allowTopicAutoCreation | boolean | false | false | Allow Kafka to auto-create the topic when it doesn't exist |
  | fromBeginning | boolean | false | false | Start consuming from the beginning of the topic |
- | batchGranularity | BatchGranularity | false | partition | Controls messages split from fetch response. Also controls how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single batch<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assignned topics and partitions |
+ | batchGranularity | BatchGranularity | false | broker | Controls how messages from the fetch response are split into batches, and therefore how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single partition<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assigned topics and partitions |
  | concurrency | number | false | 1 | How many batches to process concurrently |
  | onMessage | (message: Message) => Promise<unknown> | true | | Callback executed on every message |
  | onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed on every batch of messages (based on **batchGranularity**) |
@@ -3,6 +3,7 @@ import EventEmitter from 'events';
  import { IsolationLevel } from '../api/fetch';
  import { Cluster } from '../cluster';
  import { Message } from '../types';
+ import { Retrier } from '../utils/retrier';
  import { BatchGranularity } from './fetch-manager';
  export type ConsumerOptions = {
      topics: string[];
@@ -20,6 +21,7 @@ export type ConsumerOptions = {
      fromBeginning?: boolean;
      batchGranularity?: BatchGranularity;
      concurrency?: number;
+     retrier?: Retrier;
  } & ({
      onBatch: (messages: Required<Message>[]) => unknown;
  } | {
@@ -19,6 +19,7 @@ const messages_to_topic_partition_leaders_1 = require("../distributors/messages-
  const delay_1 = require("../utils/delay");
  const error_1 = require("../utils/error");
  const logger_1 = require("../utils/logger");
+ const retrier_1 = require("../utils/retrier");
  const tracer_1 = require("../utils/tracer");
  const consumer_group_1 = require("./consumer-group");
  const consumer_metadata_1 = require("./consumer-metadata");
@@ -50,8 +51,9 @@ class Consumer extends events_1.default {
      isolationLevel: options.isolationLevel ?? 0 /* IsolationLevel.READ_UNCOMMITTED */,
      allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
      fromBeginning: options.fromBeginning ?? false,
-     batchGranularity: options.batchGranularity ?? 'partition',
+     batchGranularity: options.batchGranularity ?? 'broker',
      concurrency: options.concurrency ?? 1,
+     retrier: options.retrier ?? retrier_1.defaultRetrier,
  };
  this.metadata = new consumer_metadata_1.ConsumerMetadata({ cluster: this.cluster });
  this.offsetManager = new offset_manager_1.OffsetManager({
@@ -84,7 +86,7 @@ class Consumer extends events_1.default {
      await this.consumerGroup?.join();
  }
  catch (error) {
-     logger_1.log.warn('Failed to start consumer', error);
+     logger_1.log.error('Failed to start consumer', error);
      logger_1.log.debug(`Restarting consumer in 1 second...`);
      await (0, delay_1.delay)(1000);
      if (this.stopHook)
@@ -152,7 +154,9 @@ class Consumer extends events_1.default {
          break;
      }
      logger_1.log.error(error.message, error);
-     this.close();
+     logger_1.log.debug(`Restarting consumer in 1 second...`);
+     await (0, delay_1.delay)(1000);
+     this.close().then(() => this.start());
      break;
  }
  }
@@ -160,24 +164,31 @@ class Consumer extends events_1.default {
  }
  async process(messages) {
      const { options } = this;
+     const { retrier } = options;
      const topicPartitions = {};
      for (const { topic, partition } of messages) {
          topicPartitions[topic] ??= new Set();
          topicPartitions[topic].add(partition);
      }
+     const commit = async () => {
+         await this.consumerGroup?.offsetCommit(topicPartitions);
+         this.offsetManager.flush(topicPartitions);
+     };
      if ('onBatch' in options) {
-         await options.onBatch(messages);
+         await retrier(() => options.onBatch(messages));
          messages.forEach(({ topic, partition, offset }) => this.offsetManager.resolve(topic, partition, offset + 1n));
      }
      else if ('onMessage' in options) {
          for (const message of messages) {
-             await options.onMessage(message);
+             await retrier(() => options.onMessage(message)).catch(async (error) => {
+                 await commit().catch();
+                 throw error;
+             });
              const { topic, partition, offset } = message;
              this.offsetManager.resolve(topic, partition, offset + 1n);
          }
      }
-     await this.consumerGroup?.offsetCommit(topicPartitions);
-     this.offsetManager.flush(topicPartitions);
+     await commit();
  }
  fetch(nodeId, assignment) {
      const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
package/dist/index.d.ts CHANGED
@@ -6,4 +6,5 @@ export * from './distributors/partitioner';
  export * from './types';
  export * from './utils/error';
  export * from './utils/logger';
+ export * from './utils/retrier';
  export { Tracer, setTracer } from './utils/tracer';
package/dist/index.js CHANGED
@@ -22,5 +22,6 @@ __exportStar(require("./distributors/partitioner"), exports);
  __exportStar(require("./types"), exports);
  __exportStar(require("./utils/error"), exports);
  __exportStar(require("./utils/logger"), exports);
+ __exportStar(require("./utils/retrier"), exports);
  var tracer_1 = require("./utils/tracer");
  Object.defineProperty(exports, "setTracer", { enumerable: true, get: function () { return tracer_1.setTracer; } });
@@ -1,10 +1,9 @@
  export type Retrier = (func: () => unknown) => Promise<void>;
- export declare const createExponentialBackoffRetrier: (options: {
-     onFailure?: (error: unknown) => Promise<void>;
-     maxRetries?: number;
+ export declare const createExponentialBackoffRetrier: ({ retries, initialDelayMs, maxDelayMs, multiplier, onFailure, }?: {
+     retries?: number;
      initialDelayMs?: number;
      maxDelayMs?: number;
      multiplier?: number;
-     retry?: number;
+     onFailure?: (error: unknown) => unknown;
  }) => Retrier;
  export declare const defaultRetrier: Retrier;
@@ -2,21 +2,26 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.defaultRetrier = exports.createExponentialBackoffRetrier = void 0;
  const delay_1 = require("./delay");
- const createExponentialBackoffRetrier = (options) => async (func) => {
-     try {
-         await func();
-     }
-     catch (error) {
-         const { retry = 0, maxRetries = 3, onFailure = (error) => {
-             throw error;
-         }, initialDelayMs = 100, maxDelayMs = 3000, multiplier = 2, } = options;
-         const isMaxRetriesExceeded = retry > maxRetries;
-         if (isMaxRetriesExceeded)
-             return onFailure(error);
-         const delayMs = Math.min(maxDelayMs, initialDelayMs * multiplier ** retry);
+ const createExponentialBackoffRetrier = ({ retries = 5, initialDelayMs = 100, maxDelayMs = 3000, multiplier = 2, onFailure = (error) => {
+     throw error;
+ }, } = {}) => async (func) => {
+     let retriesLeft = retries;
+     let delayMs = initialDelayMs;
+     let lastError;
+     while (true) {
+         try {
+             await func();
+             return;
+         }
+         catch (error) {
+             lastError = error;
+         }
+         if (--retriesLeft < 1)
+             break;
          await (0, delay_1.delay)(delayMs);
-         return (0, exports.createExponentialBackoffRetrier)({ ...options, retry: retry + 1 })(func);
+         delayMs = Math.min(maxDelayMs, delayMs * multiplier);
      }
+     await onFailure(lastError);
  };
  exports.createExponentialBackoffRetrier = createExponentialBackoffRetrier;
- exports.defaultRetrier = (0, exports.createExponentialBackoffRetrier)({});
+ exports.defaultRetrier = (0, exports.createExponentialBackoffRetrier)();
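
Since `utils/retrier` is now re-exported from the package index (see the `dist/index.*` changes above), the retrier can also be used standalone. A minimal sketch against the new signature:

```typescript
import { createExponentialBackoffRetrier } from 'kafka-ts';

// Build a retrier using the option names from this release.
const retrier = createExponentialBackoffRetrier({
    retries: 3,
    onFailure: (error) => console.error('giving up', error),
});

// A Retrier wraps any async function: (func: () => unknown) => Promise<void>.
await retrier(async () => {
    // work that may fail transiently
});
```
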
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "kafka-ts",
-     "version": "0.0.4",
+     "version": "0.0.5",
      "main": "dist/index.js",
      "author": "Priit Käärd",
  "license": "MIT",