kafka-ts 0.0.6-beta.0 → 0.0.6-beta.2

@@ -112,9 +112,17 @@ class Connection {
             }
         });
         clearTimeout(timeout);
-        const response = await api.response(responseDecoder);
-        (0, assert_1.default)(responseDecoder.getOffset() - 4 === responseSize, `Buffer not correctly consumed: ${responseDecoder.getOffset() - 4} !== ${responseSize}`);
-        return response;
+        try {
+            const response = await api.response(responseDecoder);
+            (0, assert_1.default)(responseDecoder.getOffset() - 4 === responseSize, `Buffer not correctly consumed: ${responseDecoder.getOffset() - 4} !== ${responseSize}`);
+            return response;
+        }
+        catch (error) {
+            if (error instanceof error_1.KafkaTSApiError) {
+                error.request = JSON.stringify(body, logger_1.jsonSerializer);
+            }
+            throw error;
+        }
     }
     write(buffer) {
         return new Promise((resolve, reject) => {
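
The Connection change wraps response decoding in a try/catch so that when a `KafkaTSApiError` surfaces, the serialized request body travels with it and error logs show both sides of the failed exchange. A minimal sketch of the pattern (simplified types; `sendRequest` is a hypothetical stand-in for the connection's send path):

```ts
// Simplified re-declaration of the error class, matching the constructor in this diff.
class KafkaTSApiError extends Error {
    public request: string | undefined;
    constructor(
        public errorCode: number,
        public errorMessage: string | null,
        public response: unknown,
    ) {
        super(errorMessage ?? `Kafka API error ${errorCode}`);
    }
}

// Hypothetical stand-in for Connection's send path.
async function sendRequest<T>(body: unknown, decode: () => Promise<T>): Promise<T> {
    try {
        return await decode();
    } catch (error) {
        if (error instanceof KafkaTSApiError) {
            // Attach the outgoing request so the error carries request and response.
            error.request = JSON.stringify(body);
        }
        throw error; // rethrown unchanged for non-API errors
    }
}
```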
@@ -49,7 +49,13 @@ class OffsetManager {
             .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
             .map(({ topic, partition }) => ({
                 name: topic,
-                partitions: [{ partitionIndex: partition, currentLeaderEpoch: -1, timestamp: -1n }],
+                partitions: [
+                    {
+                        partitionIndex: partition,
+                        currentLeaderEpoch: -1,
+                        timestamp: fromBeginning ? -2n : -1n,
+                    },
+                ],
             })),
         });
         const topicPartitions = {};
@@ -57,7 +63,7 @@ class OffsetManager {
             topicPartitions[name] ??= new Set();
             partitions.forEach(({ partitionIndex, offset }) => {
                 topicPartitions[name].add(partitionIndex);
-                this.resolve(name, partitionIndex, fromBeginning ? 0n : offset);
+                this.resolve(name, partitionIndex, offset);
             });
         });
         this.flush(topicPartitions);
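
Both OffsetManager hunks revolve around how `fromBeginning` is resolved. Kafka's ListOffsets API reserves two sentinel timestamps: `-1` asks the broker for a partition's latest offset and `-2` for its earliest. The old code always requested `-1` and then locally overrode the answer with `0n` when `fromBeginning` was set, but offset 0 stops being the start of a partition once retention has trimmed the log head; the new code asks the broker for the true earliest offset instead. A sketch of the request shape (the helper name is hypothetical; the field names match this diff):

```ts
// Sentinel timestamps defined by Kafka's ListOffsets API.
const LATEST_TIMESTAMP = -1n;   // resolve to the partition's latest offset
const EARLIEST_TIMESTAMP = -2n; // resolve to the partition's earliest offset

// Hypothetical helper mirroring the partition entry built in this diff.
const listOffsetsPartition = (partitionIndex: number, fromBeginning: boolean) => ({
    partitionIndex,
    currentLeaderEpoch: -1, // -1 = don't fence the request on a leader epoch
    timestamp: fromBeginning ? EARLIEST_TIMESTAMP : LATEST_TIMESTAMP,
});
```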
@@ -20,5 +20,6 @@ export declare class Producer {
     close(): Promise<void>;
     private ensureConnected;
     private initProducerId;
-    private getBaseSequence;
+    private nextBaseSequence;
+    private revertBaseSequence;
 }
@@ -43,11 +43,8 @@ class Producer {
         const topics = Array.from(new Set(messages.map((message) => message.topic)));
         await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
         const nodeTopicPartitionMessages = (0, messages_to_topic_partition_leaders_1.distributeMessagesToTopicPartitionLeaders)(messages.map((message) => ({ ...message, partition: this.partition(message) })), this.metadata.getTopicPartitionLeaderIds());
-        await Promise.all(Object.entries(nodeTopicPartitionMessages).map(([nodeId, topicPartitionMessages]) => this.cluster.sendRequestToNode(parseInt(nodeId))(api_1.API.PRODUCE, {
-            transactionalId: null,
-            acks,
-            timeoutMs: 5000,
-            topicData: Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
+        await Promise.all(Object.entries(nodeTopicPartitionMessages).map(async ([nodeId, topicPartitionMessages]) => {
+            const topicData = Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
                 name: topic,
                 partitionData: Object.entries(partitionMessages).map(([partition, messages]) => {
                     const partitionIndex = parseInt(partition);
@@ -61,7 +58,7 @@ class Producer {
                             maxTimestamp = timestamp;
                         }
                     });
-                    const baseSequence = this.getBaseSequence(topic, partitionIndex, messages.length);
+                    const baseSequence = this.nextBaseSequence(topic, partitionIndex, messages.length);
                     return {
                         index: partitionIndex,
                         baseOffset: 0n,
@@ -86,8 +83,24 @@ class Producer {
                         })),
                     };
                 }),
-            })),
-        })));
+            }));
+            try {
+                return await this.cluster.sendRequestToNode(parseInt(nodeId))(api_1.API.PRODUCE, {
+                    transactionalId: null,
+                    acks,
+                    timeoutMs: 5000,
+                    topicData,
+                });
+            }
+            catch (error) {
+                topicData.forEach(({ name, partitionData }) => {
+                    partitionData.forEach(({ index, records }) => {
+                        this.revertBaseSequence(name, index, records.length);
+                    });
+                });
+                throw error;
+            }
+        }));
     }
     async close() {
         await this.cluster.disconnect();
@@ -116,13 +129,16 @@ class Producer {
             throw error;
         }
     }
-    getBaseSequence(topic, partition, messagesCount) {
+    nextBaseSequence(topic, partition, messagesCount) {
         this.sequences[topic] ??= {};
         this.sequences[topic][partition] ??= 0;
         const baseSequence = this.sequences[topic][partition];
         this.sequences[topic][partition] += messagesCount;
         return baseSequence;
     }
+    revertBaseSequence(topic, partition, messagesCount) {
+        this.sequences[topic][partition] -= messagesCount;
+    }
 }
 exports.Producer = Producer;
 __decorate([
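
The Producer hunks are one change: sequence-number bookkeeping that survives failed sends. An idempotent Kafka producer stamps every batch with a per-topic-partition base sequence, and the broker rejects a batch whose sequence does not follow the last one it accepted. When a PRODUCE request fails, the sequence numbers it reserved were never consumed broker-side, so they must be handed back before a retry; this is why `topicData` is hoisted out of the request literal (the catch block needs to walk it) and why `getBaseSequence` becomes the `nextBaseSequence`/`revertBaseSequence` pair. A minimal sketch of the reserve/rollback pattern, extracted from the diff:

```ts
// Per-topic, per-partition sequence counters for the idempotent producer.
const sequences: Record<string, Record<number, number>> = {};

// Reserve `count` sequence numbers; returns the base sequence for the batch.
function nextBaseSequence(topic: string, partition: number, count: number): number {
    sequences[topic] ??= {};
    sequences[topic][partition] ??= 0;
    const base = sequences[topic][partition];
    sequences[topic][partition] += count;
    return base;
}

// Hand reserved sequence numbers back after a failed PRODUCE so a retry
// reuses them instead of leaving a gap the broker would reject.
function revertBaseSequence(topic: string, partition: number, count: number): void {
    sequences[topic][partition] -= count;
}
```

Note that this simple decrement assumes the failed batch was the most recent reservation for that partition; concurrent in-flight batches to the same partition would need more careful accounting.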
@@ -5,6 +5,7 @@ export declare class KafkaTSApiError<T = any> extends KafkaTSError {
     errorCode: number;
     errorMessage: string | null;
     response: T;
+    request: string | undefined;
     constructor(errorCode: number, errorMessage: string | null, response: T);
 }
 export declare class ConnectionError extends KafkaTSError {
@@ -13,6 +13,7 @@ class KafkaTSApiError extends KafkaTSError {
     errorCode;
     errorMessage;
     response;
+    request;
     constructor(errorCode, errorMessage, response) {
         const [errorName] = Object.entries(api_1.API_ERROR).find(([, value]) => value === errorCode) ?? ['UNKNOWN'];
         super(`${errorName}${errorMessage ? `: ${errorMessage}` : ''}`);
@@ -3,7 +3,13 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.setLogger = exports.log = exports.jsonSerializer = void 0;
 const jsonSerializer = (_, v) => {
     if (v instanceof Error) {
-        return { name: v.name, message: v.message, stack: v.stack, cause: v.cause };
+        return Object.getOwnPropertyNames(v).reduce((acc, key) => {
+            acc[key] = v[key];
+            return acc;
+        }, {});
+    }
+    if (Buffer.isBuffer(v)) {
+        return v.toString();
     }
     if (typeof v === 'bigint') {
         return v.toString();
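
The old replacer flattened every `Error` to four fixed fields, which would silently drop the `request` property this release adds to `KafkaTSApiError` (or any other custom field). Iterating `Object.getOwnPropertyNames` keeps non-enumerable built-ins such as `message` and `stack` along with custom properties, and the new `Buffer` branch is meant to render buffer contents as text rather than Node's `{ type: 'Buffer', data: [...] }` form. A typed sketch of the replacer, with a hypothetical error subclass to show the difference:

```ts
// The replacer from this diff, with types added for the sketch.
const jsonSerializer = (_: string, v: unknown) => {
    if (v instanceof Error) {
        // Own property names include non-enumerables like `message` and `stack`,
        // plus custom fields such as KafkaTSApiError's new `request`.
        return Object.getOwnPropertyNames(v).reduce<Record<string, unknown>>((acc, key) => {
            acc[key] = (v as any)[key];
            return acc;
        }, {});
    }
    if (Buffer.isBuffer(v)) return v.toString();
    if (typeof v === 'bigint') return v.toString(); // JSON has no BigInt
    return v;
};

// Hypothetical subclass for illustration.
class DemoApiError extends Error {
    request?: string;
}
const err = new DemoApiError('boom');
err.request = '{"topic":"demo"}';

console.log(JSON.stringify({ err, offset: 42n }, jsonSerializer));
// The error keeps "message", "stack" and the custom "request" field; the old
// fixed-field replacer would have dropped "request". 42n serializes as "42".
```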
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "kafka-ts",
-  "version": "0.0.6-beta.0",
+  "version": "0.0.6-beta.2",
   "main": "dist/index.js",
   "author": "Priit Käärd",
   "license": "MIT",
@@ -10,7 +10,7 @@
   },
   "scripts": {
     "start": "docker-compose down && KAFKA_VERSION=3.7.1 docker-compose up -d && sleep 5 && bash ./scripts/create-scram-user.sh",
-    "version:beta": "npm version prerelease --preid=beta",
+    "version:prerelease": "npm version prerelease",
     "version:patch": "npm version patch",
     "format": "prettier --write .",
     "build": "tsc",