kafka-ts 0.0.1-beta.3 → 0.0.1-beta.4

@@ -1,12 +1,18 @@
+import { jsonSerializer, log } from 'kafka-ts';
 import { kafka } from './client';
+import { delay } from '../../dist/utils/delay';

 (async () => {
     const consumer = await kafka.startConsumer({
         groupId: 'example-group',
         groupInstanceId: 'example-group-instance',
         topics: ['my-topic'],
+        allowTopicAutoCreation: true,
         onBatch: (batch) => {
-            console.log(batch);
+            log.info(
+                `Received batch: ${JSON.stringify(batch.map((message) => ({ ...message, value: message.value?.toString() })), jsonSerializer)}`,
+            );
+            log.info(`Latency: ${Date.now() - parseInt(batch[0].timestamp.toString())}ms`)
         },
         batchGranularity: 'broker',
         concurrency: 10,
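The `jsonSerializer` passed as the second argument to `JSON.stringify` matters here: fetched messages carry BigInt fields such as offsets and timestamps, and `JSON.stringify` throws on raw BigInt values unless a replacer converts them. A minimal sketch of such a replacer, for illustration only (`bigintReplacer` is a hypothetical stand-in, not kafka-ts's actual implementation):

```ts
// Hypothetical stand-in for a BigInt-safe replacer; kafka-ts exports its own
// as `jsonSerializer`. JSON.stringify throws a TypeError on raw BigInt values,
// so the replacer converts them to strings before serialization.
const bigintReplacer = (_key: string, value: unknown) =>
    typeof value === 'bigint' ? value.toString() : value;

console.log(JSON.stringify({ offset: 42n, timestamp: 1712345678901n }, bigintReplacer));
// => {"offset":"42","timestamp":"1712345678901"}
```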
@@ -1,4 +1,4 @@
-import { API, API_ERROR, KafkaTSApiError } from 'kafka-ts';
+import { API, API_ERROR, KafkaTSApiError, log } from 'kafka-ts';
 import { kafka } from './client';

 (async () => {
@@ -19,7 +19,7 @@ import { kafka } from './client';
             {
                 name: 'my-topic',
                 numPartitions: 10,
-                replicationFactor: 3,
+                replicationFactor: 1,
                 assignments: [],
                 configs: [],
             },
@@ -37,7 +37,7 @@ import { kafka } from './client';
        topics: [{ id: null, name: 'my-topic' }],
    });

-    console.log(metadata);
+    log.info('Metadata', metadata);

    await cluster.disconnect();
})();
@@ -9,10 +9,10 @@ process.stdout.write('> ');
 rl.on('line', async (line) => {
     await producer.send([
         {
-            topic: 'example-topic-f',
+            topic: 'my-topic',
             value: Buffer.from(line),
         },
-    ]);
+    ], { acks: -1 });
     process.stdout.write('> ');
 });

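The send call now passes `{ acks: -1 }`. These are standard Kafka produce-request semantics rather than anything kafka-ts-specific: the value controls how much durability the broker guarantees before acknowledging the write. A sketch of the wire-protocol meaning:

```ts
// Kafka produce-request `acks` values (wire-protocol semantics):
//    0 -> fire-and-forget; the broker sends no acknowledgement
//    1 -> acknowledged once the partition leader has persisted the record
//   -1 -> acknowledged once all in-sync replicas have persisted the record
//         (strongest durability; the same as "acks=all" in other clients)
type Acks = 0 | 1 | -1;

const strongestDurability: Acks = -1; // what the example opts into
```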
@@ -1,3 +1,4 @@
+import { log } from 'kafka-ts';
 import { kafka } from './client';

 (async () => {
@@ -15,7 +16,7 @@ import { kafka } from './client';
                offset: 0n,
            })),
        );
-        console.log(`Replicated ${messages.length} messages`);
+        log.info(`Replicated ${messages.length} messages`);
    },
});
process.on('SIGINT', async () => {
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "kafka-ts",
-    "version": "0.0.1-beta.3",
+    "version": "0.0.1-beta.4",
     "main": "dist/index.js",
     "author": "Priit Käärd",
     "license": "MIT",
@@ -282,8 +282,8 @@ exports[`Request handler > should create topics 1`] = `
       "errorCode": 0,
       "errorMessage": null,
       "name": "kafka-ts-test-topic",
-      "numPartitions": 1,
-      "replicationFactor": 1,
+      "numPartitions": 10,
+      "replicationFactor": 3,
       "topicId": "Any<UUID>",
     },
   ],
@@ -348,7 +348,11 @@ exports[`Request handler > should fetch messages 1`] = `
       "baseSequence": 0,
       "baseTimestamp": 0n,
       "batchLength": 94,
+      "compression": 0,
       "crc": 0,
+      "hasDeleteHorizonMs": false,
+      "isControlBatch": false,
+      "isTransactional": false,
       "lastOffsetDelta": 0,
       "magic": 2,
       "maxTimestamp": 0n,
@@ -416,6 +420,7 @@ exports[`Request handler > should fetch messages 1`] = `
           },
         },
       ],
+      "timestampType": "CreateTime",
     },
   ],
 },
@@ -944,6 +949,132 @@ exports[`Request handler > should request metadata for a topic 1`] = `
         0,
       ],
     },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 8,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 1,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 2,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 6,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 5,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 7,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 4,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 9,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 3,
+      "replicaNodes": [
+        0,
+      ],
+    },
   ],
   "topicAuthorizedOperations": -2147483648,
   "topicId": "Any<UUID>",
@@ -998,7 +1129,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 5,
+      "partitionIndex": 9,
       "replicaNodes": [
         0,
       ],
@@ -1026,7 +1157,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 0,
+      "partitionIndex": 1,
       "replicaNodes": [
         0,
       ],
@@ -1054,7 +1185,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 7,
+      "partitionIndex": 5,
       "replicaNodes": [
         0,
       ],
@@ -1068,7 +1199,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 1,
+      "partitionIndex": 3,
       "replicaNodes": [
         0,
       ],
@@ -1096,7 +1227,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 9,
+      "partitionIndex": 0,
       "replicaNodes": [
         0,
       ],
@@ -1110,7 +1241,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 3,
+      "partitionIndex": 4,
       "replicaNodes": [
         0,
       ],
@@ -1124,7 +1255,7 @@ exports[`Request handler > should request metadata for all topics 1`] = `
       "leaderEpoch": 0,
       "leaderId": 0,
       "offlineReplicas": [],
-      "partitionIndex": 4,
+      "partitionIndex": 7,
       "replicaNodes": [
         0,
       ],
@@ -1153,6 +1284,132 @@ exports[`Request handler > should request metadata for all topics 1`] = `
         0,
       ],
     },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 8,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 1,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 2,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 6,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 5,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 7,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 4,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 9,
+      "replicaNodes": [
+        0,
+      ],
+    },
+    {
+      "_tag": undefined,
+      "errorCode": 0,
+      "isrNodes": [
+        0,
+      ],
+      "leaderEpoch": 0,
+      "leaderId": 0,
+      "offlineReplicas": [],
+      "partitionIndex": 3,
+      "replicaNodes": [
+        0,
+      ],
+    },
   ],
   "topicAuthorizedOperations": -2147483648,
   "topicId": "Any<UUID>",
package/src/api/fetch.ts CHANGED
@@ -1,3 +1,4 @@
+import { findCodec } from '../codecs';
 import { createApi } from '../utils/api';
 import { Decoder } from '../utils/decoder';
 import { KafkaTSApiError } from '../utils/error';
@@ -68,7 +69,7 @@ export const FETCH = createApi({
         )
         .writeCompactString(data.rackId)
         .writeUVarInt(0),
-    response: (decoder) => {
+    response: async (decoder) => {
         const result = {
             _tag: decoder.readTagBuffer(),
             throttleTimeMs: decoder.readInt32(),
@@ -88,56 +89,105 @@ export const FETCH = createApi({
                        _tag: abortedTransaction.readTagBuffer(),
                    })),
                    preferredReadReplica: partition.readInt32(),
-                   records: decodeRecords(partition),
+                   records: decodeRecordBatch(partition),
                    _tag: partition.readTagBuffer(),
                })),
                _tag: response.readTagBuffer(),
            })),
            _tag2: decoder.readTagBuffer(),
        };
+
        if (result.errorCode) throw new KafkaTSApiError(result.errorCode, null, result);
        result.responses.forEach((response) => {
            response.partitions.forEach((partition) => {
                if (partition.errorCode) throw new KafkaTSApiError(partition.errorCode, null, result);
            });
        });
-       return result;
+
+       const decompressedResponses = await Promise.all(
+           result.responses.map(async (response) => ({
+               ...response,
+               partitions: await Promise.all(
+                   response.partitions.map(async (partition) => ({
+                       ...partition,
+                       records: await Promise.all(
+                           partition.records.map(async ({ recordsLength, compressedRecords, ...record }) => {
+                               const { decompress } = findCodec(record.compression);
+                               const decompressedRecords = await decompress(compressedRecords);
+                               const decompressedDecoder = new Decoder(
+                                   Buffer.concat([recordsLength, decompressedRecords]),
+                               );
+                               return { ...record, records: decodeRecord(decompressedDecoder) };
+                           }),
+                       ),
+                   })),
+               ),
+           })),
+       );
+
+       return { ...result, responses: decompressedResponses };
    },
});

-const decodeRecords = (decoder: Decoder) => {
+const decodeRecordBatch = (decoder: Decoder) => {
    const size = decoder.readUVarInt() - 1;
    if (size <= 0) {
        return [];
    }

+   const recordBatchDecoder = new Decoder(decoder.read(size));
+
    const results = [];
-   while (decoder.getBufferLength() > decoder.getOffset() + 49) {
+   while (recordBatchDecoder.getBufferLength() > recordBatchDecoder.getOffset()) {
+       const baseOffset = recordBatchDecoder.readInt64();
+       const batchLength = recordBatchDecoder.readInt32();
+
+       const batchDecoder = new Decoder(recordBatchDecoder.read(batchLength));
+
+       const result = {
+           baseOffset,
+           batchLength,
+           partitionLeaderEpoch: batchDecoder.readInt32(),
+           magic: batchDecoder.readInt8(),
+           crc: batchDecoder.readUInt32(),
+           attributes: batchDecoder.readInt16(),
+           lastOffsetDelta: batchDecoder.readInt32(),
+           baseTimestamp: batchDecoder.readInt64(),
+           maxTimestamp: batchDecoder.readInt64(),
+           producerId: batchDecoder.readInt64(),
+           producerEpoch: batchDecoder.readInt16(),
+           baseSequence: batchDecoder.readInt32(),
+           recordsLength: batchDecoder.read(4),
+           compressedRecords: batchDecoder.read(),
+       };
+
+       const compression = result.attributes & 0x07;
+       const timestampType = (result.attributes & 0x08) >> 3 ? 'LogAppendTime' : 'CreateTime';
+       const isTransactional = !!((result.attributes & 0x10) >> 4);
+       const isControlBatch = !!((result.attributes & 0x20) >> 5);
+       const hasDeleteHorizonMs = !!((result.attributes & 0x40) >> 6);
+
        results.push({
-           baseOffset: decoder.readInt64(),
-           batchLength: decoder.readInt32(),
-           partitionLeaderEpoch: decoder.readInt32(),
-           magic: decoder.readInt8(),
-           crc: decoder.readUInt32(),
-           attributes: decoder.readInt16(),
-           lastOffsetDelta: decoder.readInt32(),
-           baseTimestamp: decoder.readInt64(),
-           maxTimestamp: decoder.readInt64(),
-           producerId: decoder.readInt64(),
-           producerEpoch: decoder.readInt16(),
-           baseSequence: decoder.readInt32(),
-           records: decoder.readRecords((record) => ({
-               attributes: record.readInt8(),
-               timestampDelta: record.readVarLong(),
-               offsetDelta: record.readVarInt(),
-               key: record.readVarIntBuffer(),
-               value: record.readVarIntBuffer(),
-               headers: record.readCompactArray((header) => ({
-                   key: header.readVarIntBuffer(),
-                   value: header.readVarIntBuffer(),
-               })),
-           })),
+           ...result,
+           compression,
+           timestampType,
+           isTransactional,
+           isControlBatch,
+           hasDeleteHorizonMs,
        });
    }
    return results;
};
+
+const decodeRecord = (decoder: Decoder) =>
+   decoder.readRecords((record) => ({
+       attributes: record.readInt8(),
+       timestampDelta: record.readVarLong(),
+       offsetDelta: record.readVarInt(),
+       key: record.readVarIntBuffer(),
+       value: record.readVarIntBuffer(),
+       headers: record.readVarIntArray((header) => ({
+           key: header.readVarIntBuffer(),
+           value: header.readVarIntBuffer(),
+       })),
+   }));
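The bit masks applied to `attributes` in `decodeRecordBatch` follow the Kafka record-batch format (message format v2), which packs several flags into one 16-bit field. A self-contained sketch of that layout, consistent with the masks in the diff above:

```ts
// Record-batch `attributes` bit layout (Kafka message format v2):
//   bits 0-2  compression codec (0=none, 1=gzip, 2=snappy, 3=lz4, 4=zstd)
//   bit 3     timestamp type (0=CreateTime, 1=LogAppendTime)
//   bit 4     isTransactional
//   bit 5     isControlBatch
//   bit 6     hasDeleteHorizonMs
const parseBatchAttributes = (attributes: number) => ({
    compression: attributes & 0x07,
    timestampType: attributes & 0x08 ? 'LogAppendTime' : 'CreateTime',
    isTransactional: Boolean(attributes & 0x10),
    isControlBatch: Boolean(attributes & 0x20),
    hasDeleteHorizonMs: Boolean(attributes & 0x40),
});

// Example: 0x13 = 0b0010011 -> lz4-compressed (3), CreateTime, transactional.
// parseBatchAttributes(0x13).isTransactional === true
```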
@@ -21,7 +21,8 @@ describe.sequential('Request handler', () => {
     let cluster: Cluster;

     beforeAll(async () => {
-        cluster = await kafka.createCluster().connect();
+        cluster = await kafka.createCluster();
+        await cluster.connect();

         const metadataResult = await cluster.sendRequest(API.METADATA, {
             topics: null,
@@ -52,8 +53,8 @@ describe.sequential('Request handler', () => {
            topics: [
                {
                    name: 'kafka-ts-test-topic',
-                   numPartitions: 1,
-                   replicationFactor: 1,
+                   numPartitions: 10,
+                   replicationFactor: 3,
                    assignments: [],
                    configs: [],
                },
@@ -89,6 +90,7 @@ describe.sequential('Request handler', () => {
         expect(result).toMatchSnapshot();
     });

+    let partitionIndex = 0;
     let leaderId = 0;

     it('should request metadata for a topic', async () => {
@@ -97,6 +99,7 @@ describe.sequential('Request handler', () => {
             allowTopicAutoCreation: false,
             includeTopicAuthorizedOperations: false,
         });
+        partitionIndex = result.topics[0].partitions[0].partitionIndex;
         leaderId = result.topics[0].partitions[0].leaderId;
         result.controllerId = 0;
         result.topics.forEach((topic) => {
@@ -134,7 +137,7 @@ describe.sequential('Request handler', () => {
                     name: 'kafka-ts-test-topic',
                     partitionData: [
                         {
-                            index: 0,
+                            index: partitionIndex,
                             baseOffset: 0n,
                             partitionLeaderEpoch: 0,
                             attributes: 0,
@@ -180,7 +183,7 @@ describe.sequential('Request handler', () => {
                 topicId,
                 partitions: [
                     {
-                        partition: 0,
+                        partition: partitionIndex,
                         currentLeaderEpoch: -1,
                         fetchOffset: 0n,
                         lastFetchedEpoch: 0,