kafka-ts 1.3.1-beta.1 → 1.3.1-beta.2
This diff shows the changes between these two published package versions as they appear in their public registry, and is provided for informational purposes only.
- package/README.md +7 -7
- package/dist/api/alter-configs.d.ts +26 -0
- package/dist/api/alter-configs.js +33 -0
- package/dist/api/api-versions.d.ts +5 -2
- package/dist/api/api-versions.js +13 -0
- package/dist/api/create-topics.d.ts +14 -12
- package/dist/api/create-topics.js +104 -12
- package/dist/api/delete-topics.d.ts +10 -8
- package/dist/api/delete-topics.js +61 -7
- package/dist/api/fetch.d.ts +15 -12
- package/dist/api/fetch.js +131 -13
- package/dist/api/find-coordinator.d.ts +9 -7
- package/dist/api/find-coordinator.js +63 -5
- package/dist/api/heartbeat.d.ts +7 -5
- package/dist/api/heartbeat.js +42 -4
- package/dist/api/index.d.ts +47 -118
- package/dist/api/init-producer-id.d.ts +7 -5
- package/dist/api/init-producer-id.js +53 -9
- package/dist/api/join-group.d.ts +9 -7
- package/dist/api/join-group.js +95 -6
- package/dist/api/leave-group.d.ts +8 -6
- package/dist/api/leave-group.js +49 -6
- package/dist/api/list-offsets.d.ts +9 -7
- package/dist/api/list-offsets.js +85 -8
- package/dist/api/metadata.d.ts +10 -9
- package/dist/api/metadata.js +109 -8
- package/dist/api/offset-commit.d.ts +10 -8
- package/dist/api/offset-commit.js +88 -8
- package/dist/api/offset-fetch.d.ts +11 -9
- package/dist/api/offset-fetch.js +94 -9
- package/dist/api/produce.d.ts +8 -10
- package/dist/api/produce.js +132 -38
- package/dist/api/sasl-authenticate.d.ts +8 -6
- package/dist/api/sasl-authenticate.js +43 -3
- package/dist/api/sasl-handshake.d.ts +7 -4
- package/dist/api/sasl-handshake.js +10 -0
- package/dist/api/sync-group.d.ts +7 -5
- package/dist/api/sync-group.js +62 -5
- package/dist/broker.js +6 -5
- package/dist/cluster.test.js +17 -14
- package/dist/connection.d.ts +11 -1
- package/dist/connection.js +27 -2
- package/dist/consumer/consumer.js +13 -9
- package/dist/consumer/metadata.d.ts +24 -0
- package/dist/consumer/metadata.js +64 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/examples/src/replicator.js +34 -0
- package/dist/examples/src/utils/json.js +5 -0
- package/dist/request-handler.d.ts +16 -0
- package/dist/request-handler.js +67 -0
- package/dist/request-handler.test.d.ts +1 -0
- package/dist/request-handler.test.js +340 -0
- package/dist/src/api/api-versions.js +18 -0
- package/dist/src/api/create-topics.js +46 -0
- package/dist/src/api/delete-topics.js +26 -0
- package/dist/src/api/fetch.js +95 -0
- package/dist/src/api/find-coordinator.js +34 -0
- package/dist/src/api/heartbeat.js +22 -0
- package/dist/src/api/index.js +38 -0
- package/dist/src/api/init-producer-id.js +24 -0
- package/dist/src/api/join-group.js +48 -0
- package/dist/src/api/leave-group.js +30 -0
- package/dist/src/api/list-offsets.js +39 -0
- package/dist/src/api/metadata.js +47 -0
- package/dist/src/api/offset-commit.js +39 -0
- package/dist/src/api/offset-fetch.js +44 -0
- package/dist/src/api/produce.js +119 -0
- package/dist/src/api/sync-group.js +31 -0
- package/dist/src/broker.js +35 -0
- package/dist/src/connection.js +21 -0
- package/dist/src/consumer/consumer-group.js +131 -0
- package/dist/src/consumer/consumer.js +103 -0
- package/dist/src/consumer/metadata.js +52 -0
- package/dist/src/consumer/offset-manager.js +23 -0
- package/dist/src/index.js +19 -0
- package/dist/src/producer/producer.js +84 -0
- package/dist/src/request-handler.js +57 -0
- package/dist/src/request-handler.test.js +321 -0
- package/dist/src/types.js +2 -0
- package/dist/src/utils/api.js +5 -0
- package/dist/src/utils/decoder.js +161 -0
- package/dist/src/utils/encoder.js +137 -0
- package/dist/src/utils/error.js +10 -0
- package/dist/utils/api.d.ts +4 -1
- package/dist/utils/cached.d.ts +3 -0
- package/dist/utils/cached.js +19 -0
- package/dist/utils/debug.d.ts +2 -0
- package/dist/utils/debug.js +11 -0
- package/dist/utils/decoder.d.ts +2 -2
- package/dist/utils/decoder.js +14 -1
- package/dist/utils/encoder.d.ts +1 -0
- package/dist/utils/encoder.js +14 -0
- package/dist/utils/lock.d.ts +8 -0
- package/dist/utils/lock.js +44 -0
- package/dist/utils/memo.d.ts +1 -0
- package/dist/utils/memo.js +16 -0
- package/dist/utils/mutex.d.ts +3 -0
- package/dist/utils/mutex.js +32 -0
- package/package.json +1 -1

package/dist/src/consumer/consumer.js (@@ -0,0 +1,103 @@)

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.startConsumer = void 0;
const fetch_1 = require("../api/fetch");
const broker_1 = require("../broker");
const consumer_group_1 = require("./consumer-group");
const metadata_1 = require("./metadata");
const offset_manager_1 = require("./offset-manager");
const startConsumer = async ({ brokers, clientId = null, groupId, topics, groupInstanceId = null, rackId = "", isolationLevel = 0 /* IsolationLevel.READ_UNCOMMITTED */, sessionTimeoutMs = 30_000, rebalanceTimeoutMs = 60_000, maxWaitMs = 5000, minBytes = 1, maxBytes = 1_000_000, partitionMaxBytes = 1_000_000, allowTopicAutoCreation = true, fromBeginning = false, ...options }) => {
    let coordinator = await (0, broker_1.connectBroker)({ clientId, options: brokers[0] });
    let stopHook;
    const offsetManager = (0, offset_manager_1.createOffsetManager)();
    const metadata = (0, metadata_1.createMetadata)({
        topics,
        isolationLevel,
        allowTopicAutoCreation,
        fromBeginning,
        coordinator,
        offsetManager,
    });
    const consumerGroup = groupId
        ? (0, consumer_group_1.createConsumerGroup)({
            topics,
            groupId,
            groupInstanceId,
            sessionTimeoutMs,
            rebalanceTimeoutMs,
            coordinator,
            metadata,
            offsetManager,
        })
        : undefined;
    const fetch = async () => {
        const response = await coordinator.sendRequest(fetch_1.FETCH, {
            maxWaitMs,
            minBytes,
            maxBytes,
            isolationLevel,
            sessionId: 0,
            sessionEpoch: -1,
            topics: Object.entries(metadata.getAssignment())
                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
                .map(({ topic, partition }) => ({
                    topicId: metadata.getTopicIdByName(topic),
                    partitions: [
                        {
                            partition,
                            currentLeaderEpoch: -1,
                            fetchOffset: offsetManager.getCurrentOffset(topic, partition),
                            lastFetchedEpoch: -1,
                            logStartOffset: 0n,
                            partitionMaxBytes,
                        },
                    ],
                })),
            forgottenTopicsData: [],
            rackId,
        });
        return response;
    };
    const fetchLoop = async () => {
        while (!stopHook) {
            const batch = await fetch();
            const messages = batch.responses.flatMap(({ topicId, partitions }) => partitions.flatMap(({ partitionIndex, records }) => records.flatMap(({ baseTimestamp, baseOffset, records }) => records.map((message) => ({
                topic: metadata.getTopicNameById(topicId),
                partition: partitionIndex,
                key: message.key ?? null,
                value: message.value ?? null,
                headers: Object.fromEntries(message.headers.map(({ key, value }) => [key, value])),
                timestamp: baseTimestamp + BigInt(message.timestampDelta),
                offset: baseOffset + BigInt(message.offsetDelta),
            })))));
            // TODO: Implement exponential backoff
            try {
                if ("onBatch" in options) {
                    await options.onBatch(messages);
                }
                else if ("onMessage" in options) {
                    for (const message of messages) {
                        await options.onMessage(message);
                    }
                }
            }
            catch (error) {
                console.error(error);
                continue;
            }
            messages.forEach(({ topic, partition, offset }) => offsetManager.resolve(topic, partition, offset));
            await consumerGroup?.commit();
        }
        stopHook();
    };
    const close = async () => {
        await new Promise((resolve) => (stopHook = resolve));
        await consumerGroup?.leave();
        await coordinator.disconnect();
    };
    await metadata.init();
    await consumerGroup?.join();
    fetchLoop();
    return { close };
};
exports.startConsumer = startConsumer;
```
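
For orientation, here is a minimal sketch of how this `startConsumer` entry point would be called. It is illustrative only: the option names come from the destructuring above, the broker option shape (`{ host, port }`) mirrors `createConnection({ host, port })` in the test file later in this diff, and the root `kafka-ts` import assumes the package entry re-exports `./consumer/consumer` the way `dist/src/index.js` does.

```ts
// Hypothetical usage sketch; exact public types are an assumption.
import { startConsumer } from "kafka-ts";

async function main() {
    const consumer = await startConsumer({
        brokers: [{ host: "localhost", port: 9092 }], // assumed broker option shape
        groupId: "example-group",
        topics: ["example-topic"],
        fromBeginning: true,
        onMessage: async (message) => {
            // Each decoded message carries topic, partition, key, value, headers, timestamp, and offset.
            console.log(message.topic, message.partition, message.offset, message.value);
        },
    });

    // close() resolves the stop hook, leaves the consumer group, and disconnects the coordinator.
    await consumer.close();
}

main().catch(console.error);
```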

package/dist/src/consumer/metadata.js (@@ -0,0 +1,52 @@)

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createMetadata = void 0;
const api_1 = require("../api");
const createMetadata = ({ topics, isolationLevel, allowTopicAutoCreation, fromBeginning, coordinator, offsetManager, }) => {
    let topicPartitions = {};
    let topicNameById = {};
    let topicIdByName = {};
    let assignment = {};
    const fetchMetadata = async () => {
        const response = await coordinator.sendRequest(api_1.API.METADATA, {
            allowTopicAutoCreation,
            includeTopicAuthorizedOperations: false,
            topics: topics.map((name) => ({ id: null, name })),
        });
        topicPartitions = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.partitions.map((partition) => partition.partitionIndex)]));
        topicNameById = Object.fromEntries(response.topics.map((topic) => [topic.topicId, topic.name]));
        topicIdByName = Object.fromEntries(response.topics.map((topic) => [topic.name, topic.topicId]));
        assignment = topicPartitions;
    };
    const listOffsets = async () => {
        const offsets = await coordinator.sendRequest(api_1.API.LIST_OFFSETS, {
            replicaId: -1,
            isolationLevel,
            topics: Object.entries(assignment)
                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
                .map(({ topic, partition }) => ({
                    name: topic,
                    partitions: [{ partitionIndex: partition, currentLeaderEpoch: -1, timestamp: -1n }],
                })),
        });
        offsets.topics.forEach(({ name, partitions }) => {
            partitions.forEach(({ partitionIndex, offset }) => {
                offsetManager.resolve(name, partitionIndex, fromBeginning ? 0n : offset - 1n);
            });
        });
    };
    return {
        init: async () => {
            await fetchMetadata();
            await listOffsets();
        },
        getTopicPartitions: () => topicPartitions,
        getTopicIdByName: (name) => topicIdByName[name],
        getTopicNameById: (id) => topicNameById[id],
        getAssignment: () => assignment,
        setAssignment: (newAssignment) => {
            assignment = newAssignment;
        },
    };
};
exports.createMetadata = createMetadata;
```
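
As a reading aid, the handle returned by `createMetadata` can be summarized with the following TypeScript shape. This is an inferred sketch, not the package's shipped `.d.ts`.

```ts
// Inferred shape of the object returned by createMetadata above (assumption, not the published types).
type MetadataHandle = {
    // Fetches topic metadata, then seeds the offset manager via LIST_OFFSETS
    // (0n when fromBeginning is set, otherwise the latest offset minus one).
    init(): Promise<void>;
    getTopicPartitions(): Record<string, number[]>;    // topic name -> known partition indexes
    getTopicIdByName(name: string): string;
    getTopicNameById(id: string): string;
    getAssignment(): Record<string, number[]>;         // topic name -> partitions this consumer fetches from
    setAssignment(assignment: Record<string, number[]>): void;
};
```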

package/dist/src/consumer/offset-manager.js (@@ -0,0 +1,23 @@)

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createOffsetManager = void 0;
const createOffsetManager = () => {
    let currentOffsets = {};
    let pendingOffsets = {};
    const resolve = (topic, partition, offset) => {
        pendingOffsets[topic] ??= {};
        pendingOffsets[topic][partition] = offset + 1n;
        currentOffsets[topic] ??= {};
        currentOffsets[topic][partition] = offset + 1n;
    };
    const flush = () => {
        pendingOffsets = {};
    };
    return {
        getCurrentOffset: (topic, partition) => currentOffsets[topic]?.[partition] ?? 0n,
        resolve,
        flush,
        getPendingOffsets: () => pendingOffsets,
    };
};
exports.createOffsetManager = createOffsetManager;
```
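
The offset manager tracks the next offset to fetch per topic-partition and keeps a separate pending map until offsets are committed. A small usage sketch follows; importing the compiled module directly from its dist path is an assumption and may not be allowed by the package's entry points.

```ts
// Usage sketch; the deep import path follows the file location shown in this diff.
import { createOffsetManager } from "kafka-ts/dist/src/consumer/offset-manager";

const offsets = createOffsetManager();

offsets.resolve("example-topic", 0, 41n);                  // offset 41 processed
console.log(offsets.getCurrentOffset("example-topic", 0)); // 42n: the next offset to fetch
console.log(offsets.getPendingOffsets());                  // { "example-topic": { "0": 42n } } awaiting commit
offsets.flush();                                           // clear pending offsets once OFFSET_COMMIT succeeds
console.log(offsets.getCurrentOffset("example-topic", 0)); // still 42n: flush() only clears the pending map
```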

package/dist/src/index.js (@@ -0,0 +1,19 @@)

```js
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __exportStar = (this && this.__exportStar) || function(m, exports) {
    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
__exportStar(require("./consumer/consumer"), exports);
__exportStar(require("./producer/producer"), exports);
__exportStar(require("./types"), exports);
```

package/dist/src/producer/producer.js (@@ -0,0 +1,84 @@)

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createProducer = void 0;
const api_1 = require("../api");
const broker_1 = require("../broker");
const createProducer = ({ clientId = null, brokers }) => {
    let producerId = 0n;
    let producerEpoch = 0;
    let sequences = {};
    let connect = (async () => {
        const broker = await (0, broker_1.connectBroker)({ clientId, options: brokers[0] });
        const result = await broker.sendRequest(api_1.API.INIT_PRODUCER_ID, {
            transactionalId: null,
            transactionTimeoutMs: 0,
            producerId,
            producerEpoch,
        });
        producerId = result.producerId;
        producerEpoch = result.producerEpoch;
        sequences = {};
        return broker;
    })();
    const getSequence = (topic, partition) => {
        sequences[topic] ??= {};
        sequences[topic][partition] ??= 0;
        return sequences[topic][partition]++;
    };
    const send = async (messages) => {
        const { sendRequest } = await connect;
        const topicPartitionMessages = {};
        messages.forEach((message) => {
            topicPartitionMessages[message.topic] ??= {};
            topicPartitionMessages[message.topic][message.partition] ??= [];
            topicPartitionMessages[message.topic][message.partition].push(message);
        });
        await sendRequest(api_1.API.PRODUCE, {
            transactionalId: null,
            acks: 1,
            timeoutMs: 5000,
            topicData: Object.entries(topicPartitionMessages).map(([topic, partitionMessages]) => ({
                name: topic,
                partitionData: Object.entries(partitionMessages).map(([partition, messages]) => {
                    let baseTimestamp;
                    let maxTimestamp;
                    messages.forEach(message => {
                        if (!baseTimestamp || message.timestamp < baseTimestamp) {
                            baseTimestamp = message.timestamp;
                        }
                        if (!maxTimestamp || message.timestamp > maxTimestamp) {
                            maxTimestamp = message.timestamp;
                        }
                    });
                    return ({
                        index: parseInt(partition),
                        baseOffset: 0n,
                        partitionLeaderEpoch: -1,
                        attributes: 0,
                        lastOffsetDelta: messages.length - 1,
                        baseTimestamp: baseTimestamp ?? 0n,
                        maxTimestamp: maxTimestamp ?? 0n,
                        producerId,
                        producerEpoch: 0,
                        baseSequence: getSequence(topic, parseInt(partition)),
                        records: messages.map((message, index) => ({
                            attributes: 0,
                            timestampDelta: message.timestamp - (baseTimestamp ?? 0n),
                            offsetDelta: index,
                            key: message.key,
                            value: message.value,
                            headers: Object.entries(message.headers).map(([key, value]) => ({ key, value })),
                        })),
                    });
                }),
            })),
        });
    };
    const close = async () => {
        const { disconnect } = await connect;
        // TODO: wait for inflight requests to complete
        await disconnect();
    };
    return { send, close };
};
exports.createProducer = createProducer;
```
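
A minimal produce sketch based on the message fields read above (`topic`, `partition`, `timestamp`, `key`, `value`, `headers`); the root `kafka-ts` export and the broker option shape are assumptions, as in the consumer sketch earlier.

```ts
// Hypothetical usage sketch; acks=1 and a 5000 ms timeout are hardcoded in send() above.
import { createProducer } from "kafka-ts";

async function main() {
    const producer = createProducer({ brokers: [{ host: "localhost", port: 9092 }] }); // assumed broker shape

    await producer.send([
        {
            topic: "example-topic",
            partition: 0,
            timestamp: BigInt(Date.now()), // bigint; used for baseTimestamp and timestampDelta
            key: "key",
            value: "value",
            headers: { "header-key": "header-value" },
        },
    ]);

    await producer.close();
}

main().catch(console.error);
```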

package/dist/src/request-handler.js (@@ -0,0 +1,57 @@)

```js
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.createRequestHandler = void 0;
const node_assert_1 = __importDefault(require("node:assert"));
const api_1 = require("./api");
const decoder_1 = require("./utils/decoder");
const encoder_1 = require("./utils/encoder");
const createRequestHandler = ({ clientId, connection }) => {
    const requestQueue = {};
    let currentBuffer = null;
    const handleData = (buffer) => {
        currentBuffer = currentBuffer ? Buffer.concat([currentBuffer, buffer]) : buffer;
        const decoder = (0, decoder_1.createDecoder)({ buffer: currentBuffer });
        const size = decoder.readInt32();
        if (size > decoder.buffer.length) {
            return;
        }
        const correlationId = decoder.readInt32();
        const request = requestQueue[correlationId];
        delete requestQueue[correlationId];
        request.callback(decoder);
        // console.log({
        // offset: decoder.offset,
        // length: decoder.buffer.length,
        // rest: decoder.buffer.subarray(decoder.offset, decoder.buffer.length)?.toString(),
        // });
        (0, node_assert_1.default)(decoder.offset - 4 === size, `Buffer not correctly consumed: ${decoder.offset - 4} !== ${buffer.length}`);
        currentBuffer = null;
    };
    const sendRequest = (api, args) => {
        const [apiName] = Object.entries(api_1.API).find(([, value]) => value === api) ?? ["UNKNOWN"];
        console.log(`[sendRequest] ${apiName}`);
        const correlationId = Math.floor(Math.random() * 1000000);
        const encoder = (0, encoder_1.createEncoder)()
            .writeInt16(api.apiKey)
            .writeInt16(api.apiVersion)
            .writeInt32(correlationId)
            .writeString(clientId);
        const request = api.request(encoder, args);
        const buffer = (0, encoder_1.createEncoder)()
            .writeInt32(request.length)
            .write(request)
            .value();
        return new Promise((resolve, reject) => {
            requestQueue[correlationId] = {
                callback: (decoder) => resolve(api.response(decoder)),
            };
            connection.socket.write(buffer, (err) => (err ? reject(err) : undefined));
        });
    };
    connection.socket.on("data", handleData);
    return { sendRequest };
};
exports.createRequestHandler = createRequestHandler;
```
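
The request handler frames every request as a 4-byte big-endian length prefix followed by the standard Kafka request header (apiKey, apiVersion, correlationId, nullable clientId string) and the API-specific body, then matches the response by the echoed correlationId. The following is an illustrative sketch of that header layout using plain Node buffers, independent of the package's own encoder.

```ts
// Illustrative only: builds the same header layout that sendRequest above writes via createEncoder.
import { Buffer } from "node:buffer";

function encodeRequestHeader(apiKey: number, apiVersion: number, correlationId: number, clientId: string | null): Buffer {
    const id = clientId === null ? null : Buffer.from(clientId, "utf8");
    const header = Buffer.alloc(2 + 2 + 4 + 2 + (id?.length ?? 0));
    let offset = 0;
    offset = header.writeInt16BE(apiKey, offset);
    offset = header.writeInt16BE(apiVersion, offset);
    offset = header.writeInt32BE(correlationId, offset);
    offset = header.writeInt16BE(id === null ? -1 : id.length, offset); // nullable STRING: int16 length, -1 for null
    if (id) {
        id.copy(header, offset);
    }

    // The int32 size prefix covers the header plus the API-specific body; the body is omitted in this sketch.
    const size = Buffer.alloc(4);
    size.writeInt32BE(header.length, 0);
    return Buffer.concat([size, header]);
}

// Example: ApiVersions (apiKey 18), version 0, with clientId "kafkats".
console.log(encodeRequestHeader(18, 0, 1, "kafkats"));
```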

package/dist/src/request-handler.test.js (@@ -0,0 +1,321 @@)

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const vitest_1 = require("vitest");
const api_versions_1 = require("./api/api-versions");
const create_topics_1 = require("./api/create-topics");
const fetch_1 = require("./api/fetch");
const metadata_1 = require("./api/metadata");
const connection_1 = require("./connection");
const request_handler_1 = require("./request-handler");
const delete_topics_1 = require("./api/delete-topics");
const produce_1 = require("./api/produce");
const find_coordinator_1 = require("./api/find-coordinator");
const join_group_1 = require("./api/join-group");
const crypto_1 = require("crypto");
const sync_group_1 = require("./api/sync-group");
const offset_commit_1 = require("./api/offset-commit");
const offset_fetch_1 = require("./api/offset-fetch");
const heartbeat_1 = require("./api/heartbeat");
const leave_group_1 = require("./api/leave-group");
const init_producer_id_1 = require("./api/init-producer-id");
vitest_1.describe.sequential("Request handler", () => {
    const connection = (0, connection_1.createConnection)({ host: "localhost", port: 9092 });
    let sendRequest;
    const groupId = (0, crypto_1.randomBytes)(16).toString("hex");
    (0, vitest_1.beforeAll)(async () => {
        await connection.connect();
        const handler = (0, request_handler_1.createRequestHandler)({ clientId: 'kafkats', connection });
        sendRequest = handler.sendRequest;
        const metadataResult = await sendRequest(metadata_1.METADATA, {
            topics: null,
            allowTopicAutoCreation: false,
            includeTopicAuthorizedOperations: false,
        });
        if (metadataResult.topics.some((topic) => topic.name === "kafkats-test-topic")) {
            await sendRequest(delete_topics_1.DELETE_TOPICS, {
                topics: [{ name: "kafkats-test-topic", topicId: null }],
                timeoutMs: 10000,
            });
        }
    });
    (0, vitest_1.afterAll)(async () => {
        await connection.disconnect();
    });
    (0, vitest_1.it)("should request api versions", async () => {
        const result = await sendRequest(api_versions_1.API_VERSIONS, {});
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    let topicId = "d6718d178e1b47c886441ad2d19faea5";
    (0, vitest_1.it)("should create topics", async () => {
        const result = await sendRequest(create_topics_1.CREATE_TOPICS, {
            topics: [
                {
                    name: "kafkats-test-topic",
                    numPartitions: 1,
                    replicationFactor: 1,
                    assignments: [],
                    configs: [],
                },
            ],
            timeoutMs: 10000,
            validateOnly: false,
        });
        topicId = result.topics[0].topicId;
        console.log({ topicId });
        result.topics.forEach((topic) => {
            topic.topicId = "Any<UUID>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should request metadata for all topics", async () => {
        const result = await sendRequest(metadata_1.METADATA, {
            topics: null,
            allowTopicAutoCreation: false,
            includeTopicAuthorizedOperations: false,
        });
        result.topics.forEach((topic) => {
            topic.topicId = "Any<UUID>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should request metadata for a topic", async () => {
        const result = await sendRequest(metadata_1.METADATA, {
            topics: [{ id: topicId, name: "kafkats-test-topic" }],
            allowTopicAutoCreation: false,
            includeTopicAuthorizedOperations: false,
        });
        result.topics.forEach((topic) => {
            topic.topicId = "Any<UUID>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)('should init producer id', async () => {
        const result = await sendRequest(init_producer_id_1.INIT_PRODUCER_ID, {
            transactionalId: null,
            transactionTimeoutMs: 0,
            producerId: 0n,
            producerEpoch: 0,
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should produce messages", async () => {
        const now = Date.now();
        const result = await sendRequest(produce_1.PRODUCE, {
            transactionalId: null,
            timeoutMs: 10000,
            acks: 1,
            topicData: [
                {
                    name: "kafkats-test-topic",
                    partitionData: [
                        {
                            index: 0,
                            baseOffset: 0n,
                            partitionLeaderEpoch: 0,
                            attributes: 0,
                            baseSequence: 0,
                            baseTimestamp: BigInt(now),
                            lastOffsetDelta: 0,
                            maxTimestamp: BigInt(now),
                            producerEpoch: 0,
                            producerId: 9n,
                            records: [
                                {
                                    attributes: 0,
                                    offsetDelta: 0,
                                    timestampDelta: 0n,
                                    key: "key",
                                    value: "value",
                                    headers: [
                                        {
                                            key: "header-key",
                                            value: "header-value",
                                        },
                                    ],
                                },
                            ],
                        },
                    ],
                },
            ],
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should fetch messages", async () => {
        const result = await sendRequest(fetch_1.FETCH, {
            maxWaitMs: 100,
            minBytes: 1,
            maxBytes: 10485760,
            isolationLevel: 1,
            sessionId: 0,
            sessionEpoch: -1,
            topics: [
                {
                    topicId,
                    partitions: [
                        {
                            partition: 0,
                            currentLeaderEpoch: -1,
                            fetchOffset: 0n,
                            lastFetchedEpoch: 0,
                            logStartOffset: -1n,
                            partitionMaxBytes: 10485760,
                        },
                    ],
                },
            ],
            forgottenTopicsData: [],
            rackId: "",
        });
        result.responses.forEach((response) => {
            response.topicId = "Any<UUID>";
            response.partitions.forEach((partition) => {
                partition.records.forEach((record) => {
                    (0, vitest_1.expect)(record.baseTimestamp).toBeGreaterThan(1721926744730n);
                    (0, vitest_1.expect)(record.maxTimestamp).toBeGreaterThan(1721926744730n);
                    (0, vitest_1.expect)(record.crc).toBeGreaterThan(0);
                    record.baseTimestamp = 0n;
                    record.maxTimestamp = 0n;
                    record.crc = 0;
                });
            });
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should find coordinator", async () => {
        const result = await sendRequest(find_coordinator_1.FIND_COORDINATOR, { keyType: find_coordinator_1.KEY_TYPE.GROUP, keys: [groupId] });
        result.coordinators.forEach((coordinator) => {
            coordinator.key = "Any<String>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    let memberId = "";
    (0, vitest_1.it)("should fail join group request with new memberId", async () => {
        const result = await sendRequest(join_group_1.JOIN_GROUP, {
            groupId: groupId,
            sessionTimeoutMs: 30000,
            rebalanceTimeoutMs: 60000,
            memberId,
            groupInstanceId: null,
            protocolType: "consumer",
            protocols: [
                {
                    name: "RoundRobinAssigner",
                    metadata: { version: 0, topics: ["kafkats-test-topic"] },
                },
            ],
            reason: null,
        });
        memberId = String(result.memberId ?? "");
        result.memberId = "Any<UUID>";
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should join group", async () => {
        const result = await sendRequest(join_group_1.JOIN_GROUP, {
            groupId: groupId,
            sessionTimeoutMs: 30000,
            rebalanceTimeoutMs: 60000,
            memberId,
            groupInstanceId: null,
            protocolType: "consumer",
            protocols: [
                {
                    name: "RoundRobinAssigner",
                    metadata: { version: 0, topics: ["kafkats-test-topic"] },
                },
            ],
            reason: null,
        });
        result.memberId = "Any<UUID>";
        result.leader = "Any<UUID>";
        result.members.forEach((member) => {
            member.memberId = "Any<UUID>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should sync group", async () => {
        const result = await sendRequest(sync_group_1.SYNC_GROUP, {
            groupId,
            generationId: 1,
            memberId,
            groupInstanceId: null,
            protocolType: "consumer",
            protocolName: "RoundRobinAssigner",
            assignments: [
                {
                    memberId,
                    assignment: { "kafka-test-topic": [0] },
                },
            ],
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should commit offsets", async () => {
        const result = await sendRequest(offset_commit_1.OFFSET_COMMIT, {
            groupId,
            generationIdOrMemberEpoch: 1,
            memberId,
            groupInstanceId: null,
            topics: [
                {
                    name: "kafkats-test-topic",
                    partitions: [
                        { partitionIndex: 0, committedOffset: 1n, committedLeaderEpoch: 0, committedMetadata: null },
                    ],
                },
            ],
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should fetch offsets", async () => {
        const result = await sendRequest(offset_fetch_1.OFFSET_FETCH, {
            groups: [
                {
                    groupId,
                    memberId,
                    memberEpoch: 0,
                    topics: [
                        {
                            name: "kafkats-test-topic",
                            partitionIndexes: [0],
                        },
                    ],
                },
            ],
            requireStable: false,
        });
        result.groups.forEach((group) => {
            group.groupId = "Any<String>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should heartbeat", async () => {
        const result = await sendRequest(heartbeat_1.HEARTBEAT, {
            groupId,
            generationId: 1,
            memberId,
            groupInstanceId: null,
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    (0, vitest_1.it)("should leave group", async () => {
        const result = await sendRequest(leave_group_1.LEAVE_GROUP, {
            groupId,
            members: [{ memberId, groupInstanceId: null, reason: null }],
        });
        result.members.forEach((member) => {
            member.memberId = "Any<UUID>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
    vitest_1.it.skip("should delete topics", async () => {
        const result = await sendRequest(delete_topics_1.DELETE_TOPICS, {
            topics: [{ name: "kafkats-test-topic", topicId: null }],
            timeoutMs: 10000,
        });
        result.responses.forEach((response) => {
            response.topicId = "Any<UUID>";
        });
        (0, vitest_1.expect)(result).toMatchSnapshot();
    });
});
```