kafka-ts 0.0.3-beta → 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +72 -8
- package/dist/api/api-versions.d.ts +9 -0
- package/{src/api/api-versions.ts → dist/api/api-versions.js} +8 -5
- package/dist/api/create-topics.d.ts +38 -0
- package/dist/api/create-topics.js +53 -0
- package/dist/api/delete-topics.d.ts +18 -0
- package/dist/api/delete-topics.js +33 -0
- package/dist/api/fetch.d.ts +84 -0
- package/dist/api/fetch.js +142 -0
- package/dist/api/find-coordinator.d.ts +21 -0
- package/{src/api/find-coordinator.ts → dist/api/find-coordinator.js} +14 -14
- package/dist/api/heartbeat.d.ts +11 -0
- package/dist/api/heartbeat.js +27 -0
- package/dist/api/index.d.ts +576 -0
- package/{src/api/index.ts → dist/api/index.js} +42 -41
- package/dist/api/init-producer-id.d.ts +13 -0
- package/dist/api/init-producer-id.js +29 -0
- package/dist/api/join-group.d.ts +34 -0
- package/dist/api/join-group.js +51 -0
- package/dist/api/leave-group.d.ts +19 -0
- package/dist/api/leave-group.js +39 -0
- package/dist/api/list-offsets.d.ts +29 -0
- package/dist/api/list-offsets.js +48 -0
- package/dist/api/metadata.d.ts +40 -0
- package/{src/api/metadata.ts → dist/api/metadata.js} +18 -26
- package/dist/api/offset-commit.d.ts +28 -0
- package/dist/api/offset-commit.js +48 -0
- package/dist/api/offset-fetch.d.ts +31 -0
- package/dist/api/offset-fetch.js +55 -0
- package/dist/api/produce.d.ts +54 -0
- package/{src/api/produce.ts → dist/api/produce.js} +55 -102
- package/dist/api/sasl-authenticate.d.ts +11 -0
- package/dist/api/sasl-authenticate.js +23 -0
- package/dist/api/sasl-handshake.d.ts +6 -0
- package/dist/api/sasl-handshake.js +19 -0
- package/dist/api/sync-group.d.ts +24 -0
- package/dist/api/sync-group.js +36 -0
- package/dist/auth/index.d.ts +2 -0
- package/dist/auth/index.js +8 -0
- package/dist/auth/plain.d.ts +5 -0
- package/dist/auth/plain.js +12 -0
- package/dist/auth/scram.d.ts +9 -0
- package/dist/auth/scram.js +40 -0
- package/dist/broker.d.ts +30 -0
- package/dist/broker.js +55 -0
- package/dist/client.d.ts +22 -0
- package/dist/client.js +36 -0
- package/dist/cluster.d.ts +27 -0
- package/dist/cluster.js +70 -0
- package/dist/cluster.test.d.ts +1 -0
- package/{src/cluster.test.ts → dist/cluster.test.js} +87 -113
- package/dist/codecs/gzip.d.ts +2 -0
- package/dist/codecs/gzip.js +8 -0
- package/dist/codecs/index.d.ts +2 -0
- package/dist/codecs/index.js +17 -0
- package/dist/codecs/none.d.ts +2 -0
- package/dist/codecs/none.js +7 -0
- package/dist/codecs/types.d.ts +5 -0
- package/dist/codecs/types.js +2 -0
- package/dist/connection.d.ts +26 -0
- package/dist/connection.js +175 -0
- package/dist/consumer/consumer-group.d.ts +41 -0
- package/dist/consumer/consumer-group.js +215 -0
- package/dist/consumer/consumer-metadata.d.ts +7 -0
- package/dist/consumer/consumer-metadata.js +14 -0
- package/dist/consumer/consumer.d.ts +44 -0
- package/dist/consumer/consumer.js +225 -0
- package/dist/consumer/fetch-manager.d.ts +33 -0
- package/dist/consumer/fetch-manager.js +140 -0
- package/dist/consumer/fetcher.d.ts +25 -0
- package/dist/consumer/fetcher.js +64 -0
- package/dist/consumer/offset-manager.d.ts +22 -0
- package/dist/consumer/offset-manager.js +66 -0
- package/dist/consumer/processor.d.ts +19 -0
- package/dist/consumer/processor.js +59 -0
- package/dist/distributors/assignments-to-replicas.d.ts +16 -0
- package/{src/distributors/assignments-to-replicas.ts → dist/distributors/assignments-to-replicas.js} +15 -41
- package/dist/distributors/assignments-to-replicas.test.d.ts +1 -0
- package/dist/distributors/assignments-to-replicas.test.js +40 -0
- package/dist/distributors/messages-to-topic-partition-leaders.d.ts +17 -0
- package/dist/distributors/messages-to-topic-partition-leaders.js +15 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.d.ts +1 -0
- package/dist/distributors/messages-to-topic-partition-leaders.test.js +30 -0
- package/dist/distributors/partitioner.d.ts +7 -0
- package/dist/distributors/partitioner.js +23 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.js +26 -0
- package/dist/metadata.d.ts +24 -0
- package/dist/metadata.js +106 -0
- package/dist/producer/producer.d.ts +24 -0
- package/dist/producer/producer.js +131 -0
- package/{src/types.ts → dist/types.d.ts} +4 -4
- package/dist/types.js +2 -0
- package/{src/utils/api.ts → dist/utils/api.d.ts} +2 -4
- package/dist/utils/api.js +5 -0
- package/dist/utils/crypto.d.ts +8 -0
- package/dist/utils/crypto.js +18 -0
- package/dist/utils/decoder.d.ts +30 -0
- package/{src/utils/decoder.ts → dist/utils/decoder.js} +41 -57
- package/dist/utils/delay.d.ts +1 -0
- package/dist/utils/delay.js +5 -0
- package/dist/utils/encoder.d.ts +28 -0
- package/{src/utils/encoder.ts → dist/utils/encoder.js} +50 -66
- package/dist/utils/error.d.ts +11 -0
- package/dist/utils/error.js +27 -0
- package/dist/utils/logger.d.ts +9 -0
- package/dist/utils/logger.js +32 -0
- package/dist/utils/memo.d.ts +1 -0
- package/{src/utils/memo.ts → dist/utils/memo.js} +7 -3
- package/dist/utils/murmur2.d.ts +3 -0
- package/dist/utils/murmur2.js +40 -0
- package/dist/utils/retrier.d.ts +10 -0
- package/dist/utils/retrier.js +22 -0
- package/dist/utils/tracer.d.ts +5 -0
- package/dist/utils/tracer.js +39 -0
- package/package.json +11 -2
- package/.github/workflows/release.yml +0 -17
- package/.prettierrc +0 -8
- package/certs/ca.crt +0 -29
- package/certs/ca.key +0 -52
- package/certs/ca.srl +0 -1
- package/certs/kafka.crt +0 -29
- package/certs/kafka.csr +0 -26
- package/certs/kafka.key +0 -52
- package/certs/kafka.keystore.jks +0 -0
- package/certs/kafka.truststore.jks +0 -0
- package/docker-compose.yml +0 -104
- package/examples/package-lock.json +0 -31
- package/examples/package.json +0 -14
- package/examples/src/client.ts +0 -9
- package/examples/src/consumer.ts +0 -18
- package/examples/src/create-topic.ts +0 -44
- package/examples/src/producer.ts +0 -24
- package/examples/src/replicator.ts +0 -25
- package/examples/src/utils/delay.ts +0 -1
- package/examples/src/utils/json.ts +0 -1
- package/examples/tsconfig.json +0 -7
- package/log4j.properties +0 -95
- package/scripts/generate-certs.sh +0 -24
- package/src/__snapshots__/request-handler.test.ts.snap +0 -978
- package/src/api/create-topics.ts +0 -78
- package/src/api/delete-topics.ts +0 -42
- package/src/api/fetch.ts +0 -143
- package/src/api/heartbeat.ts +0 -33
- package/src/api/init-producer-id.ts +0 -35
- package/src/api/join-group.ts +0 -67
- package/src/api/leave-group.ts +0 -48
- package/src/api/list-offsets.ts +0 -65
- package/src/api/offset-commit.ts +0 -67
- package/src/api/offset-fetch.ts +0 -74
- package/src/api/sasl-authenticate.ts +0 -21
- package/src/api/sasl-handshake.ts +0 -16
- package/src/api/sync-group.ts +0 -54
- package/src/broker.ts +0 -74
- package/src/client.ts +0 -47
- package/src/cluster.ts +0 -87
- package/src/connection.ts +0 -143
- package/src/consumer/consumer-group.ts +0 -209
- package/src/consumer/consumer-metadata.ts +0 -14
- package/src/consumer/consumer.ts +0 -231
- package/src/consumer/fetch-manager.ts +0 -179
- package/src/consumer/fetcher.ts +0 -57
- package/src/consumer/offset-manager.ts +0 -93
- package/src/consumer/processor.ts +0 -47
- package/src/distributors/assignments-to-replicas.test.ts +0 -43
- package/src/distributors/messages-to-topic-partition-leaders.test.ts +0 -32
- package/src/distributors/messages-to-topic-partition-leaders.ts +0 -19
- package/src/index.ts +0 -4
- package/src/metadata.ts +0 -122
- package/src/producer/producer.ts +0 -132
- package/src/utils/debug.ts +0 -9
- package/src/utils/delay.ts +0 -1
- package/src/utils/error.ts +0 -21
- package/src/utils/retrier.ts +0 -39
- package/src/utils/tracer.ts +0 -31
- package/tsconfig.json +0 -17
package/src/consumer/consumer.ts
DELETED
@@ -1,231 +0,0 @@
-import { API, API_ERROR } from '../api';
-import { IsolationLevel } from '../api/fetch';
-import { Assignment } from '../api/sync-group';
-import { Cluster } from '../cluster';
-import { distributeAssignmentsToNodes } from '../distributors/assignments-to-replicas';
-import { Message } from '../types';
-import { delay } from '../utils/delay';
-import { ConnectionError, KafkaTSApiError } from '../utils/error';
-import { defaultRetrier, Retrier } from '../utils/retrier';
-import { ConsumerGroup } from './consumer-group';
-import { ConsumerMetadata } from './consumer-metadata';
-import { FetchManager, Granularity } from './fetch-manager';
-import { OffsetManager } from './offset-manager';
-
-export type ConsumerOptions = {
-    topics: string[];
-    groupId?: string | null;
-    groupInstanceId?: string | null;
-    rackId?: string;
-    isolationLevel?: IsolationLevel;
-    sessionTimeoutMs?: number;
-    rebalanceTimeoutMs?: number;
-    maxWaitMs?: number;
-    minBytes?: number;
-    maxBytes?: number;
-    partitionMaxBytes?: number;
-    allowTopicAutoCreation?: boolean;
-    fromBeginning?: boolean;
-    retrier?: Retrier;
-    granularity?: Granularity;
-    concurrency?: number;
-} & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
-
-export class Consumer {
-    private options: Required<ConsumerOptions>;
-    private metadata: ConsumerMetadata;
-    private consumerGroup: ConsumerGroup | undefined;
-    private offsetManager: OffsetManager;
-    private fetchManager?: FetchManager;
-    private stopHook: (() => void) | undefined;
-
-    constructor(
-        private cluster: Cluster,
-        options: ConsumerOptions,
-    ) {
-        this.options = {
-            ...options,
-            groupId: options.groupId ?? null,
-            groupInstanceId: options.groupInstanceId ?? null,
-            rackId: options.rackId ?? '',
-            sessionTimeoutMs: options.sessionTimeoutMs ?? 30_000,
-            rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
-            maxWaitMs: options.maxWaitMs ?? 5000,
-            minBytes: options.minBytes ?? 1,
-            maxBytes: options.maxBytes ?? 1_000_000,
-            partitionMaxBytes: options.partitionMaxBytes ?? 1_000_000,
-            isolationLevel: options.isolationLevel ?? IsolationLevel.READ_UNCOMMITTED,
-            allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
-            fromBeginning: options.fromBeginning ?? false,
-            retrier: options.retrier ?? defaultRetrier,
-            granularity: options.granularity ?? 'broker',
-            concurrency: options.concurrency ?? 1,
-        };
-
-        this.metadata = new ConsumerMetadata({ cluster: this.cluster });
-        this.offsetManager = new OffsetManager({
-            cluster: this.cluster,
-            metadata: this.metadata,
-            isolationLevel: this.options.isolationLevel,
-        });
-        this.consumerGroup = this.options.groupId
-            ? new ConsumerGroup({
-                  cluster: this.cluster,
-                  topics: this.options.topics,
-                  groupId: this.options.groupId,
-                  groupInstanceId: this.options.groupInstanceId,
-                  sessionTimeoutMs: this.options.sessionTimeoutMs,
-                  rebalanceTimeoutMs: this.options.rebalanceTimeoutMs,
-                  metadata: this.metadata,
-                  offsetManager: this.offsetManager,
-              })
-            : undefined;
-    }
-
-    public async start(): Promise<void> {
-        const { topics, allowTopicAutoCreation, fromBeginning } = this.options;
-
-        this.stopHook = undefined;
-
-        try {
-            await this.cluster.connect();
-            await this.metadata.fetchMetadataIfNecessary({ topics, allowTopicAutoCreation });
-            this.metadata.setAssignment(this.metadata.getTopicPartitions());
-            await this.offsetManager.fetchOffsets({ fromBeginning });
-            await this.consumerGroup?.join();
-        } catch (error) {
-            console.error(error);
-            console.debug(`Restarting consumer in 1 second...`);
-            await delay(1000);
-
-            if (this.stopHook) return (this.stopHook as () => void)();
-            return this.close(true).then(() => this.start());
-        }
-        setImmediate(() => this.startFetchManager());
-    }
-
-    public async close(force = false): Promise<void> {
-        if (!force) {
-            await new Promise<void>(async (resolve) => {
-                this.stopHook = resolve;
-                await this.fetchManager?.stop();
-            });
-        }
-        await this.consumerGroup
-            ?.leaveGroup()
-            .catch((error) => console.warn(`Failed to leave group: ${error.message}`));
-        await this.cluster.disconnect().catch((error) => console.warn(`Failed to disconnect: ${error.message}`));
-    }
-
-    private startFetchManager = async () => {
-        const { granularity, concurrency } = this.options;
-
-        while (!this.stopHook) {
-            const nodeAssignments = Object.entries(
-                distributeAssignmentsToNodes(
-                    this.metadata.getAssignment(),
-                    this.metadata.getTopicPartitionReplicaIds(),
-                ),
-            ).map(([nodeId, assignment]) => ({ nodeId: parseInt(nodeId), assignment }));
-
-            const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
-            const numProcessors = Math.min(concurrency, numPartitions);
-
-            this.fetchManager = new FetchManager({
-                fetch: this.fetch.bind(this),
-                process: this.process.bind(this),
-                metadata: this.metadata,
-                consumerGroup: this.consumerGroup,
-                nodeAssignments,
-                granularity,
-                concurrency: numProcessors,
-            });
-
-            try {
-                await this.fetchManager.start();
-
-                if (!nodeAssignments.length) {
-                    console.debug('No partitions assigned. Waiting for reassignment...');
-                    await delay(this.options.maxWaitMs);
-                    await this.consumerGroup?.handleLastHeartbeat();
-                }
-            } catch (error) {
-                await this.fetchManager.stop();
-
-                if ((error as KafkaTSApiError).errorCode === API_ERROR.REBALANCE_IN_PROGRESS) {
-                    console.debug('Rebalance in progress...');
-                    continue;
-                }
-                if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
-                    console.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
-                    this.close();
-                    break;
-                }
-                if (
-                    error instanceof ConnectionError ||
-                    (error instanceof KafkaTSApiError && error.errorCode === API_ERROR.NOT_COORDINATOR)
-                ) {
-                    console.debug(`${error.message}. Restarting consumer...`);
-                    this.close().then(() => this.start());
-                    break;
-                }
-                console.error(error);
-                this.close();
-                break;
-            }
-        }
-        this.stopHook?.();
-    };
-
-    private async process(messages: Required<Message>[]) {
-        const { options } = this;
-        const { retrier } = options;
-
-        if ('onBatch' in options) {
-            await retrier(() => options.onBatch(messages));
-
-            messages.forEach(({ topic, partition, offset }) =>
-                this.offsetManager.resolve(topic, partition, offset + 1n),
-            );
-        } else if ('onMessage' in options) {
-            try {
-                for (const message of messages) {
-                    await retrier(() => options.onMessage(message));
-
-                    const { topic, partition, offset } = message;
-                    this.offsetManager.resolve(topic, partition, offset + 1n);
-                }
-            } catch (error) {
-                await this.consumerGroup?.offsetCommit().catch(() => {});
-                throw error;
-            }
-        }
-        await this.consumerGroup?.offsetCommit();
-    }
-
-    private fetch(nodeId: number, assignment: Assignment) {
-        const { rackId, maxWaitMs, minBytes, maxBytes, partitionMaxBytes, isolationLevel } = this.options;
-
-        return this.cluster.sendRequestToNode(nodeId)(API.FETCH, {
-            maxWaitMs,
-            minBytes,
-            maxBytes,
-            isolationLevel,
-            sessionId: 0,
-            sessionEpoch: -1,
-            topics: Object.entries(assignment).map(([topic, partitions]) => ({
-                topicId: this.metadata.getTopicIdByName(topic),
-                partitions: partitions.map((partition) => ({
-                    partition,
-                    currentLeaderEpoch: -1,
-                    fetchOffset: this.offsetManager.getCurrentOffset(topic, partition),
-                    lastFetchedEpoch: -1,
-                    logStartOffset: 0n,
-                    partitionMaxBytes,
-                })),
-            })),
-            forgottenTopicsData: [],
-            rackId,
-        });
-    }
-}
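For context on what this deleted module exposed, here is a minimal usage sketch of the 0.0.3-beta `Consumer`. It is a sketch only: how a `Cluster` is constructed depends on `src/client.ts`/`src/cluster.ts`, which are not reproduced in this hunk, so it is declared as a placeholder. The options union above requires exactly one of `onMessage` or `onBatch`.

```ts
// Sketch only — mirrors the deleted src/consumer/consumer.ts API shown above.
import { Cluster } from '../cluster';
import { Consumer } from './consumer';

declare const cluster: Cluster; // assumed: created and connected elsewhere (client.ts, not shown here)

const consumer = new Consumer(cluster, {
    topics: ['my-topic'],
    groupId: 'my-group',   // omit (or pass null) to consume without a consumer group
    fromBeginning: true,   // defaults to false, i.e. start from the latest offsets
    onMessage: async (message) => {
        // offsets are resolved as offset + 1n after this handler resolves (see process() above)
        console.log(message.topic, message.partition, message.offset);
    },
});

await consumer.start(); // connects, fetches metadata and offsets, joins the group, schedules the fetch loop
// ...
await consumer.close(); // leaves the group and disconnects
```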
package/src/consumer/fetch-manager.ts
DELETED
@@ -1,179 +0,0 @@
-import EventEmitter from 'events';
-import { API } from '../api';
-import { Assignment } from '../api/sync-group';
-import { Metadata } from '../metadata';
-import { Batch, Message } from '../types';
-import { KafkaTSError } from '../utils/error';
-import { createTracer } from '../utils/tracer';
-import { ConsumerGroup } from './consumer-group';
-import { Fetcher } from './fetcher';
-import { Processor } from './processor';
-
-const trace = createTracer('FetchManager');
-
-export type Granularity = 'partition' | 'topic' | 'broker';
-
-type FetchManagerOptions = {
-    fetch: (nodeId: number, assignment: Assignment) => Promise<ReturnType<(typeof API.FETCH)['response']>>;
-    process: (batch: Batch) => Promise<void>;
-    metadata: Metadata;
-    consumerGroup?: ConsumerGroup;
-    nodeAssignments: { nodeId: number; assignment: Assignment }[];
-    granularity: Granularity;
-    concurrency: number;
-};
-
-type Checkpoint = { kind: 'checkpoint'; fetcherId: number };
-type Entry = Batch | Checkpoint;
-
-export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number]; stop: [] }> {
-    private queue: Entry[] = [];
-    private isRunning = false;
-    private fetchers: Fetcher[];
-    private processors: Processor[];
-
-    constructor(private options: FetchManagerOptions) {
-        super();
-
-        const { fetch, process, consumerGroup, nodeAssignments, concurrency } = this.options;
-
-        this.fetchers = nodeAssignments.map(
-            ({ nodeId, assignment }, index) =>
-                new Fetcher(index, {
-                    nodeId,
-                    assignment,
-                    consumerGroup,
-                    fetch,
-                    onResponse: this.onResponse.bind(this),
-                }),
-        );
-        this.processors = Array.from({ length: concurrency }).map(
-            () => new Processor({ process, poll: this.poll.bind(this) }),
-        );
-    }
-
-    public async start() {
-        this.queue = [];
-        this.isRunning = true;
-
-        try {
-            await Promise.all([
-                ...this.fetchers.map((fetcher) => fetcher.loop()),
-                ...this.processors.map((processor) => processor.loop()),
-            ]);
-        } finally {
-            this.isRunning = false;
-            this.emit('stop');
-        }
-    }
-
-    @trace()
-    public async stop() {
-        this.isRunning = false;
-        this.emit('stop');
-
-        await Promise.all([
-            ...this.fetchers.map((fetcher) => fetcher.stop()),
-            ...this.processors.map((processor) => processor.stop()),
-        ]);
-    }
-
-    @trace()
-    public async poll(): Promise<Batch> {
-        if (!this.isRunning) {
-            return [];
-        }
-
-        const batch = this.queue.shift();
-        if (!batch) {
-            await new Promise<void>((resolve) => {
-                const onData = () => {
-                    this.removeListener('stop', onStop);
-                    resolve();
-                };
-                const onStop = () => {
-                    this.removeListener('data', onData);
-                    resolve();
-                };
-                this.once('data', onData);
-                this.once('stop', onStop);
-            });
-            return this.poll();
-        }
-
-        if ('kind' in batch && batch.kind === 'checkpoint') {
-            this.emit('checkpoint', batch.fetcherId);
-            return this.poll();
-        }
-
-        return batch as Exclude<Entry, Checkpoint>;
-    }
-
-    private async onResponse(fetcherId: number, response: ReturnType<(typeof API.FETCH)['response']>) {
-        const { metadata, granularity } = this.options;
-
-        const batches = fetchResponseToBatches(response, granularity, metadata);
-        if (batches.length) {
-            this.queue.push(...batches);
-            this.queue.push({ kind: 'checkpoint', fetcherId });
-
-            this.emit('data');
-            await new Promise<void>((resolve) => {
-                const onCheckpoint = (id: number) => {
-                    if (id === fetcherId) {
-                        this.removeListener('stop', onStop);
-                        resolve();
-                    }
-                };
-                const onStop = () => {
-                    this.removeListener('checkpoint', onCheckpoint);
-                    resolve();
-                };
-                this.once('checkpoint', onCheckpoint);
-                this.once('stop', onStop);
-            });
-        }
-    }
-}
-
-const fetchResponseToBatches = (
-    batch: ReturnType<typeof API.FETCH.response>,
-    granularity: Granularity,
-    metadata: Metadata,
-): Batch[] => {
-    const brokerTopics = batch.responses.map(({ topicId, partitions }) =>
-        partitions.map(({ partitionIndex, records }) =>
-            records.flatMap(({ baseTimestamp, baseOffset, records }) =>
-                records.map(
-                    (message): Required<Message> => ({
-                        topic: metadata.getTopicNameById(topicId),
-                        partition: partitionIndex,
-                        key: message.key ?? null,
-                        value: message.value ?? null,
-                        headers: Object.fromEntries(message.headers.map(({ key, value }) => [key, value])),
-                        timestamp: baseTimestamp + BigInt(message.timestampDelta),
-                        offset: baseOffset + BigInt(message.offsetDelta),
-                    }),
-                ),
-            ),
-        ),
-    );
-
-    switch (granularity) {
-        case 'broker':
-            const messages = brokerTopics.flatMap((topicPartition) =>
-                topicPartition.flatMap((partitionMessages) => partitionMessages),
-            );
-            return messages.length ? [messages] : [];
-        case 'topic':
-            return brokerTopics
-                .map((topicPartition) => topicPartition.flatMap((partitionMessages) => partitionMessages))
-                .filter((messages) => messages.length);
-        case 'partition':
-            return brokerTopics.flatMap((topicPartition) =>
-                topicPartition.map((partitionMessages) => partitionMessages),
-            );
-        default:
-            throw new KafkaTSError(`Unhandled batch granularity: ${granularity}`);
    }
-};
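The deleted `fetchResponseToBatches` above is what the consumer's `granularity` option controls. Below is a simplified, self-contained sketch of the same grouping rule; the `Msg` type is a hypothetical stand-in for `Required<Message>`, and the nesting mirrors the per-topic/per-partition arrays built from one fetch response.

```ts
// Simplified sketch of the grouping in fetchResponseToBatches above.
// perTopicPartition[t][p] holds the decoded messages of one partition from a single
// fetch response; the result mirrors the Batch arrays handed to each Processor.
type Msg = { topic: string; partition: number; offset: bigint };

const toBatches = (perTopicPartition: Msg[][][], granularity: 'broker' | 'topic' | 'partition'): Msg[][] => {
    switch (granularity) {
        case 'broker': {
            // everything from the response becomes a single batch
            const all = perTopicPartition.flat(2);
            return all.length ? [all] : [];
        }
        case 'topic':
            // one batch per topic, empty topics dropped
            return perTopicPartition.map((partitions) => partitions.flat()).filter((msgs) => msgs.length);
        case 'partition':
            // one batch per partition (as in the original, empty partitions are kept)
            return perTopicPartition.flatMap((partitions) => partitions);
    }
};
```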
package/src/consumer/fetcher.ts
DELETED
@@ -1,57 +0,0 @@
-import { EventEmitter } from 'stream';
-import { API } from '../api';
-import { Assignment } from '../api/sync-group';
-import { createTracer } from '../utils/tracer';
-import { ConsumerGroup } from './consumer-group';
-
-const trace = createTracer('Fetcher');
-
-type FetcherOptions = {
-    nodeId: number;
-    assignment: Assignment;
-    consumerGroup?: ConsumerGroup;
-    fetch: (nodeId: number, assignment: Assignment) => Promise<ReturnType<(typeof API.FETCH)['response']>>;
-    onResponse: (fetcherId: number, response: ReturnType<(typeof API.FETCH)['response']>) => Promise<void>;
-};
-
-export class Fetcher extends EventEmitter<{ stop: []; stopped: []; data: []; drain: [] }> {
-    private isRunning = false;
-
-    constructor(
-        private fetcherId: number,
-        private options: FetcherOptions,
-    ) {
-        super();
-    }
-
-    public async loop() {
-        const { nodeId, assignment, consumerGroup, fetch, onResponse } = this.options;
-
-        this.isRunning = true;
-        this.once('stop', () => (this.isRunning = false));
-
-        try {
-            while (this.isRunning) {
-                const response = await fetch(nodeId, assignment);
-                await consumerGroup?.handleLastHeartbeat();
-                await onResponse(this.fetcherId, response);
-                await consumerGroup?.handleLastHeartbeat();
-            }
-        } finally {
-            this.isRunning = false;
-            this.emit('stopped');
-        }
-    }
-
-    @trace()
-    public async stop() {
-        if (!this.isRunning) {
-            return;
-        }
-
-        this.emit('stop');
-        return new Promise<void>((resolve) => {
-            this.once('stopped', resolve);
-        });
-    }
-}
package/src/consumer/offset-manager.ts
DELETED
@@ -1,93 +0,0 @@
-import { API } from '../api';
-import { IsolationLevel } from '../api/fetch';
-import { Assignment } from '../api/sync-group';
-import { Cluster } from '../cluster';
-import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
-import { ConsumerMetadata } from './consumer-metadata';
-
-type OffsetManagerOptions = {
-    cluster: Cluster;
-    metadata: ConsumerMetadata;
-    isolationLevel: IsolationLevel;
-};
-
-export class OffsetManager {
-    private currentOffsets: Record<string, Record<number, bigint>> = {};
-    public pendingOffsets: Record<string, Record<number, bigint>> = {};
-
-    constructor(private options: OffsetManagerOptions) {}
-
-    public getCurrentOffset(topic: string, partition: number) {
-        return this.currentOffsets[topic]?.[partition] ?? 0n;
-    }
-
-    public resolve(topic: string, partition: number, offset: bigint) {
-        this.pendingOffsets[topic] ??= {};
-        this.pendingOffsets[topic][partition] = offset;
-
-        this.currentOffsets[topic] ??= {};
-        this.currentOffsets[topic][partition] = offset;
-    }
-
-    public flush() {
-        this.pendingOffsets = {};
-    }
-
-    public async fetchOffsets(options: { fromBeginning: boolean }) {
-        const { metadata } = this.options;
-
-        const topicPartitions = Object.entries(metadata.getAssignment()).flatMap(([topic, partitions]) =>
-            partitions.map((partition) => ({ topic, partition })),
-        );
-        const nodeTopicPartitions = distributeMessagesToTopicPartitionLeaders(
-            topicPartitions,
-            metadata.getTopicPartitionLeaderIds(),
-        );
-
-        await Promise.all(
-            Object.entries(nodeTopicPartitions).map(([nodeId, topicPartitions]) =>
-                this.listOffsets({
-                    ...options,
-                    nodeId: parseInt(nodeId),
-                    nodeAssignment: Object.fromEntries(
-                        Object.entries(topicPartitions).map(
-                            ([topicName, partitions]) =>
-                                [topicName, Object.keys(partitions).map(Number)] as [string, number[]],
-                        ),
-                    ),
-                }),
-            ),
-        );
-        this.flush();
-    }
-
-    private async listOffsets({
-        nodeId,
-        nodeAssignment,
-        fromBeginning,
-    }: {
-        nodeId: number;
-        nodeAssignment: Assignment;
-        fromBeginning: boolean;
-    }) {
-        const { cluster, isolationLevel } = this.options;
-
-        const offsets = await cluster.sendRequestToNode(nodeId)(API.LIST_OFFSETS, {
-            replicaId: -1,
-            isolationLevel,
-            topics: Object.entries(nodeAssignment)
-                .flatMap(([topic, partitions]) => partitions.map((partition) => ({ topic, partition })))
-                .map(({ topic, partition }) => ({
-                    name: topic,
-                    partitions: [{ partitionIndex: partition, currentLeaderEpoch: -1, timestamp: -1n }],
-                })),
-        });
-
-        offsets.topics.forEach(({ name, partitions }) => {
-            partitions.forEach(({ partitionIndex, offset }) => {
-                this.resolve(name, partitionIndex, fromBeginning ? 0n : offset);
-            });
-        });
-        this.flush();
-    }
-}
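A small sketch of the offset convention encoded in the deleted `OffsetManager` above: `resolve()` always stores the next offset to fetch, so callers pass processed offset + 1n, and `getCurrentOffset()` falls back to 0n for unseen partitions. This only mirrors the bookkeeping; the real class also needs a `Cluster` and `ConsumerMetadata` to list offsets from brokers.

```ts
// Bookkeeping sketch only — not the real OffsetManager (see listOffsets above for the broker side).
const currentOffsets: Record<string, Record<number, bigint>> = {};

const resolve = (topic: string, partition: number, nextOffset: bigint) => {
    (currentOffsets[topic] ??= {})[partition] = nextOffset;
};
const getCurrentOffset = (topic: string, partition: number) => currentOffsets[topic]?.[partition] ?? 0n;

// after successfully processing offset 41 of my-topic/0:
resolve('my-topic', 0, 41n + 1n);
getCurrentOffset('my-topic', 0); // 42n — used as fetchOffset in the next FETCH request
```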
package/src/consumer/processor.ts
DELETED
@@ -1,47 +0,0 @@
-import { EventEmitter } from 'stream';
-import { Batch } from '../types';
-import { createTracer } from '../utils/tracer';
-
-const trace = createTracer('Processor');
-
-type ProcessorOptions = {
-    poll: () => Promise<Batch>;
-    process: (batch: Batch) => Promise<void>;
-};
-
-export class Processor extends EventEmitter<{ stop: []; stopped: [] }> {
-    private isRunning = false;
-
-    constructor(private options: ProcessorOptions) {
-        super();
-    }
-
-    public async loop() {
-        const { poll, process } = this.options;
-
-        this.isRunning = true;
-        this.once('stop', () => (this.isRunning = false));
-
-        try {
-            while (this.isRunning) {
-                const batch = await poll();
-                await process(batch);
-            }
-        } finally {
-            this.isRunning = false;
-            this.emit('stopped');
-        }
-    }
-
-    @trace()
-    public async stop() {
-        if (!this.isRunning) {
-            return;
-        }
-
-        return new Promise<void>((resolve) => {
-            this.once('stopped', resolve);
-            this.emit('stop');
-        });
-    }
-}
package/src/distributors/assignments-to-replicas.test.ts
DELETED
@@ -1,43 +0,0 @@
-import { describe, expect, it } from 'vitest';
-import { distributeAssignmentsToNodesBalanced, distributeAssignmentsToNodesOptimized } from './assignments-to-replicas';
-
-describe('Distribute assignments to replica ids', () => {
-    describe('distributeAssignmentsToNodesBalanced', () => {
-        it('smoke', () => {
-            const result = distributeAssignmentsToNodesBalanced({ topic: [0, 1] }, { topic: { 0: [0, 1], 1: [1, 2] } });
-            expect(result).toMatchInlineSnapshot(`
-              {
-                "1": {
-                  "topic": [
-                    0,
-                  ],
-                },
-                "2": {
-                  "topic": [
-                    1,
-                  ],
-                },
-              }
-            `);
-        });
-    });
-
-    describe('distributeAssignmentsToNodesOptimized', () => {
-        it('smoke', () => {
-            const result = distributeAssignmentsToNodesOptimized(
-                { topic: [0, 1] },
-                { topic: { 0: [0, 1], 1: [1, 2] } },
-            );
-            expect(result).toMatchInlineSnapshot(`
-              {
-                "1": {
-                  "topic": [
-                    0,
-                    1,
-                  ],
-                },
-              }
-            `);
-        });
-    });
-});
package/src/distributors/messages-to-topic-partition-leaders.test.ts
DELETED
@@ -1,32 +0,0 @@
-import { describe, expect, it } from 'vitest';
-import { distributeMessagesToTopicPartitionLeaders } from './messages-to-topic-partition-leaders';
-
-describe('Distribute messages to partition leader ids', () => {
-    describe('distributeMessagesToTopicPartitionLeaders', () => {
-        it('snoke', () => {
-            const result = distributeMessagesToTopicPartitionLeaders(
-                [{ topic: 'topic', partition: 0, key: null, value: null, offset: 0n, timestamp: 0n, headers: {} }],
-                { topic: { 0: 1 } },
-            );
-            expect(result).toMatchInlineSnapshot(`
-              {
-                "1": {
-                  "topic": {
-                    "0": [
-                      {
-                        "headers": {},
-                        "key": null,
-                        "offset": 0n,
-                        "partition": 0,
-                        "timestamp": 0n,
-                        "topic": "topic",
-                        "value": null,
-                      },
-                    ],
-                  },
-                },
-              }
-            `);
-        });
-    });
-});