kafka-ts 0.0.2-beta → 0.0.3-beta
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/release.yml +14 -14
- package/.prettierrc +3 -2
- package/README.md +43 -33
- package/docker-compose.yml +102 -102
- package/examples/package-lock.json +28 -28
- package/examples/package.json +12 -12
- package/examples/src/client.ts +6 -6
- package/examples/src/consumer.ts +9 -8
- package/examples/src/create-topic.ts +23 -16
- package/examples/src/producer.ts +7 -7
- package/examples/src/replicator.ts +4 -4
- package/examples/src/utils/delay.ts +1 -0
- package/examples/src/utils/json.ts +1 -1
- package/examples/tsconfig.json +2 -2
- package/package.json +21 -19
- package/src/api/api-versions.ts +2 -2
- package/src/api/create-topics.ts +2 -2
- package/src/api/delete-topics.ts +2 -2
- package/src/api/fetch.ts +3 -3
- package/src/api/find-coordinator.ts +2 -2
- package/src/api/heartbeat.ts +2 -2
- package/src/api/index.ts +18 -18
- package/src/api/init-producer-id.ts +2 -2
- package/src/api/join-group.ts +3 -3
- package/src/api/leave-group.ts +2 -2
- package/src/api/list-offsets.ts +3 -3
- package/src/api/metadata.ts +3 -3
- package/src/api/offset-commit.ts +2 -2
- package/src/api/offset-fetch.ts +2 -2
- package/src/api/produce.ts +3 -3
- package/src/api/sasl-authenticate.ts +2 -2
- package/src/api/sasl-handshake.ts +2 -2
- package/src/api/sync-group.ts +2 -2
- package/src/broker.ts +9 -9
- package/src/client.ts +6 -6
- package/src/cluster.test.ts +68 -68
- package/src/cluster.ts +7 -7
- package/src/connection.ts +17 -15
- package/src/consumer/consumer-group.ts +14 -14
- package/src/consumer/consumer-metadata.ts +2 -2
- package/src/consumer/consumer.ts +84 -82
- package/src/consumer/fetch-manager.ts +179 -0
- package/src/consumer/fetcher.ts +57 -0
- package/src/consumer/offset-manager.ts +6 -6
- package/src/consumer/processor.ts +47 -0
- package/src/distributors/assignments-to-replicas.test.ts +7 -7
- package/src/distributors/assignments-to-replicas.ts +1 -1
- package/src/distributors/messages-to-topic-partition-leaders.test.ts +6 -6
- package/src/index.ts +4 -3
- package/src/metadata.ts +4 -4
- package/src/producer/producer.ts +8 -8
- package/src/types.ts +2 -0
- package/src/utils/api.ts +4 -4
- package/src/utils/debug.ts +2 -2
- package/src/utils/decoder.ts +4 -4
- package/src/utils/encoder.ts +6 -6
- package/src/utils/error.ts +3 -3
- package/src/utils/retrier.ts +1 -1
- package/src/utils/tracer.ts +7 -4
- package/tsconfig.json +16 -16
package/src/consumer/consumer.ts
CHANGED
@@ -1,15 +1,16 @@
-import { API, API_ERROR } from "../api";
-import { IsolationLevel } from "../api/fetch";
-import { Assignment } from "../api/sync-group";
-import { Cluster } from "../cluster";
-import { distributeAssignmentsToNodes } from "../distributors/assignments-to-replicas";
-import { Message } from "../types";
-import { delay } from "../utils/delay";
-import { ConnectionError, KafkaTSApiError } from "../utils/error";
-import { defaultRetrier, Retrier } from "../utils/retrier";
-import { ConsumerGroup } from "./consumer-group";
-import { ConsumerMetadata } from "./consumer-metadata";
-import { OffsetManager } from "./offset-manager";
+import { API, API_ERROR } from '../api';
+import { IsolationLevel } from '../api/fetch';
+import { Assignment } from '../api/sync-group';
+import { Cluster } from '../cluster';
+import { distributeAssignmentsToNodes } from '../distributors/assignments-to-replicas';
+import { Message } from '../types';
+import { delay } from '../utils/delay';
+import { ConnectionError, KafkaTSApiError } from '../utils/error';
+import { defaultRetrier, Retrier } from '../utils/retrier';
+import { ConsumerGroup } from './consumer-group';
+import { ConsumerMetadata } from './consumer-metadata';
+import { FetchManager, Granularity } from './fetch-manager';
+import { OffsetManager } from './offset-manager';
 
 export type ConsumerOptions = {
     topics: string[];
@@ -26,13 +27,16 @@ export type ConsumerOptions = {
     allowTopicAutoCreation?: boolean;
     fromBeginning?: boolean;
     retrier?: Retrier;
-} & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
+    granularity?: Granularity;
+    concurrency?: number;
+} & ({ onBatch: (messages: Required<Message>[]) => unknown } | { onMessage: (message: Required<Message>) => unknown });
 
 export class Consumer {
     private options: Required<ConsumerOptions>;
     private metadata: ConsumerMetadata;
     private consumerGroup: ConsumerGroup | undefined;
     private offsetManager: OffsetManager;
+    private fetchManager?: FetchManager;
     private stopHook: (() => void) | undefined;
 
     constructor(
@@ -43,7 +47,7 @@ export class Consumer {
             ...options,
             groupId: options.groupId ?? null,
             groupInstanceId: options.groupInstanceId ?? null,
-            rackId: options.rackId ?? "",
+            rackId: options.rackId ?? '',
             sessionTimeoutMs: options.sessionTimeoutMs ?? 30_000,
             rebalanceTimeoutMs: options.rebalanceTimeoutMs ?? 60_000,
             maxWaitMs: options.maxWaitMs ?? 5000,
@@ -54,6 +58,8 @@ export class Consumer {
             allowTopicAutoCreation: options.allowTopicAutoCreation ?? false,
             fromBeginning: options.fromBeginning ?? false,
             retrier: options.retrier ?? defaultRetrier,
+            granularity: options.granularity ?? 'broker',
+            concurrency: options.concurrency ?? 1,
         };
 
         this.metadata = new ConsumerMetadata({ cluster: this.cluster });
@@ -95,81 +101,63 @@ export class Consumer {
             if (this.stopHook) return (this.stopHook as () => void)();
             return this.close(true).then(() => this.start());
         }
-        this.
+        setImmediate(() => this.startFetchManager());
     }
 
-
-
-
+    public async close(force = false): Promise<void> {
+        if (!force) {
+            await new Promise<void>(async (resolve) => {
+                this.stopHook = resolve;
+                await this.fetchManager?.stop();
+            });
+        }
+        await this.consumerGroup
+            ?.leaveGroup()
+            .catch((error) => console.warn(`Failed to leave group: ${error.message}`));
+        await this.cluster.disconnect().catch((error) => console.warn(`Failed to disconnect: ${error.message}`));
+    }
 
-
-
+    private startFetchManager = async () => {
+        const { granularity, concurrency } = this.options;
 
         while (!this.stopHook) {
-
-
-
-
-
-
-
-
-
+            const nodeAssignments = Object.entries(
+                distributeAssignmentsToNodes(
+                    this.metadata.getAssignment(),
+                    this.metadata.getTopicPartitionReplicaIds(),
+                ),
+            ).map(([nodeId, assignment]) => ({ nodeId: parseInt(nodeId), assignment }));
+
+            const numPartitions = Object.values(this.metadata.getAssignment()).flat().length;
+            const numProcessors = Math.min(concurrency, numPartitions);
+
+            this.fetchManager = new FetchManager({
+                fetch: this.fetch.bind(this),
+                process: this.process.bind(this),
+                metadata: this.metadata,
+                consumerGroup: this.consumerGroup,
+                nodeAssignments,
+                granularity,
+                concurrency: numProcessors,
+            });
 
             try {
-
-                const batch = await this.fetch(nodeId, assignment);
-                const messages = batch.responses.flatMap(({ topicId, partitions }) =>
-                    partitions.flatMap(({ partitionIndex, records }) =>
-                        records.flatMap(({ baseTimestamp, baseOffset, records }) =>
-                            records.map(
-                                (message): Required<Message> => ({
-                                    topic: this.metadata.getTopicNameById(topicId),
-                                    partition: partitionIndex,
-                                    key: message.key ?? null,
-                                    value: message.value ?? null,
-                                    headers: Object.fromEntries(
-                                        message.headers.map(({ key, value }) => [key, value]),
-                                    ),
-                                    timestamp: baseTimestamp + BigInt(message.timestampDelta),
-                                    offset: baseOffset + BigInt(message.offsetDelta),
-                                }),
-                            ),
-                        ),
-                    ),
-                );
-
-                if ("onBatch" in options) {
-                    await retrier(() => options.onBatch(messages));
-
-                    messages.forEach(({ topic, partition, offset }) =>
-                        this.offsetManager.resolve(topic, partition, offset + 1n),
-                    );
-                } else if ("onMessage" in options) {
-                    for (const message of messages) {
-                        await retrier(() => options.onMessage(message));
-
-                        const { topic, partition, offset } = message;
-                        this.offsetManager.resolve(topic, partition, offset + 1n);
-                    }
-                }
-                await this.consumerGroup?.offsetCommit();
-                await this.consumerGroup?.handleLastHeartbeat();
-            }
+                await this.fetchManager.start();
 
                 if (!nodeAssignments.length) {
-                    console.debug("No partitions assigned. Waiting for reassignment...");
+                    console.debug('No partitions assigned. Waiting for reassignment...');
                     await delay(this.options.maxWaitMs);
                     await this.consumerGroup?.handleLastHeartbeat();
                 }
             } catch (error) {
+                await this.fetchManager.stop();
+
                 if ((error as KafkaTSApiError).errorCode === API_ERROR.REBALANCE_IN_PROGRESS) {
-                    console.debug("Rebalance in progress...");
-                    shouldReassign = true;
+                    console.debug('Rebalance in progress...');
                    continue;
                 }
                 if ((error as KafkaTSApiError).errorCode === API_ERROR.FENCED_INSTANCE_ID) {
-                    console.debug("New consumer with the same groupInstanceId joined. Exiting the consumer...");
+                    console.debug('New consumer with the same groupInstanceId joined. Exiting the consumer...');
                     this.close();
                     break;
                 }
@@ -182,23 +170,37 @@
                     break;
                 }
                 console.error(error);
-
+                this.close();
                 break;
             }
         }
         this.stopHook?.();
     };
 
-
-
-
-
-
+    private async process(messages: Required<Message>[]) {
+        const { options } = this;
+        const { retrier } = options;
+
+        if ('onBatch' in options) {
+            await retrier(() => options.onBatch(messages));
+
+            messages.forEach(({ topic, partition, offset }) =>
+                this.offsetManager.resolve(topic, partition, offset + 1n),
+            );
+        } else if ('onMessage' in options) {
+            try {
+                for (const message of messages) {
+                    await retrier(() => options.onMessage(message));
+
+                    const { topic, partition, offset } = message;
+                    this.offsetManager.resolve(topic, partition, offset + 1n);
+                }
+            } catch (error) {
+                await this.consumerGroup?.offsetCommit().catch(() => {});
+                throw error;
+            }
         }
-        await this.consumerGroup
-            ?.leaveGroup()
-            .catch((error) => console.warn(`Failed to leave group: ${error.message}`));
-        await this.cluster.disconnect().catch((error) => console.warn(`Failed to disconnect: ${error.message}`));
+        await this.consumerGroup?.offsetCommit();
     }
 
     private fetch(nodeId: number, assignment: Assignment) {
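The consumer's fetch loop is now delegated to a `FetchManager` (new file below), surfaced through two new knobs on `ConsumerOptions`: `granularity` (default `'broker'`) controls how a fetch response is split into batches, and `concurrency` (default `1`, capped at the number of assigned partitions) controls how many processors consume those batches in parallel. A minimal sketch of what opting in might look like; the option fields are taken verbatim from this diff, while the surrounding types are simplified stand-ins rather than kafka-ts exports:

```ts
// Simplified stand-ins for the real types in src/types.ts and src/consumer/consumer.ts.
type Granularity = 'partition' | 'topic' | 'broker';
type Message = { topic: string; partition: number; offset: bigint; value: Buffer | null };

type ConsumerOptions = {
    topics: string[];
    granularity?: Granularity; // added in 0.0.3-beta, defaults to 'broker'
    concurrency?: number; // added in 0.0.3-beta, defaults to 1
} & ({ onBatch: (messages: Message[]) => unknown } | { onMessage: (message: Message) => unknown });

// One batch per partition, spread across up to 4 parallel processors.
const options: ConsumerOptions = {
    topics: ['my-topic'],
    granularity: 'partition',
    concurrency: 4,
    onBatch: async (messages) => {
        // each call receives a single partition's records from one fetch response
    },
};
```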
package/src/consumer/fetch-manager.ts
ADDED

@@ -0,0 +1,179 @@
+import EventEmitter from 'events';
+import { API } from '../api';
+import { Assignment } from '../api/sync-group';
+import { Metadata } from '../metadata';
+import { Batch, Message } from '../types';
+import { KafkaTSError } from '../utils/error';
+import { createTracer } from '../utils/tracer';
+import { ConsumerGroup } from './consumer-group';
+import { Fetcher } from './fetcher';
+import { Processor } from './processor';
+
+const trace = createTracer('FetchManager');
+
+export type Granularity = 'partition' | 'topic' | 'broker';
+
+type FetchManagerOptions = {
+    fetch: (nodeId: number, assignment: Assignment) => Promise<ReturnType<(typeof API.FETCH)['response']>>;
+    process: (batch: Batch) => Promise<void>;
+    metadata: Metadata;
+    consumerGroup?: ConsumerGroup;
+    nodeAssignments: { nodeId: number; assignment: Assignment }[];
+    granularity: Granularity;
+    concurrency: number;
+};
+
+type Checkpoint = { kind: 'checkpoint'; fetcherId: number };
+type Entry = Batch | Checkpoint;
+
+export class FetchManager extends EventEmitter<{ data: []; checkpoint: [number]; stop: [] }> {
+    private queue: Entry[] = [];
+    private isRunning = false;
+    private fetchers: Fetcher[];
+    private processors: Processor[];
+
+    constructor(private options: FetchManagerOptions) {
+        super();
+
+        const { fetch, process, consumerGroup, nodeAssignments, concurrency } = this.options;
+
+        this.fetchers = nodeAssignments.map(
+            ({ nodeId, assignment }, index) =>
+                new Fetcher(index, {
+                    nodeId,
+                    assignment,
+                    consumerGroup,
+                    fetch,
+                    onResponse: this.onResponse.bind(this),
+                }),
+        );
+        this.processors = Array.from({ length: concurrency }).map(
+            () => new Processor({ process, poll: this.poll.bind(this) }),
+        );
+    }
+
+    public async start() {
+        this.queue = [];
+        this.isRunning = true;
+
+        try {
+            await Promise.all([
+                ...this.fetchers.map((fetcher) => fetcher.loop()),
+                ...this.processors.map((processor) => processor.loop()),
+            ]);
+        } finally {
+            this.isRunning = false;
+            this.emit('stop');
+        }
+    }
+
+    @trace()
+    public async stop() {
+        this.isRunning = false;
+        this.emit('stop');
+
+        await Promise.all([
+            ...this.fetchers.map((fetcher) => fetcher.stop()),
+            ...this.processors.map((processor) => processor.stop()),
+        ]);
+    }
+
+    @trace()
+    public async poll(): Promise<Batch> {
+        if (!this.isRunning) {
+            return [];
+        }
+
+        const batch = this.queue.shift();
+        if (!batch) {
+            await new Promise<void>((resolve) => {
+                const onData = () => {
+                    this.removeListener('stop', onStop);
+                    resolve();
+                };
+                const onStop = () => {
+                    this.removeListener('data', onData);
+                    resolve();
+                };
+                this.once('data', onData);
+                this.once('stop', onStop);
+            });
+            return this.poll();
+        }
+
+        if ('kind' in batch && batch.kind === 'checkpoint') {
+            this.emit('checkpoint', batch.fetcherId);
+            return this.poll();
+        }
+
+        return batch as Exclude<Entry, Checkpoint>;
+    }
+
+    private async onResponse(fetcherId: number, response: ReturnType<(typeof API.FETCH)['response']>) {
+        const { metadata, granularity } = this.options;
+
+        const batches = fetchResponseToBatches(response, granularity, metadata);
+        if (batches.length) {
+            this.queue.push(...batches);
+            this.queue.push({ kind: 'checkpoint', fetcherId });
+
+            this.emit('data');
+            await new Promise<void>((resolve) => {
+                const onCheckpoint = (id: number) => {
+                    if (id === fetcherId) {
+                        this.removeListener('stop', onStop);
+                        resolve();
+                    }
+                };
+                const onStop = () => {
+                    this.removeListener('checkpoint', onCheckpoint);
+                    resolve();
+                };
+                this.once('checkpoint', onCheckpoint);
+                this.once('stop', onStop);
+            });
+        }
+    }
+}
+
+const fetchResponseToBatches = (
+    batch: ReturnType<typeof API.FETCH.response>,
+    granularity: Granularity,
+    metadata: Metadata,
+): Batch[] => {
+    const brokerTopics = batch.responses.map(({ topicId, partitions }) =>
+        partitions.map(({ partitionIndex, records }) =>
+            records.flatMap(({ baseTimestamp, baseOffset, records }) =>
+                records.map(
+                    (message): Required<Message> => ({
+                        topic: metadata.getTopicNameById(topicId),
+                        partition: partitionIndex,
+                        key: message.key ?? null,
+                        value: message.value ?? null,
+                        headers: Object.fromEntries(message.headers.map(({ key, value }) => [key, value])),
+                        timestamp: baseTimestamp + BigInt(message.timestampDelta),
+                        offset: baseOffset + BigInt(message.offsetDelta),
+                    }),
+                ),
+            ),
+        ),
+    );
+
+    switch (granularity) {
+        case 'broker':
+            const messages = brokerTopics.flatMap((topicPartition) =>
+                topicPartition.flatMap((partitionMessages) => partitionMessages),
+            );
+            return messages.length ? [messages] : [];
+        case 'topic':
+            return brokerTopics
+                .map((topicPartition) => topicPartition.flatMap((partitionMessages) => partitionMessages))
+                .filter((messages) => messages.length);
+        case 'partition':
+            return brokerTopics.flatMap((topicPartition) =>
+                topicPartition.map((partitionMessages) => partitionMessages),
+            );
+        default:
+            throw new KafkaTSError(`Unhandled batch granularity: ${granularity}`);
+    }
+};
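`fetchResponseToBatches` is where `granularity` takes effect: one fetch response from a broker becomes a single batch (`'broker'`), one batch per topic (`'topic'`), or one batch per partition (`'partition'`). A self-contained sketch of that grouping logic, using local stand-in types rather than the library's:

```ts
type Msg = { topic: string; partition: number; offset: bigint };

// messages[topic][partition] holds one partition's records from a single broker response
const toBatches = (messages: Msg[][][], granularity: 'broker' | 'topic' | 'partition'): Msg[][] => {
    switch (granularity) {
        case 'broker': {
            const all = messages.flat(2); // everything from the response in one batch
            return all.length ? [all] : [];
        }
        case 'topic':
            // one batch per topic, dropping empty ones
            return messages.map((partitions) => partitions.flat()).filter((batch) => batch.length);
        case 'partition':
            return messages.flat(1); // one batch per topic-partition
    }
};
```

Finer granularity only pays off together with `concurrency`: with `'broker'`, each response contributes at most one batch, so useful parallelism is roughly bounded by the number of fetchers.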
package/src/consumer/fetcher.ts
ADDED

@@ -0,0 +1,57 @@
+import { EventEmitter } from 'stream';
+import { API } from '../api';
+import { Assignment } from '../api/sync-group';
+import { createTracer } from '../utils/tracer';
+import { ConsumerGroup } from './consumer-group';
+
+const trace = createTracer('Fetcher');
+
+type FetcherOptions = {
+    nodeId: number;
+    assignment: Assignment;
+    consumerGroup?: ConsumerGroup;
+    fetch: (nodeId: number, assignment: Assignment) => Promise<ReturnType<(typeof API.FETCH)['response']>>;
+    onResponse: (fetcherId: number, response: ReturnType<(typeof API.FETCH)['response']>) => Promise<void>;
+};
+
+export class Fetcher extends EventEmitter<{ stop: []; stopped: []; data: []; drain: [] }> {
+    private isRunning = false;
+
+    constructor(
+        private fetcherId: number,
+        private options: FetcherOptions,
+    ) {
+        super();
+    }
+
+    public async loop() {
+        const { nodeId, assignment, consumerGroup, fetch, onResponse } = this.options;
+
+        this.isRunning = true;
+        this.once('stop', () => (this.isRunning = false));
+
+        try {
+            while (this.isRunning) {
+                const response = await fetch(nodeId, assignment);
+                await consumerGroup?.handleLastHeartbeat();
+                await onResponse(this.fetcherId, response);
+                await consumerGroup?.handleLastHeartbeat();
+            }
+        } finally {
+            this.isRunning = false;
+            this.emit('stopped');
+        }
+    }
+
+    @trace()
+    public async stop() {
+        if (!this.isRunning) {
+            return;
+        }
+
+        this.emit('stop');
+        return new Promise<void>((resolve) => {
+            this.once('stopped', resolve);
+        });
+    }
+}
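`Fetcher` (and `Processor` below) share the same lifecycle idiom: `stop()` raises a `stop` event that flips `isRunning`, then awaits a `stopped` acknowledgement emitted when the loop actually exits. A condensed sketch of the handshake (using the `events` module's `EventEmitter`, which the diff pulls in via its `stream` re-export):

```ts
import { EventEmitter } from 'events';

class Loop extends EventEmitter {
    private isRunning = false;

    async loop(tick: () => Promise<void>) {
        this.isRunning = true;
        this.once('stop', () => (this.isRunning = false));
        try {
            while (this.isRunning) await tick(); // e.g. fetch + heartbeat, or poll + process
        } finally {
            this.isRunning = false;
            this.emit('stopped'); // acknowledge shutdown to any pending stop()
        }
    }

    async stop() {
        if (!this.isRunning) return;
        const stopped = new Promise<void>((resolve) => this.once('stopped', resolve));
        this.emit('stop');
        await stopped;
    }
}
```

One subtlety visible in the real classes: a pending `await` inside the loop is not interrupted by `stop`; the flag is only checked between iterations.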
package/src/consumer/offset-manager.ts
CHANGED

@@ -1,9 +1,9 @@
-import { API } from "../api";
-import { IsolationLevel } from "../api/fetch";
-import { Assignment } from "../api/sync-group";
-import { Cluster } from "../cluster";
-import { distributeMessagesToTopicPartitionLeaders } from "../distributors/messages-to-topic-partition-leaders";
-import { ConsumerMetadata } from "./consumer-metadata";
+import { API } from '../api';
+import { IsolationLevel } from '../api/fetch';
+import { Assignment } from '../api/sync-group';
+import { Cluster } from '../cluster';
+import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
+import { ConsumerMetadata } from './consumer-metadata';
 
 type OffsetManagerOptions = {
     cluster: Cluster;
package/src/consumer/processor.ts
ADDED

@@ -0,0 +1,47 @@
+import { EventEmitter } from 'stream';
+import { Batch } from '../types';
+import { createTracer } from '../utils/tracer';
+
+const trace = createTracer('Processor');
+
+type ProcessorOptions = {
+    poll: () => Promise<Batch>;
+    process: (batch: Batch) => Promise<void>;
+};
+
+export class Processor extends EventEmitter<{ stop: []; stopped: [] }> {
+    private isRunning = false;
+
+    constructor(private options: ProcessorOptions) {
+        super();
+    }
+
+    public async loop() {
+        const { poll, process } = this.options;
+
+        this.isRunning = true;
+        this.once('stop', () => (this.isRunning = false));
+
+        try {
+            while (this.isRunning) {
+                const batch = await poll();
+                await process(batch);
+            }
+        } finally {
+            this.isRunning = false;
+            this.emit('stopped');
+        }
+    }
+
+    @trace()
+    public async stop() {
+        if (!this.isRunning) {
+            return;
+        }
+
+        return new Promise<void>((resolve) => {
+            this.once('stopped', resolve);
+            this.emit('stop');
+        });
+    }
+}
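All `Processor` instances pull from the same `FetchManager.poll()` source, so idle processors naturally pick up the next available batch. A toy illustration of that competing-consumer shape (plain functions, no events; sample data is hypothetical):

```ts
const queue: string[][] = [['a'], ['b'], ['c'], ['d']];
const poll = async (): Promise<string[]> => queue.shift() ?? [];
const process = async (batch: string[]) => {
    console.log('processed', batch);
};

// Two loops drain the shared queue cooperatively, like two Processors.
const worker = async () => {
    for (let batch = await poll(); batch.length; batch = await poll()) {
        await process(batch);
    }
};
await Promise.all([worker(), worker()]);
```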
package/src/distributors/assignments-to-replicas.test.ts
CHANGED

@@ -1,9 +1,9 @@
-import { describe, expect, it } from "vitest";
-import { distributeAssignmentsToNodesBalanced, distributeAssignmentsToNodesOptimized } from "./assignments-to-replicas";
+import { describe, expect, it } from 'vitest';
+import { distributeAssignmentsToNodesBalanced, distributeAssignmentsToNodesOptimized } from './assignments-to-replicas';
 
-describe("Distribute assignments to replica ids", () => {
-    describe("distributeAssignmentsToNodesBalanced", () => {
-        it("smoke", () => {
+describe('Distribute assignments to replica ids', () => {
+    describe('distributeAssignmentsToNodesBalanced', () => {
+        it('smoke', () => {
             const result = distributeAssignmentsToNodesBalanced({ topic: [0, 1] }, { topic: { 0: [0, 1], 1: [1, 2] } });
             expect(result).toMatchInlineSnapshot(`
                 {
@@ -22,8 +22,8 @@ describe("Distribute assignments to replica ids", () => {
         });
     });
 
-    describe("distributeAssignmentsToNodesOptimized", () => {
-        it("smoke", () => {
+    describe('distributeAssignmentsToNodesOptimized', () => {
+        it('smoke', () => {
             const result = distributeAssignmentsToNodesOptimized(
                 { topic: [0, 1] },
                 { topic: { 0: [0, 1], 1: [1, 2] } },

package/src/distributors/assignments-to-replicas.ts
CHANGED

@@ -50,7 +50,7 @@ export const distributeAssignmentsToNodesOptimized = (
     }
 
     result[parseInt(replicaId)] = partitions.reduce((acc, partition) => {
-        const [topicName, partitionId] = partition.split(":");
+        const [topicName, partitionId] = partition.split(':');
         acc[topicName] ??= [];
         acc[topicName].push(parseInt(partitionId));
         return acc;
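The one-line change above touches the spot where assignments, tracked as `topic:partition` strings, are regrouped into `{ [topic]: partitionIds[] }` maps. A small standalone repro of that reduce, with hypothetical sample data:

```ts
const partitions = ['orders:0', 'orders:1', 'payments:2'];

const grouped = partitions.reduce<Record<string, number[]>>((acc, partition) => {
    const [topicName, partitionId] = partition.split(':');
    acc[topicName] ??= [];
    acc[topicName].push(parseInt(partitionId));
    return acc;
}, {});
// grouped => { orders: [0, 1], payments: [2] }
```

Note the implicit assumption that topic names never contain `:`; a topic with a colon in its name would split incorrectly here.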
package/src/distributors/messages-to-topic-partition-leaders.test.ts
CHANGED

@@ -1,11 +1,11 @@
-import { describe, expect, it } from "vitest";
-import { distributeMessagesToTopicPartitionLeaders } from "./messages-to-topic-partition-leaders";
+import { describe, expect, it } from 'vitest';
+import { distributeMessagesToTopicPartitionLeaders } from './messages-to-topic-partition-leaders';
 
-describe("Distribute messages to partition leader ids", () => {
-    describe("distributeMessagesToTopicPartitionLeaders", () => {
-        it("snoke", () => {
+describe('Distribute messages to partition leader ids', () => {
+    describe('distributeMessagesToTopicPartitionLeaders', () => {
+        it('snoke', () => {
             const result = distributeMessagesToTopicPartitionLeaders(
-                [{ topic: "topic", partition: 0, key: null, value: null, offset: 0n, timestamp: 0n, headers: {} }],
+                [{ topic: 'topic', partition: 0, key: null, value: null, offset: 0n, timestamp: 0n, headers: {} }],
                 { topic: { 0: 1 } },
             );
             expect(result).toMatchInlineSnapshot(`
package/src/index.ts
CHANGED
@@ -1,3 +1,4 @@
-export * from "./utils/error";
-export * from "./client";
-export * from "./api";
+export * from './utils/error';
+export * from './client';
+export * from './api';
+export * from './types';
package/src/metadata.ts
CHANGED
@@ -1,7 +1,7 @@
-import { API, API_ERROR } from "./api";
-import { Cluster } from "./cluster";
-import { delay } from "./utils/delay";
-import { KafkaTSApiError } from "./utils/error";
+import { API, API_ERROR } from './api';
+import { Cluster } from './cluster';
+import { delay } from './utils/delay';
+import { KafkaTSApiError } from './utils/error';
 
 type MetadataOptions = {
     cluster: Cluster;
package/src/producer/producer.ts
CHANGED
@@ -1,11 +1,11 @@
-import { API, API_ERROR } from "../api";
-import { Cluster } from "../cluster";
-import { distributeMessagesToTopicPartitionLeaders } from "../distributors/messages-to-topic-partition-leaders";
-import { Metadata } from "../metadata";
-import { Message } from "../types";
-import { delay } from "../utils/delay";
-import { KafkaTSApiError } from "../utils/error";
-import { memo } from "../utils/memo";
+import { API, API_ERROR } from '../api';
+import { Cluster } from '../cluster';
+import { distributeMessagesToTopicPartitionLeaders } from '../distributors/messages-to-topic-partition-leaders';
+import { Metadata } from '../metadata';
+import { Message } from '../types';
+import { delay } from '../utils/delay';
+import { KafkaTSApiError } from '../utils/error';
+import { memo } from '../utils/memo';
 
 export type ProducerOptions = {
     allowTopicAutoCreation?: boolean;
package/src/types.ts
CHANGED
package/src/utils/api.ts
CHANGED
@@ -1,11 +1,11 @@
-import { Decoder } from "./decoder";
-import { Encoder } from "./encoder";
+import { Decoder } from './decoder';
+import { Encoder } from './encoder';
 
 export type Api<Request, Response> = {
     apiKey: number;
     apiVersion: number;
     request: (encoder: Encoder, body: Request) => Encoder;
     response: (buffer: Decoder) => Response;
-}
+};
 
-export const createApi = <Request, Response>(api: Api<Request, Response>) => api;
+export const createApi = <Request, Response>(api: Api<Request, Response>) => api;
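The `createApi` helper above is an identity function whose only job is type inference: `Request` and `Response` are captured from the handler signatures, so call sites need no annotations. A reduced sketch of the pattern with simplified types (the `PING` definition is a made-up illustration, not one of the package's APIs):

```ts
type Api<Request, Response> = {
    apiKey: number;
    request: (body: Request) => Buffer;
    response: (buffer: Buffer) => Response;
};

const createApi = <Request, Response>(api: Api<Request, Response>) => api;

// Request/Response are inferred; callers of PING.response get `{ ok: boolean }` back.
const PING = createApi({
    apiKey: 0,
    request: (body: { id: number }) => Buffer.from(JSON.stringify(body)),
    response: (buffer) => JSON.parse(buffer.toString()) as { ok: boolean },
});
```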