kafka-ts 0.0.1-beta.6 → 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +8 -2
- package/.github/workflows/release.yml +0 -30
- package/certs/ca.crt +0 -29
- package/certs/kafka.keystore.jks +0 -0
- package/certs/kafka.truststore.jks +0 -0
- package/docker-compose.yml +0 -110
- package/examples/package-lock.json +0 -3529
- package/examples/package.json +0 -22
- package/examples/src/benchmark/common.ts +0 -98
- package/examples/src/benchmark/kafka-ts.ts +0 -67
- package/examples/src/benchmark/kafkajs.ts +0 -51
- package/examples/src/client.ts +0 -12
- package/examples/src/consumer.ts +0 -24
- package/examples/src/create-topic.ts +0 -43
- package/examples/src/opentelemetry.ts +0 -46
- package/examples/src/producer.ts +0 -22
- package/examples/src/replicator.ts +0 -26
- package/examples/src/utils/delay.ts +0 -1
- package/examples/src/utils/json.ts +0 -1
- package/examples/tsconfig.json +0 -7
- package/log4j.properties +0 -95
- package/scripts/create-scram-user.sh +0 -7
- package/scripts/generate-certs.sh +0 -27
- package/scripts/kafka-local.properties +0 -33
package/examples/package.json
DELETED
@@ -1,22 +0,0 @@
- {
-   "name": "examples",
-   "version": "1.0.0",
-   "description": "",
-   "main": "dist/replicator.js",
-   "scripts": {
-     "test": "echo \"Error: no test specified\" && exit 1",
-     "start:jaeger": "docker run --rm --name jaeger -e COLLECTOR_OTLP_ENABLED=true -p 16686:16686 -p 4317:4317 -p 4318:4318 -d jaegertracing/all-in-one:1.62.0",
-     "build": "tsc"
-   },
-   "dependencies": {
-     "@opentelemetry/api": "^1.9.0",
-     "@opentelemetry/auto-instrumentations-node": "^0.51.0",
-     "@opentelemetry/context-async-hooks": "^1.27.0",
-     "@opentelemetry/exporter-trace-otlp-grpc": "^0.54.0",
-     "@opentelemetry/sdk-node": "^0.54.0",
-     "kafka-ts": "file:../",
-     "kafkajs": "^2.2.4"
-   },
-   "author": "",
-   "license": "ISC"
- }
package/examples/src/benchmark/common.ts
DELETED
@@ -1,98 +0,0 @@
- import { delay } from '../utils/delay';
-
- export const startBenchmarker = async ({
-     createTopic,
-     connectProducer,
-     startConsumer,
-     produce,
- }: {
-     createTopic: (opts: { topic: string; partitions: number; replicationFactor: number }) => Promise<void>;
-     connectProducer: () => Promise<() => unknown>;
-     startConsumer: (
-         opts: {
-             groupId: string;
-             topic: string;
-             concurrency: number;
-             incrementCount: (key: string, value: number) => void;
-         },
-         callback: (timestamp: number) => void,
-     ) => Promise<() => unknown>;
-     produce: (opts: { topic: string; length: number; timestamp: number; acks: -1 | 1 }) => Promise<void>;
- }) => {
-     const benchmarkId = `benchmark-${Date.now()}`;
-     const {
-         TOPIC = benchmarkId,
-         PRODUCER = 'true',
-         CONSUMER = 'true',
-         PARTITIONS = '10',
-         REPLICATION_FACTOR = '3',
-         CONCURRENCY = '1',
-         PRODUCE_BATCH_SIZE = '10',
-         PRODUCE_DELAY_MS = '0',
-     } = process.env;
-     const enableProducer = PRODUCER === 'true';
-     const enableConsumer = CONSUMER === 'true';
-     const partitions = parseInt(PARTITIONS);
-     const replicationFactor = parseInt(REPLICATION_FACTOR);
-     const concurrency = parseInt(CONCURRENCY);
-     const produceBatchSize = parseInt(PRODUCE_BATCH_SIZE);
-     const produceDelayMs = parseInt(PRODUCE_DELAY_MS);
-
-     await createTopic({ topic: TOPIC, partitions, replicationFactor }).catch(console.error);
-     await delay(2500);
-
-     let counts: Record<string, number> = {};
-     let sums: Record<string, number> = {};
-
-     const incrementCount = (key: string, value: number) => {
-         counts[key] = (counts[key] || 0) + value;
-     };
-
-     const incrementSum = (key: string, value: number) => {
-         sums[key] = (sums[key] || 0) + value;
-     };
-
-     const stopProducer = await connectProducer();
-
-     const stopConsumer =
-         enableConsumer &&
-         (await startConsumer({ groupId: benchmarkId, topic: TOPIC, concurrency, incrementCount }, (timestamp) => {
-             incrementCount('CONSUMER', 1);
-             incrementSum('CONSUMER', Date.now() - timestamp);
-         }));
-
-     const interval = setInterval(() => {
-         const latencies = Object.entries(sums)
-             .map(([key, sum]) => `${key} ${(sum / counts[key]).toFixed(2)}ms`)
-             .sort()
-             .join(', ');
-
-         const counters = Object.entries(counts)
-             .map(([key, count]) => `${key} ${count}`)
-             .sort()
-             .join(', ');
-
-         console.log(`Latency: ${latencies} | Counters: ${counters}`);
-         counts = {};
-         sums = {};
-     }, 1000);
-
-     let isRunning = true;
-     const produceLoop = async () => {
-         if (!isRunning) return;
-         const start = Date.now();
-         await produce({ topic: TOPIC, length: produceBatchSize, timestamp: Date.now(), acks: -1 });
-         incrementCount('PRODUCER', 1);
-         incrementSum('PRODUCER', Date.now() - start);
-         produceDelayMs && (await delay(produceDelayMs));
-         produceLoop();
-     };
-     enableProducer && produceLoop();
-
-     process.once('SIGINT', async () => {
-         isRunning = false;
-         stopConsumer && (await stopConsumer());
-         await stopProducer();
-         clearInterval(interval);
-     });
- };
package/examples/src/benchmark/kafka-ts.ts
DELETED
@@ -1,67 +0,0 @@
- import { readFileSync } from 'fs';
- import { API, createKafkaClient, saslScramSha512 } from 'kafka-ts';
- import { startBenchmarker } from './common';
-
- // setTracer(new OpenTelemetryTracer());
-
- const kafka = createKafkaClient({
-     bootstrapServers: [{ host: 'localhost', port: 9092 }],
-     clientId: 'kafka-ts',
-     sasl: saslScramSha512({ username: 'admin', password: 'admin' }),
-     ssl: { ca: readFileSync('../certs/ca.crt').toString() },
- });
-
- const producer = kafka.createProducer({ allowTopicAutoCreation: false });
-
- startBenchmarker({
-     createTopic: async ({ topic, partitions, replicationFactor }) => {
-         const cluster = kafka.createCluster();
-         await cluster.connect();
-
-         const { controllerId } = await cluster.sendRequest(API.METADATA, {
-             allowTopicAutoCreation: false,
-             includeTopicAuthorizedOperations: false,
-             topics: [],
-         });
-         await cluster.setSeedBroker(controllerId);
-         await cluster.sendRequest(API.CREATE_TOPICS, {
-             validateOnly: false,
-             timeoutMs: 10_000,
-             topics: [
-                 {
-                     name: topic,
-                     numPartitions: partitions,
-                     replicationFactor,
-                     assignments: [],
-                     configs: [],
-                 },
-             ],
-         });
-         await cluster.disconnect();
-     },
-     connectProducer: async () => () => producer.close(),
-     startConsumer: async ({ groupId, topic, concurrency, incrementCount }, callback) => {
-         const consumer = await kafka.startConsumer({
-             groupId,
-             topics: [topic],
-             onBatch: async (messages) => {
-                 for (const message of messages) {
-                     callback(parseInt(message.timestamp.toString()));
-                 }
-             },
-             concurrency,
-         });
-         consumer.on('offsetCommit', () => incrementCount('OFFSET_COMMIT', 1));
-         return () => consumer.close();
-     },
-     produce: async ({ topic, length, timestamp, acks }) => {
-         await producer.send(
-             Array.from({ length }).map(() => ({
-                 topic: topic,
-                 value: Buffer.from('hello'),
-                 timestamp: BigInt(timestamp),
-             })),
-             { acks },
-         );
-     },
- });
package/examples/src/benchmark/kafkajs.ts
DELETED
@@ -1,51 +0,0 @@
- import { readFileSync } from 'fs';
- import { Kafka } from 'kafkajs';
- import { startBenchmarker } from './common';
-
- const kafkajs = new Kafka({
-     brokers: ['localhost:9092'],
-     clientId: 'kafkajs',
-     sasl: { username: 'admin', password: 'admin', mechanism: 'plain' },
-     ssl: { ca: readFileSync('../certs/ca.crt').toString() },
- });
-
- const producer = kafkajs.producer({ allowAutoTopicCreation: false });
-
- startBenchmarker({
-     createTopic: async ({ topic, partitions, replicationFactor }) => {
-         const admin = kafkajs.admin();
-         await admin.connect();
-         await admin.createTopics({ topics: [{ topic, numPartitions: partitions, replicationFactor }] });
-         await admin.disconnect();
-     },
-     connectProducer: async () => {
-         await producer.connect();
-         return () => producer.disconnect();
-     },
-     startConsumer: async ({ groupId, topic, concurrency, incrementCount }, callback) => {
-         const consumer = kafkajs.consumer({ groupId, allowAutoTopicCreation: false });
-         await consumer.connect();
-         await consumer.subscribe({ topic });
-         await consumer.run({
-             eachBatch: async ({ batch }) => {
-                 for (const message of batch.messages) {
-                     callback(parseInt(message.timestamp));
-                 }
-             },
-             partitionsConsumedConcurrently: concurrency,
-             autoCommit: true,
-         });
-         consumer.on(consumer.events.COMMIT_OFFSETS, () => incrementCount('OFFSET_COMMIT', 1));
-         return () => consumer.disconnect();
-     },
-     produce: async ({ topic, length, timestamp, acks }) => {
-         await producer.send({
-             topic,
-             messages: Array.from({ length }).map(() => ({
-                 value: Buffer.from(timestamp.toString()),
-                 timestamp: timestamp.toString(),
-             })),
-             acks,
-         });
-     },
- });
package/examples/src/client.ts
DELETED
@@ -1,12 +0,0 @@
- import { readFileSync } from 'fs';
- import { createKafkaClient, saslScramSha512, setTracer } from 'kafka-ts';
- import { OpenTelemetryTracer } from './opentelemetry';
-
- setTracer(new OpenTelemetryTracer());
-
- export const kafka = createKafkaClient({
-     clientId: 'examples',
-     bootstrapServers: [{ host: 'localhost', port: 9092 }],
-     sasl: saslScramSha512({ username: 'admin', password: 'admin' }),
-     ssl: { ca: readFileSync('../certs/ca.crt').toString() },
- });
package/examples/src/consumer.ts
DELETED
@@ -1,24 +0,0 @@
- import { jsonSerializer, log } from 'kafka-ts';
- import { kafka } from './client';
- import { delay } from '../../dist/utils/delay';
-
- (async () => {
-     const consumer = await kafka.startConsumer({
-         groupId: 'example-group',
-         groupInstanceId: 'example-group-instance',
-         topics: ['my-topic'],
-         allowTopicAutoCreation: true,
-         onBatch: (batch) => {
-             log.info(
-                 `Received batch: ${JSON.stringify(batch.map((message) => ({ ...message, value: message.value?.toString() })), jsonSerializer)}`,
-             );
-             log.info(`Latency: ${Date.now() - parseInt(batch[0].timestamp.toString())}ms`)
-         },
-         batchGranularity: 'broker',
-         concurrency: 10,
-     });
-
-     process.on('SIGINT', async () => {
-         await consumer.close();
-     });
- })();
package/examples/src/create-topic.ts
DELETED
@@ -1,43 +0,0 @@
- import { API, API_ERROR, KafkaTSApiError, log } from 'kafka-ts';
- import { kafka } from './client';
-
- (async () => {
-     const cluster = kafka.createCluster();
-     await cluster.connect();
-
-     const { controllerId } = await cluster.sendRequest(API.METADATA, {
-         allowTopicAutoCreation: false,
-         includeTopicAuthorizedOperations: false,
-         topics: [],
-     });
-
-     try {
-         await cluster.sendRequestToNode(controllerId)(API.CREATE_TOPICS, {
-             validateOnly: false,
-             timeoutMs: 10_000,
-             topics: [
-                 {
-                     name: 'my-topic',
-                     numPartitions: 10,
-                     replicationFactor: 1,
-                     assignments: [],
-                     configs: [],
-                 },
-             ],
-         });
-     } catch (error) {
-         if ((error as KafkaTSApiError).errorCode !== API_ERROR.TOPIC_ALREADY_EXISTS) {
-             throw error;
-         }
-     }
-
-     const metadata = await cluster.sendRequestToNode(controllerId)(API.METADATA, {
-         allowTopicAutoCreation: false,
-         includeTopicAuthorizedOperations: false,
-         topics: [{ id: null, name: 'my-topic' }],
-     });
-
-     log.info('Metadata', metadata);
-
-     await cluster.disconnect();
- })();
package/examples/src/opentelemetry.ts
DELETED
@@ -1,46 +0,0 @@
- import { context, ROOT_CONTEXT, trace } from '@opentelemetry/api';
- import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node';
- import { AsyncHooksContextManager } from '@opentelemetry/context-async-hooks';
- import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc';
- import { NodeSDK } from '@opentelemetry/sdk-node';
- import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base';
- import { Tracer } from 'kafka-ts';
-
- const contextManager = new AsyncHooksContextManager();
- contextManager.enable();
- context.setGlobalContextManager(contextManager);
-
- const exporter = new OTLPTraceExporter({ url: 'http://localhost:4317' });
-
- const sdk = new NodeSDK({
-     serviceName: 'kafka-ts',
-     traceExporter: exporter,
-     spanProcessors: [new BatchSpanProcessor(exporter)],
-     instrumentations: [getNodeAutoInstrumentations()],
- });
-
- sdk.start();
-
- process.once('SIGINT', () => {
-     sdk.shutdown();
- });
-
- const tracer = trace.getTracer('kafka-ts');
-
- export class OpenTelemetryTracer implements Tracer {
-     startActiveSpan(module, method, { body, ...metadata } = {} as any, callback) {
-         return tracer.startActiveSpan(
-             `${module}.${method} ${metadata?.message ?? ''}`,
-             { attributes: metadata },
-             metadata?.root ? ROOT_CONTEXT : context.active(),
-             (span) => {
-                 const result = callback();
-                 if (result instanceof Promise) {
-                     return result.finally(() => span.end());
-                 }
-                 span.end();
-                 return result;
-             },
-         );
-     }
- }
package/examples/src/producer.ts
DELETED
@@ -1,22 +0,0 @@
- import { createInterface } from 'readline';
- import { kafka } from './client';
-
- const producer = kafka.createProducer({ allowTopicAutoCreation: true });
-
- const rl = createInterface({ input: process.stdin, output: process.stdout });
- process.once('SIGINT', rl.close);
-
- process.stdout.write('> ');
- rl.on('line', async (line) => {
-     await producer.send(
-         [
-             {
-                 topic: 'my-topic',
-                 value: Buffer.from(line),
-             },
-         ],
-         { acks: -1 },
-     );
-     process.stdout.write('> ');
- });
- rl.once('close', () => producer.close());
package/examples/src/replicator.ts
DELETED
@@ -1,26 +0,0 @@
- import { log } from 'kafka-ts';
- import { kafka } from './client';
-
- (async () => {
-     const topic = 'example-topic';
-
-     const producer = kafka.createProducer({ allowTopicAutoCreation: true });
-     const consumer = await kafka.startConsumer({
-         topics: [topic],
-         onBatch: async (messages) => {
-             await producer.send(
-                 messages.map((message) => ({
-                     ...message,
-                     headers: { 'X-Replicated': 'true' },
-                     topic: `${message.topic}-replicated`,
-                     offset: 0n,
-                 })),
-             );
-             log.info(`Replicated ${messages.length} messages`);
-         },
-     });
-     process.on('SIGINT', async () => {
-         await consumer.close();
-         await producer.close();
-     });
- })();
package/examples/src/utils/delay.ts
DELETED
@@ -1 +0,0 @@
- export const delay = (delayMs: number) => new Promise<void>((resolve) => setTimeout(resolve, delayMs));
package/examples/src/utils/json.ts
DELETED
@@ -1 +0,0 @@
- export const serializer = (_: string, value: unknown) => (typeof value === 'bigint' ? value.toString() : value);
package/examples/tsconfig.json
DELETED
package/log4j.properties
DELETED
@@ -1,95 +0,0 @@
- # Licensed to the Apache Software Foundation (ASF) under one or more
- # contributor license agreements. See the NOTICE file distributed with
- # this work for additional information regarding copyright ownership.
- # The ASF licenses this file to You under the Apache License, Version 2.0
- # (the "License"); you may not use this file except in compliance with
- # the License. You may obtain a copy of the License at
- #
- #    http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Unspecified loggers and loggers with additivity=true output to server.log and stdout
- # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
- log4j.rootLogger=INFO, stdout, kafkaAppender
-
- log4j.appender.stdout=org.apache.log4j.ConsoleAppender
- log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
- log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
- log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
- log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
- log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
- log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
- log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
- log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
- log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
- log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
- log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
- log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
- log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
- log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
- log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
- log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
- log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
- log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
- log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
- log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
- # Change the line below to adjust ZK client logging
- log4j.logger.org.apache.zookeeper=INFO
-
- # Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
- log4j.logger.kafka=INFO
- log4j.logger.org.apache.kafka=INFO
-
- # Change to INFO or TRACE to enable request logging
- # log4j.logger.kafka.request.logger=TRACE, requestAppender
- # log4j.additivity.kafka.request.logger=false
-
- # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
- # related to the handling of requests
- # log4j.logger.kafka.network.Processor=TRACE, requestAppender
- log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
- # log4j.additivity.kafka.server.KafkaApis=false
- # log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
- # log4j.additivity.kafka.network.RequestChannel$=false
-
- # Change the line below to adjust KRaft mode controller logging
- log4j.logger.org.apache.kafka.controller=INFO, controllerAppender
- log4j.additivity.org.apache.kafka.controller=false
-
- # Change the line below to adjust ZK mode controller logging
- log4j.logger.kafka.controller=INFO, controllerAppender
- log4j.additivity.kafka.controller=false
-
- log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
- log4j.additivity.kafka.log.LogCleaner=false
-
- log4j.logger.state.change.logger=INFO, stateChangeAppender
- log4j.additivity.state.change.logger=false
-
- # Access denials are logged at INFO level, change to INFO to also log allowed accesses
- log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
- log4j.additivity.kafka.authorizer.logger=false
package/scripts/create-scram-user.sh
DELETED
@@ -1,7 +0,0 @@
- #!/bin/bash
- set -e
-
- SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
-
- kafka-configs --bootstrap-server localhost:9092 --command-config "$SCRIPT_DIR/kafka-local.properties" --alter --add-config 'SCRAM-SHA-256=[password=admin]' --entity-type users --entity-name admin
- kafka-configs --bootstrap-server localhost:9092 --command-config "$SCRIPT_DIR/kafka-local.properties" --alter --add-config 'SCRAM-SHA-512=[password=admin]' --entity-type users --entity-name admin
package/scripts/generate-certs.sh
DELETED
@@ -1,27 +0,0 @@
- #!/bin/bash
- set -e
-
- # 1. Generating a x509 (CA) cert from a private key:
- openssl genrsa -out certs/ca.key 4096
- openssl req -new -x509 -key certs/ca.key -days 87660 -subj "/CN=kafka-ca" -out certs/ca.crt
-
- # 2. Generating a private key for kafka server and csr:
- openssl genrsa -out certs/kafka.key 4096
- openssl req -new -nodes -key certs/kafka.key -out certs/kafka.csr -subj "/CN=kafka"
- openssl x509 -req -in certs/kafka.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/kafka.crt -days 3650 -extensions SAN -extfile <(printf "[SAN]\nsubjectAltName=DNS:localhost")
-
- # 3. Generating keystore for kafka server:
- openssl pkcs12 -export -in certs/kafka.crt \
-     -passout pass:password \
-     -inkey certs/kafka.key \
-     -out certs/kafka.keystore.jks
-
- # 4. Generating truststore for kafka server:
- keytool -importkeystore -srckeystore certs/kafka.keystore.jks \
-     -srcstoretype PKCS12 \
-     -srcstorepass password \
-     -deststorepass password \
-     -destkeystore certs/kafka.truststore.jks \
-     -noprompt
-
- rm certs/{ca.key,ca.srl,kafka.crt,kafka.csr,kafka.key}
package/scripts/kafka-local.properties
DELETED
@@ -1,33 +0,0 @@
- security.protocol = SASL_SSL
- sasl.mechanism = PLAIN
- sasl.jaas.config = org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin";
- ssl.truststore.type = PEM
- ssl.truststore.certificates = -----BEGIN CERTIFICATE----- \
- MIIFCTCCAvGgAwIBAgIUYSj+2uS05/+RbdvXYi+NnYZrzIEwDQYJKoZIhvcNAQEL \
- BQAwEzERMA8GA1UEAwwIa2Fma2EtY2EwIBcNMjQxMDE4MTMwNDQ4WhgPMjI2NDEw \
- MjAxMzA0NDhaMBMxETAPBgNVBAMMCGthZmthLWNhMIICIjANBgkqhkiG9w0BAQEF \
- AAOCAg8AMIICCgKCAgEAiuEnn0ZAaWsteEMsgrCgvhGmBBZh6NMbWrWRq/9cvESQ \
- OjU7LUIXEjzHAeIc/NfMHeOl6D2BfXS+BAcmJBHZ5DAZoCOATKvy0Ry5KEdlNbat \
- o/B9i5jWKbK1Q0sF+2hSfRWB5OGrdsusN9XxH2JYuW93fY9htf5Au+h5BjLMTL8z \
- wRJHX4EuC63VyF3U0xl20TePuYpATpvEXQZQyaHWGMM8+EJVJiv6pQY2GoKKHsD7 \
- xFB31mKgvLQDWkSyeRXeZE0wgoni4MM4Q9hKH+U+ccn7Xxj2grrjK96u6/B6LIkc \
- z30Lh+yWuO57VM/cqrnK1DCMR3+hG6ser5lLemkJmptbrvsm2uZEI2vCglsoz352 \
- ZmrU5LX8Taj5SPmWssRxQHtx20L5kDDs0QNtvPprhaPdFmPVr39EVTS4p1As5meZ \
- QLnOti7Vj0RtALQ1BBRc1nl8OUh8N4krBEgs+BLGwcW8T9vw7EfqaQI7G9q/b/+L \
- soEHRzdRxEs5TPjVGQNcd2REQf/Dn1ovHOlgmkhRKWPLhHVRza2FjypdO0yUF0io \
- W2vxzHvdLsUhpmAVu//dOOopzfukJfOELsyxSkQucyFpaDmu4y9WjPq2cJ1K1F1L \
- DuM9BVHIZTcI7CxlqujYroYnH4QMH/dUu5MAfqnHE926XC3YIWvfuv4NLs0vcKcC \
- AwEAAaNTMFEwHQYDVR0OBBYEFI/M/wYP4G8TGt2AR5X8H6YAwfUQMB8GA1UdIwQY \
- MBaAFI/M/wYP4G8TGt2AR5X8H6YAwfUQMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI \
- hvcNAQELBQADggIBAHLIwasKnvI5Cim8app5V1C4keCrjInLRXQuY9r7wFQ6jFFC \
- KraOcEM2LCjwDvjWv30Rdjrv0sMm3NNL+GC2SWpj5tDmsUYxMJcDRVFLbKWBdjrx \
- m/8zFfr4gL2pUSJZxucey0KCP4yCljetSpXbN+upvq97i2/O7JFcyeFKu4Z3KeKJ \
- Pn+gI9Dn5WteTzPNLR4TX948YvmOSURKMAc2LHXDgz4xL+UqNAf18cbJYd47YBLi \
- JAlPISVBUFZcOXAp2Y48m2AjN2ZfL8Sb7see6Ia87/19Dls9FFmCDF/bN9G2R1Tv \
- gEnby25or7xfH5cJpkEBHPN9Tg/D/XA7fe6rEvssMBuMsjr5DNKzUeQ5NgkKA9PU \
- JFTd75xmGq71ydOaO7ICwcrBtzYYJ9HyEOg9Boz7ikALvi1LqK/y2L9pAQ6RUqNn \
- 5IQs/FeJFC48ZvvM5vhQLQQJM+Bz991JA6i/KiQvyNHi4PhrPtSbwENhxFSZWWdG \
- 0E8XYAWs1X6y+9MiWea9HvA9+0mkM0XTZK/ps4jgTaPsY7xyhHMnMVsXZu6LLxNd \
- puxokDyDxYo63OY6AZLpwpsjpfDH1jPakR/lU0/jvudV6GC+kQr7hZseTUQI21f2 \
- njVbHOlihJIV+6o6lrS+xLwU7zWwvx/cObmplwffsyI5sPwnXIIMJPR6ToSV \
- -----END CERTIFICATE-----