kafka-console 3.0.0-rc.3 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -2
- package/.claude/settings.local.json +0 -35
- package/.progress/master.md +0 -63
- package/build/cli.d.ts +0 -2
- package/build/commands/config.d.ts +0 -7
- package/build/commands/consume.d.ts +0 -12
- package/build/commands/createTopic.d.ts +0 -7
- package/build/commands/deleteTopic.d.ts +0 -2
- package/build/commands/fetchTopicOffsets.d.ts +0 -6
- package/build/commands/list.d.ts +0 -6
- package/build/commands/metadata.d.ts +0 -2
- package/build/commands/produce.d.ts +0 -9
- package/build/index.d.ts +0 -5
- package/build/utils/formatters.d.ts +0 -13
- package/build/utils/kafka.d.ts +0 -56
- package/build/utils/output.d.ts +0 -4
- package/e2e/cli.test.ts +0 -325
- package/eslint.config.js +0 -37
- package/pnpm-workspace.yaml +0 -3
- package/scripts/bench-kafka.js +0 -456
- package/tsconfig.build.json +0 -8
- package/vitest.config.e2e.ts +0 -9
- package/vitest.config.ts +0 -17
package/package.json
CHANGED

package/.claude/settings.local.json
DELETED
@@ -1,35 +0,0 @@
- {
-   "permissions": {
-     "allow": [
-       "WebFetch(domain:3axap4ehko.github.io)",
-       "WebFetch(domain:raw.githubusercontent.com)",
-       "Bash(pnpm run:*)",
-       "Bash(pnpm test:*)",
-       "Bash(node scripts/bench-kafka.js:*)",
-       "Bash(node --trace-warnings scripts/bench-kafka.js:*)",
-       "Bash(timeout 30s node:*)",
-       "Bash(timeout 20s node:*)",
-       "Bash(timeout 15s node:*)",
-       "Bash(npm ls:*)",
-       "Bash(pnpm ls:*)",
-       "Bash(node -e:*)",
-       "Bash(timeout 60s node:*)",
-       "mcp__context7__query-docs",
-       "Bash(timeout 60 node:*)",
-       "Bash(kafka-topics.sh:*)",
-       "Bash(docker exec:*)",
-       "Bash(timeout 30 node:*)",
-       "Bash(node build/cli.js:*)",
-       "Bash(timeout 5 node:*)",
-       "Skill(ask-codex)",
-       "Bash(pnpm test:e2e:*)",
-       "Bash(pnpm build:*)",
-       "Bash(node build/index.js:*)",
-       "Bash(node:*)",
-       "mcp__context7__resolve-library-id",
-       "WebSearch"
-     ],
-     "deny": [],
-     "ask": []
-   }
- }
package/.progress/master.md
DELETED
@@ -1,63 +0,0 @@
- [2026-02-13 15:03:00] - Starting comprehensive E2E test of all CLI commands against local Kafka
- [2026-02-13 15:04:00] - Build failed: `Admin` not exported from `@platformatic/kafka` - it's at `adminClient.Admin`
- [2026-02-13 15:05:00] - Fixed import: `import { adminClient } from '@platformatic/kafka'; const { Admin } = adminClient;`
- [2026-02-13 15:05:30] - Fixed type annotation: `Promise<InstanceType<typeof Admin>>` since Admin is a value not a type
- [2026-02-13 15:06:00] - Build succeeds after fixes
- [2026-02-13 15:06:30] - --version, --help: PASS
- [2026-02-13 15:07:00] - list, list -p, list -a, ls alias: PASS
- [2026-02-13 15:07:30] - topic:create, topic:delete: PASS
- [2026-02-13 15:08:00] - metadata: PARTIAL - broker info works but topics always empty (metadata({topics:[]}) returns 0 topics in @platformatic/kafka unlike kafkajs)
- [2026-02-13 15:08:30] - produce (stdin pipe, file input, static headers, --wait): PASS
- [2026-02-13 15:09:00] - consume (--from 0, --from ISO, --count, --skip, --pretty, --output file, -d raw): PASS
- [2026-02-13 15:09:30] - topic:offsets (no args, timestamp, ISO, consumer group): PASS
- [2026-02-13 15:10:00] - config: PASS (intentionally throws unsupported error)
- [2026-02-13 15:10:30] - Error handling for nonexistent topics: exits with code 1 (generic "failed N times" messages from library)
- [2026-02-13 15:11:00] - Unit tests: fixed mock (added adminClient export), all 21 tests pass
- [2026-02-13 15:11:30] - BUG: metadata command always shows 0 topics - @platformatic/kafka metadata({topics:[]}) returns empty unlike kafkajs
- [2026-02-13 15:11:30] - BUG: consuming a raw-produced message with default json formatter crashes the consumer (unhandled JSON parse error)
- [2026-02-13 15:15:00] - config command was incorrectly stubbed out - @platformatic/kafka has describeConfigs(), just needs resourceType/resourceName params (not type/name)
- [2026-02-13 15:17:00] - Implemented config command, added resourceTypeToNumber mapping, added ConfigResourceTypes to mock
- [2026-02-13 15:17:30] - Verified: config -r topic -n <name> and config -r broker -n 1 both work. All 21 unit tests pass
- [2026-02-13 15:31:00] - Enhanced consumer output: added partition, offset, timestamp, ahead fields to consume output
- [2026-02-13 15:31:00] - Added `high` (bigint) to ConsumerMessage interface, fetched via listOffsets(LATEST) before consumption starts
- [2026-02-13 15:31:00] - ahead = Number(high - offset), offset/timestamp serialized as strings for JSON precision
- [2026-02-13 15:31:30] - Build clean, all 21 tests pass
- [TIME 2026-03-01 10:50:41] - Started writing unit tests to reproduce reported src/ bug findings (metadata scope, swallowed async errors, timeout type mismatch, missing close on failures).
- [TIME 2026-03-04 23:44:00] - Redesign: removed Pool indirection, commands own lifecycle directly
- [TIME 2026-03-04 23:44:00] - Deleted pool.ts + pool tests, removed evnty dependency
- [TIME 2026-03-04 23:44:00] - kafka.ts: exported getClientConfig, removed createClient/createAdmin/createCluster/createConsumer/createProducer wrappers and intermediate types
- [TIME 2026-03-04 23:44:00] - fetch* functions now accept config object (ReturnType<typeof getClientConfig>) instead of ClientOptions
- [TIME 2026-03-04 23:44:00] - Admin commands (list, config, createTopic, deleteTopic, metadata): use new Admin(getClientConfig(...)) directly
- [TIME 2026-03-04 23:44:00] - consume.ts: full rewrite - Consumer+MessagesStream directly, skip/count/timeout as inline loop logic, try/finally lifecycle
- [TIME 2026-03-04 23:44:00] - produce.ts: full rewrite - Producer.send() directly, stdin via async generator (readline), file via async generator
- [TIME 2026-03-04 23:44:00] - Build clean, all 11 tests pass (pool tests removed, rest unchanged)
- [TIME 2026-03-05 09:25:00] - Fixed 2 failing e2e tests:
- [TIME 2026-03-05 09:25:00] - metadata.ts: added `if (!topicMeta) continue;` guard for topics with undefined metadata in meta.topics Map
- [TIME 2026-03-05 09:25:00] - consume.ts: moved break check from top of loop to after index increment. Bug was: after processing last needed message, loop awaited next message from stream (which never came), causing timeout. Fix: `if (++index >= limit) break;` at end of loop body.
- [TIME 2026-03-05 09:28:00] - All tests pass: 11 unit, 10 e2e. Lint clean.
- [TIME 2026-03-05 09:35:00] - Expanded e2e tests from 10 to 28. Added coverage for: consume --skip, --pretty, --output, -d raw, --from ISO, static headers, file-produced messages, timeout; produce --header, --input, -d raw, --wait; list --all, ls alias, -p; config -r topic, -r broker; metadata -p.
- [TIME 2026-03-05 20:12:51] - Started JSONL output refactor: unify command outputs around line-delimited JSON and update tests.
- [TIME 2026-03-06 10:37:59] - Added src/utils/output.ts with writeJsonl/writeJsonlMany; switched list/config/createTopic/metadata/topic:offsets to JSONL emission and made consume always compact single-line JSON per record.
- [TIME 2026-03-06 10:37:59] - Updated e2e suite to parse JSONL outputs (line-wise JSON.parse) instead of array/object dumps; unit tests and e2e now pass.
- [TIME 2026-03-06 10:39:35] - Verification: pnpm build, pnpm test, pnpm test:e2e, pnpm lint all pass after JSONL output refactor.
- [TIME 2026-03-06 18:19:07] - Removed AWS auth completely: deleted aws-related CLI flags and env mappings, removed aws branch in getSASL, and cleaned README auth/features/env docs.
- [TIME 2026-03-06 18:19:07] - Verification after AWS removal: pnpm build, pnpm lint, pnpm test all pass.
- [TIME 2026-03-06 18:19:59] - Follow-up cleanup requested: remove dead options/types after JSONL and AWS removals (-p, -l, unused GlobalOptions fields, unused Format alias), and align tests/docs.
- [TIME 2026-03-06 18:28:56] - Removed dead CLI flags -l/--log-level and -p/--pretty from index.ts; removed unused logLevelParser and GlobalOptions.logLevel/pretty from kafka.ts; removed unused Format alias from formatters.ts.
- [TIME 2026-03-06 18:28:56] - Updated README and e2e tests to drop stale --pretty usage/docs; verification passed: pnpm build, pnpm lint, pnpm test, KAFKA_BROKERS=localhost:9092 pnpm test:e2e (25 tests).
- [TIME 2026-03-06 18:42:12] - Following up on 2 correctness issues: consume output close race and produce crash on missing value. Plan is targeted fixes plus unit tests reproducing both.
- [TIME 2026-03-06 18:51:33] - consume.ts already had finished(output) from the interrupted patch; completed produce.ts validation for missing value in stdin/file inputs and added command-level regression tests for both issues.
- [TIME 2026-03-06 18:51:33] - New tests exposed broken Kafka vitest mock constructors (arrow mock implementations were not new-able); fixed Admin/Consumer/Producer mocks to use constructable function implementations.
- [TIME 2026-03-06 18:51:33] - Verification passed: pnpm test (13), pnpm build, pnpm lint, KAFKA_BROKERS=localhost:9092 pnpm test:e2e (25).
- [TIME 2026-03-07 00:34:24] - Minor follow-up: moving producer header short flag off -h, making consume shut down cleanly on SIGINT/SIGTERM, and wiring metadata ISR through from actual partition metadata with regression coverage.
- [TIME 2026-03-09 17:31:04] - Implemented minor fixes: producer header shorthand is now -H, consume registers SIGINT/SIGTERM handlers that close the stream cleanly, and metadata now serializes partition.isr instead of [].
- [TIME 2026-03-09 17:31:04] - Added regression coverage for header flag registration, consume SIGINT shutdown, and metadata ISR propagation; unit/build/lint passed.
- [TIME 2026-03-09 17:31:04] - E2E blocked by environment: localhost:9092 is currently unreachable. Direct `node build/cli.js metadata -b localhost:9092` fails with `Cannot connect to any broker`, so the full e2e suite currently fails before exercising behavior.
- [TIME 2026-03-09 18:14:43] - Broker came back; reran smoke + e2e and the SIGINT case initially exposed another real bug: closing the consumer directly while consuming throws `Cannot leave group while consuming messages.` Adjusted signal handler to close stream first when active and only close consumer during startup interruption.
- [TIME 2026-03-09 18:14:43] - Final verification passed: pnpm test (17), pnpm lint, KAFKA_BROKERS=localhost:9092 pnpm test:e2e (26).
- [TIME 2026-03-09 23:07:44] - Verified the new review items against @platformatic/kafka source before editing. Consumer.close() and MessagesStream.close() are idempotent, so the extra close booleans in consume.ts were just local indirection.
- [TIME 2026-03-09 23:07:44] - Applied targeted cleanup/hardening: removed redundant consume close guards and dead interrupted checks, preserved the first cleanup error when file finalization also fails, hardened produce input parsing against undefined values, fixed the Kafka mock indentation, and aligned commander tests with the actual exported CLI instance.
- [TIME 2026-03-09 23:24:16] - Verification after the final consume refactor passed end-to-end: pnpm test (19), pnpm build, pnpm lint, KAFKA_BROKERS=localhost:9092 pnpm test:e2e (26). ESLint initially rejected `throw` inside finally, so consume now carries cleanup errors out after finally instead of throwing from it.
- [TIME 2026-03-10 00:34:38] - Removing built-in js formatter entirely. Scope: formatter registry/code, CLI help text, README format tables/examples, and unit tests so json/raw + custom module paths are the only advertised formats.
- [TIME 2026-03-10 18:50:37] - Built-in js formatter removed cleanly. Deleted vm-based formatter code, updated CLI descriptions/help examples to advertise json/raw/custom module paths only, removed README js docs, and added tests that js is no longer exported or advertised. Verification passed: pnpm test (21), pnpm build, pnpm lint, KAFKA_BROKERS=localhost:9092 pnpm test:e2e (26).
- [TIME 2026-03-10 21:48:04] - Hardening TLS defaults: added global --insecure flag, switched --ssl to verify certificates by default, updated README auth/troubleshooting text, and added unit coverage for secure-vs-insecure TLS config plus CLI flag registration.
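The loop fix recorded at [TIME 2026-03-05 09:25:00] above is easier to follow next to code. Below is a minimal, hypothetical sketch of the skip/count loop it describes; `stream`, `skip`, `count`, and `writeLine` are illustrative stand-ins, not the package's actual internals. The point is that the loop breaks at the end of the body, after the index increment, so it never awaits a message it no longer needs.

async function consumeLoop(
  stream: AsyncIterable<{ value: unknown }>, // stand-in for the consumer's message stream
  skip: number,
  count: number,
  writeLine: (record: unknown) => void,
): Promise<void> {
  let index = 0;
  const limit = skip + count;
  for await (const message of stream) {
    if (index >= skip) {
      writeLine(message.value);
    }
    // Break *after* incrementing, at the end of the loop body, so the loop
    // does not await another message once the last needed one was handled
    // (the timeout bug fixed on 2026-03-05).
    if (++index >= limit) break;
  }
}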
package/build/cli.d.ts
DELETED

package/build/commands/config.d.ts
DELETED
@@ -1,7 +0,0 @@
- import { type CommandContext, type ConfigResourceType } from '../utils/kafka.ts';
- interface ConfigOptions {
-   resource: ConfigResourceType;
-   resourceName: string;
- }
- export default function config(opts: ConfigOptions, { parent }: CommandContext): Promise<void>;
- export {};

package/build/commands/consume.d.ts
DELETED
@@ -1,12 +0,0 @@
- import { type CommandContext } from '../utils/kafka.ts';
- interface ConsumeOptions {
-   group: string;
-   dataFormat: string;
-   from?: string;
-   count: number;
-   skip: number;
-   output?: string;
-   snapshot?: boolean;
- }
- export default function consume(topic: string, opts: ConsumeOptions, { parent }: CommandContext): Promise<void>;
- export {};

package/build/commands/createTopic.d.ts
DELETED
@@ -1,7 +0,0 @@
- import { type CommandContext } from '../utils/kafka.ts';
- interface CreateTopicOptions {
-   partitions?: number;
-   replicas?: number;
- }
- export default function createTopic(topic: string, opts: CreateTopicOptions, { parent }: CommandContext): Promise<void>;
- export {};

package/build/commands/fetchTopicOffsets.d.ts
DELETED
@@ -1,6 +0,0 @@
- import { type CommandContext } from '../utils/kafka.ts';
- interface FetchTopicOffsetsOptions {
-   group?: string;
- }
- export default function fetchTopicOffset(topic: string, timestamp: string | undefined, opts: FetchTopicOffsetsOptions, { parent }: CommandContext): Promise<void>;
- export {};
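The command declarations above all share the `CommandContext` shape from utils/kafka.ts. A minimal sketch of invoking one of them directly with those shapes follows; the import paths and literal option values are illustrative only, and the published CLI wires these commands through commander, which is not part of this diff.

import consume from './commands/consume.ts';
import type { GlobalOptions } from './utils/kafka.ts';

// Sketch only: drive the consume command function with the declared shapes.
const globals: GlobalOptions = { brokers: 'localhost:9092', timeout: 10000, ssl: false };

await consume(
  'orders',
  { group: 'kafka-console-demo', dataFormat: 'json', from: '0', count: 10, skip: 0 },
  { parent: { opts: () => globals } },
);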
package/build/commands/list.d.ts
DELETED

package/build/commands/produce.d.ts
DELETED
@@ -1,9 +0,0 @@
- import { type CommandContext } from '../utils/kafka.ts';
- interface ProduceOptions {
-   dataFormat: string;
-   header: string[];
-   input?: string;
-   wait: number;
- }
- export default function produce(topic: string, opts: ProduceOptions, { parent }: CommandContext): Promise<void>;
- export {};
package/build/index.d.ts
DELETED

package/build/utils/formatters.d.ts
DELETED
@@ -1,13 +0,0 @@
- export interface Encoder<T> {
-   (value: T): Promise<string | Buffer> | string | Buffer;
- }
- export interface Decoder<T> {
-   (value: Buffer | string | null): Promise<T> | T;
- }
- export interface Formatter<T> {
-   encode: Encoder<T>;
-   decode: Decoder<T>;
- }
- export declare const json: Formatter<unknown>;
- export declare const raw: Formatter<unknown>;
- export declare function getFormatter(format: string): Formatter<unknown>;
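The `Formatter` interface above is the shape a custom `-d <module>` formatter would satisfy alongside the built-in `json` and `raw` formatters. A minimal sketch of such a module follows; that the package re-exports `Formatter` from its root and loads the module's default export are assumptions, not confirmed by this diff.

import type { Formatter } from 'kafka-console';

// Hypothetical custom formatter module.
const prettyJson: Formatter<unknown> = {
  // Encode a value to pretty-printed JSON before producing.
  encode: (value) => JSON.stringify(value, null, 2),
  // Decode a consumed Buffer/string back into a JS value.
  decode: (value) => (value === null ? null : JSON.parse(value.toString())),
};

export default prettyJson;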
package/build/utils/kafka.d.ts
DELETED
@@ -1,56 +0,0 @@
- import { ConfigResourceTypes } from '@platformatic/kafka';
- import type { ConnectionOptions } from 'node:tls';
- export type ConfigResourceType = 'UNKNOWN' | 'TOPIC' | 'BROKER' | 'BROKER_LOGGER';
- export declare function resourceParser(resource: string): ConfigResourceType;
- export declare function resourceTypeToNumber(type: ConfigResourceType): (typeof ConfigResourceTypes)[keyof typeof ConfigResourceTypes];
- export interface GlobalOptions {
-   brokers: string;
-   timeout: number;
-   ssl: boolean;
-   insecure?: boolean;
-   mechanism?: string;
-   username?: string;
-   password?: string;
-   oauthBearer?: string;
- }
- export interface CommandContext {
-   parent: {
-     opts(): GlobalOptions;
-   };
- }
- export interface SASLConfig {
-   mechanism: 'PLAIN' | 'SCRAM-SHA-256' | 'SCRAM-SHA-512' | 'OAUTHBEARER';
-   username?: string;
-   password?: string;
-   token?: string;
- }
- export declare function getSASL({ mechanism, username, password, oauthBearer, }: Pick<GlobalOptions, 'mechanism' | 'username' | 'password' | 'oauthBearer'>): SASLConfig | undefined;
- export declare function getClientConfig(bootstrapServer: string, ssl: boolean, sasl?: SASLConfig, insecure?: boolean): {
-   clientId: string;
-   bootstrapBrokers: string[];
-   tls?: ConnectionOptions;
-   sasl?: SASLConfig;
- };
- export declare function getClientConfigFromOpts(opts: GlobalOptions): {
-   clientId: string;
-   bootstrapBrokers: string[];
-   tls?: ConnectionOptions;
-   sasl?: SASLConfig;
- };
- export declare function fetchTopicOffsets(config: ReturnType<typeof getClientConfig>, topic: string): Promise<{
-   partition: number;
-   offset: string;
-   high: string;
-   low: string;
- }[]>;
- export declare function fetchTopicOffsetsByTimestamp(config: ReturnType<typeof getClientConfig>, topic: string, timestamp: number): Promise<{
-   partition: number;
-   offset: string;
- }[]>;
- export declare function fetchConsumerGroupOffsets(config: ReturnType<typeof getClientConfig>, groupId: string, topics: string[]): Promise<{
-   topic: string;
-   partitions: {
-     partition: number;
-     offset: string;
-   }[];
- }[]>;
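A minimal sketch of how the helpers declared above compose, based only on these signatures: build a client config from the global CLI options, fetch per-partition offsets, and emit one compact JSON object per line in the JSONL style the progress log describes. The relative import path and the direct stdout write are assumptions, not the package's actual command code.

import { getSASL, getClientConfig, fetchTopicOffsets, type GlobalOptions } from './kafka.ts';

// Sketch only: compose the declared helpers into an offsets printer.
export async function printTopicOffsets(opts: GlobalOptions, topic: string): Promise<void> {
  const sasl = getSASL(opts);
  const config = getClientConfig(opts.brokers, opts.ssl, sasl, opts.insecure);
  const offsets = await fetchTopicOffsets(config, topic);
  for (const { partition, offset, high, low } of offsets) {
    // One compact JSON object per line, matching the JSONL output convention.
    process.stdout.write(JSON.stringify({ partition, offset, high, low }) + '\n');
  }
}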
package/build/utils/output.d.ts
DELETED
package/e2e/cli.test.ts
DELETED
@@ -1,325 +0,0 @@
- import { execSync, spawn } from 'child_process';
- import { mkdtempSync, writeFileSync, readFileSync, rmSync, existsSync } from 'fs';
- import { join } from 'path';
- import { tmpdir } from 'os';
- import { describe, it, expect, beforeAll, afterAll } from 'vitest';
-
- const BROKERS = process.env.KAFKA_BROKERS || 'localhost:9092';
- const CLI_PATH = 'node build/cli.js';
- const TEST_TOPIC = `e2e-test-${Date.now()}`;
- const TEST_GROUP = `e2e-group-${Date.now()}`;
- const TMP_DIR = mkdtempSync(join(tmpdir(), 'kafka-e2e-'));
-
- function cli(args: string): string {
-   return execSync(`${CLI_PATH} ${args} -b ${BROKERS}`, {
-     encoding: 'utf-8',
-     timeout: 15000,
-   }).trim();
- }
-
- function cliWithInput(args: string, input: string): string {
-   return execSync(`echo '${input}' | ${CLI_PATH} ${args} -b ${BROKERS}`, {
-     encoding: 'utf-8',
-     shell: '/bin/bash',
-     timeout: 15000,
-   }).trim();
- }
-
- function parseJsonl<T = unknown>(output: string): T[] {
-   if (!output) return [];
-   return output.split('\n').map((line) => JSON.parse(line) as T);
- }
-
- function cliExpectFail(args: string): { stdout: string; stderr: string } {
-   try {
-     const stdout = cli(args);
-     return { stdout, stderr: '' };
-   } catch (e: unknown) {
-     const err = e as { stdout?: string; stderr?: string };
-     return { stdout: (err.stdout ?? '').trim(), stderr: (err.stderr ?? '').trim() };
-   }
- }
-
- describe('CLI e2e tests', () => {
-   beforeAll(() => {
-     execSync('pnpm build', { encoding: 'utf-8' });
-   });
-
-   afterAll(() => {
-     try {
-       cli(`topic:delete ${TEST_TOPIC}`);
-     } catch {
-       // ignore cleanup errors
-     }
-     try {
-       rmSync(TMP_DIR, { recursive: true });
-     } catch {
-       // ignore cleanup errors
-     }
-   });
-
-   describe('metadata', () => {
-     it('should return cluster metadata', () => {
-       const output = cli('metadata');
-       const [metadata] = parseJsonl<Record<string, unknown>>(output);
-
-       expect(metadata).toHaveProperty('brokers');
-       expect(metadata).toHaveProperty('clusterId');
-       expect(metadata).toHaveProperty('controllerId');
-       expect(Array.isArray(metadata.brokers)).toBe(true);
-       expect(metadata.brokers.length).toBeGreaterThan(0);
-       expect(metadata.brokers[0]).toHaveProperty('host');
-       expect(metadata.brokers[0]).toHaveProperty('port');
-     });
-   });
-
-   describe('topic:create', () => {
-     it('should create a topic', () => {
-       const output = cli(`topic:create ${TEST_TOPIC}`);
-       const topics = parseJsonl<Record<string, unknown>>(output);
-
-       expect(topics.length).toBe(1);
-       expect(topics[0]).toHaveProperty('name', TEST_TOPIC);
-       expect(topics[0]).toHaveProperty('partitions');
-       expect(topics[0]).toHaveProperty('replicas');
-     });
-   });
-
-   describe('list', () => {
-     it('should list topics including the created one', () => {
-       const output = cli('list');
-       const topics = parseJsonl<string>(output);
-       expect(topics).toContain(TEST_TOPIC);
-     });
-
-     it('should list topics with ls alias', () => {
-       const output = cli('ls');
-       const topics = parseJsonl<string>(output);
-       expect(topics).toContain(TEST_TOPIC);
-     });
-
-     it('should list topics with --all flag including internals', () => {
-       const output = cli('list --all');
-       const topics = parseJsonl<string>(output);
-       expect(topics).toContain(TEST_TOPIC);
-     });
-   });
-
-   describe('config', () => {
-     it('should describe topic config', () => {
-       const output = cli(`config -r topic -n ${TEST_TOPIC}`);
-       const configs = parseJsonl<Record<string, unknown>>(output);
-       expect(configs.length).toBeGreaterThan(0);
-     });
-
-     it('should describe broker config', () => {
-       const output = cli('config -r broker -n 1');
-       const configs = parseJsonl<Record<string, unknown>>(output);
-       expect(configs.length).toBeGreaterThan(0);
-     });
-   });
-
-   describe('produce', () => {
-     it('should produce messages via stdin', () => {
-       const message = JSON.stringify({ key: 'test-key', value: 'test-value', headers: { h1: 'v1' } });
-       cliWithInput(`produce ${TEST_TOPIC}`, message);
-     });
-
-     it('should produce multiple messages via stdin', () => {
-       for (let i = 0; i < 3; i++) {
-         const message = JSON.stringify({ key: `key-${i}`, value: `value-${i}` });
-         cliWithInput(`produce ${TEST_TOPIC}`, message);
-       }
-     });
-
-     it('should produce with static headers', () => {
-       const message = JSON.stringify({ key: 'header-key', value: 'header-value' });
-       cliWithInput(`produce ${TEST_TOPIC} -H x-source:test -H x-env:e2e`, message);
-     });
-
-     it('should produce from input file', () => {
-       const filePath = join(TMP_DIR, 'produce-input.json');
-       const messages = [
-         { key: 'file-key-0', value: 'file-value-0' },
-         { key: 'file-key-1', value: 'file-value-1' },
-       ];
-       writeFileSync(filePath, JSON.stringify(messages));
-
-       cli(`produce ${TEST_TOPIC} --input ${filePath}`);
-     });
-
-     it('should produce with raw data format', () => {
-       const message = JSON.stringify({ key: 'raw-key', value: 'raw-value' });
-       cliWithInput(`produce ${TEST_TOPIC} -d raw`, message);
-     });
-
-     it('should produce with --wait', () => {
-       const message = JSON.stringify({ key: 'wait-key', value: 'wait-value' });
-       cliWithInput(`produce ${TEST_TOPIC} --wait 10`, message);
-     });
-   });
-
-   describe('consume', () => {
-     it('should consume messages from the beginning', () => {
-       const group = `${TEST_GROUP}-from0`;
-       const output = cli(`consume ${TEST_TOPIC} -g ${group} --from 0 --count 4 -t 10000`);
-       const messages = output.split('\n').map(line => JSON.parse(line));
-
-       expect(messages.length).toBe(4);
-       expect(messages[0]).toHaveProperty('key', 'test-key');
-       expect(messages[0]).toHaveProperty('value', 'test-value');
-       expect(messages[0]).toHaveProperty('headers');
-       expect(messages[0].headers).toHaveProperty('h1', 'v1');
-       expect(messages[0]).toHaveProperty('partition');
-       expect(messages[0]).toHaveProperty('offset');
-       expect(messages[0]).toHaveProperty('timestamp');
-       expect(messages[0]).toHaveProperty('ahead');
-     });
-
-     it('should skip messages with --skip', () => {
-       const group = `${TEST_GROUP}-skip`;
-       const output = cli(`consume ${TEST_TOPIC} -g ${group} --from 0 --skip 2 --count 2 -t 10000`);
-       const messages = output.split('\n').map(line => JSON.parse(line));
-
-       expect(messages.length).toBe(2);
-       expect(messages[0]).toHaveProperty('key', 'key-1');
-       expect(messages[1]).toHaveProperty('key', 'key-2');
-     });
-
-     it('should consume with --from ISO timestamp', () => {
-       const past = new Date(Date.now() - 120000).toISOString();
-       const group = `${TEST_GROUP}-iso`;
-       const output = cli(`consume ${TEST_TOPIC} -g ${group} --from "${past}" --count 1 -t 10000`);
-       const messages = output.split('\n').map(line => JSON.parse(line));
-
-       expect(messages.length).toBe(1);
-       expect(messages[0]).toHaveProperty('key');
-       expect(messages[0]).toHaveProperty('value');
-     });
-
-     it('should write output to file', () => {
-       const outPath = join(TMP_DIR, 'consume-output.json');
-       const group = `${TEST_GROUP}-file`;
-       cli(`consume ${TEST_TOPIC} -g ${group} --from 0 --count 2 -t 10000 --output ${outPath}`);
-
-       expect(existsSync(outPath)).toBe(true);
-       const content = readFileSync(outPath, 'utf8').trim();
-       const messages = content.split('\n').map(line => JSON.parse(line));
-       expect(messages.length).toBe(2);
-       expect(messages[0]).toHaveProperty('key', 'test-key');
-     });
-
-     it('should consume with raw data format', () => {
-       const group = `${TEST_GROUP}-raw`;
-       const output = cli(`consume ${TEST_TOPIC} -g ${group} --from 0 --count 1 -t 10000 -d raw`);
-       const message = JSON.parse(output);
-       expect(message).toHaveProperty('key', 'test-key');
-       // raw decoder returns the string as-is (JSON-encoded value from producer)
-       expect(message).toHaveProperty('value', '"test-value"');
-     });
-
-     it('should consume static headers from producer', () => {
-       const group = `${TEST_GROUP}-headers`;
-       // Message at offset 4 has static headers x-source:test and x-env:e2e
-       const output = cli(`consume ${TEST_TOPIC} -g ${group} --from 0 --skip 4 --count 1 -t 10000`);
-       const message = JSON.parse(output);
-       expect(message).toHaveProperty('key', 'header-key');
-       expect(message.headers).toHaveProperty('x-source', 'test');
-       expect(message.headers).toHaveProperty('x-env', 'e2e');
-     });
-
-     it('should consume messages produced from file', () => {
-       const group = `${TEST_GROUP}-fileinput`;
-       // Messages at offsets 5-6 were produced from file
-       const output = cli(`consume ${TEST_TOPIC} -g ${group} --from 0 --skip 5 --count 2 -t 10000`);
-       const messages = output.split('\n').map(line => JSON.parse(line));
-       expect(messages.length).toBe(2);
-       expect(messages[0]).toHaveProperty('key', 'file-key-0');
-       expect(messages[1]).toHaveProperty('key', 'file-key-1');
-     });
-
-     it('should timeout when no messages available', () => {
-       const group = `${TEST_GROUP}-timeout`;
-       const result = cliExpectFail(`consume ${TEST_TOPIC} -g ${group} -t 2000`);
-       expect(result.stderr).toContain('TIMEOUT');
-     });
-
-     it('should exit cleanly on SIGINT without timeout', async () => {
-       const group = `${TEST_GROUP}-sigint`;
-       const child = spawn('node', ['build/cli.js', 'consume', TEST_TOPIC, '-g', group, '-b', BROKERS], {
-         stdio: ['ignore', 'pipe', 'pipe'],
-       });
-
-       const stdoutChunks: Buffer[] = [];
-       const stderrChunks: Buffer[] = [];
-       child.stdout.on('data', (chunk: Buffer) => stdoutChunks.push(chunk));
-       child.stderr.on('data', (chunk: Buffer) => stderrChunks.push(chunk));
-
-       await new Promise((resolve) => setTimeout(resolve, 300));
-       cliWithInput(`produce ${TEST_TOPIC}`, JSON.stringify({ key: 'sigint-key', value: 'sigint-value' }));
-       await new Promise<void>((resolve, reject) => {
-         const timer = setTimeout(() => reject(new Error('consume did not receive a message before SIGINT')), 3000);
-         child.stdout.once('data', () => {
-           clearTimeout(timer);
-           resolve();
-         });
-       });
-       child.kill('SIGINT');
-
-       const result = await new Promise<{ code: number | null; signal: NodeJS.Signals | null }>((resolve) => {
-         child.once('exit', (code, signal) => resolve({ code, signal }));
-       });
-
-       expect(result.signal).toBeNull();
-       expect(result.code).toBe(0);
-       expect(Buffer.concat(stdoutChunks).toString()).toContain('"key":"sigint-key"');
-       expect(Buffer.concat(stderrChunks).toString()).not.toContain('TIMEOUT');
-     });
-   });
-
-   describe('topic:offsets', () => {
-     it('should return topic offsets (high/low)', () => {
-       const output = cli(`topic:offsets ${TEST_TOPIC}`);
-       const offsets = parseJsonl<Record<string, unknown>>(output);
-
-       expect(offsets.length).toBeGreaterThan(0);
-       expect(offsets[0]).toHaveProperty('partition');
-       expect(offsets[0]).toHaveProperty('offset');
-       expect(offsets[0]).toHaveProperty('high');
-       expect(offsets[0]).toHaveProperty('low');
-       expect(Number(offsets[0].high)).toBeGreaterThanOrEqual(4);
-     });
-
-     it('should return consumer group offsets', () => {
-       const output = cli(`topic:offsets ${TEST_TOPIC} -g ${TEST_GROUP}-from0`);
-       const offsets = parseJsonl<Record<string, unknown>>(output);
-
-       expect(offsets.length).toBe(1);
-       expect(offsets[0]).toHaveProperty('topic', TEST_TOPIC);
-       expect(offsets[0]).toHaveProperty('partitions');
-       expect(Array.isArray(offsets[0].partitions)).toBe(true);
-     });
-
-     it('should return offsets by timestamp', () => {
-       const timestamp = new Date(Date.now() - 60000).toISOString();
-       const output = cli(`topic:offsets ${TEST_TOPIC} "${timestamp}"`);
-       const offsets = parseJsonl<Record<string, unknown>>(output);
-
-       expect(offsets.length).toBeGreaterThan(0);
-       expect(offsets[0]).toHaveProperty('partition');
-       expect(offsets[0]).toHaveProperty('offset');
-       expect(offsets[0]).not.toHaveProperty('high');
-       expect(offsets[0]).not.toHaveProperty('low');
-     });
-   });
-
-   describe('topic:delete', () => {
-     it('should delete the topic', () => {
-       cli(`topic:delete ${TEST_TOPIC}`);
-
-       const output = cli('list');
-       const topics = parseJsonl<string>(output);
-       expect(topics).not.toContain(TEST_TOPIC);
-     });
-   });
- });
package/eslint.config.js
DELETED
@@ -1,37 +0,0 @@
- import eslint from '@eslint/js';
- import tseslint from 'typescript-eslint';
- import { defineConfig, globalIgnores } from 'eslint/config';
- import prettier from 'eslint-plugin-prettier';
-
- export default defineConfig(
-   globalIgnores(['**/docs', '**/build', '**/coverage', '**/node_modules', '**/scripts', '**/*.tmp.ts', '**/__tests__/*', '**/__bench__/*', '**/__mocks__/*', '**/e2e/*', 'vitest.config*.ts', '**/*.js', '**/*.cjs', '**/*.mjs']),
-   eslint.configs.recommended,
-   tseslint.configs.recommendedTypeChecked,
-   {
-     files: ['**/*.ts'],
-     plugins: {
-       prettier,
-     },
-     languageOptions: {
-       parserOptions: {
-         projectService: true,
-         tsconfigRootDir: import.meta.dirname,
-       },
-     },
-     rules: {
-       'prettier/prettier': 2,
-       '@typescript-eslint/no-unused-vars': 2,
-       '@typescript-eslint/ban-ts-comment': 1,
-       '@typescript-eslint/await-thenable': 1,
-       '@typescript-eslint/no-floating-promises': 1,
-       '@typescript-eslint/ban-types': 0,
-       '@typescript-eslint/no-explicit-any': 0,
-       '@typescript-eslint/require-await': 0,
-       '@typescript-eslint/no-misused-promises': 0,
-       '@typescript-eslint/no-empty-object-type': 0,
-       '@typescript-eslint/no-unsafe-declaration-merging': 0,
-       '@typescript-eslint/prefer-promise-reject-errors': 0,
-     },
-   },
- );
-
package/pnpm-workspace.yaml
DELETED
package/scripts/bench-kafka.js
DELETED
@@ -1,456 +0,0 @@
- /* eslint-disable no-console */
- const { Kafka, CompressionTypes, logLevel } = require('kafkajs');
- const KafkaRDKafka = require('node-rdkafka');
- const { Producer: PlatformaticProducer, Consumer: PlatformaticConsumer, CompressionAlgorithms, ProduceAcks, stringSerializers, stringDeserializers, MessagesStreamModes } = require('@platformatic/kafka');
- const crypto = require('crypto');
-
- function generatePayload(size) {
-   // Simulate JSON-like data: ~50% compressible structure, ~50% random values
-   const template = '{"id":"","ts":,"data":""}';
-   const randomPart = crypto.randomBytes(Math.floor(size / 2)).toString('base64');
-   const padding = 'x'.repeat(Math.max(0, size - template.length - randomPart.length));
-   return JSON.stringify({ id: crypto.randomUUID(), ts: Date.now(), data: randomPart, pad: padding }).slice(0, size).padEnd(size, ' ');
- }
-
- function parseArgs() {
-   const args = process.argv.slice(2);
-   const out = {
-     brokers: process.env.KAFKA_BROKERS || 'localhost:9092',
-     topic: `kcli-bench-${Date.now()}`,
-     count: 1_000_000,
-     size: 1024,
-     batchSize: 1000,
-     concurrency: 16,
-     partitions: 12,
-     acks: 1,
-     rdkConsumeBatch: 1000,
-     rdkFetchMinBytes: 1048576,
-     rdkFetchWaitMs: 50,
-     rdkMaxPartitionFetchBytes: 5242880,
-     rdkQueuedMinMessages: 10000,
-     rdkQueuedMaxKbytes: 102400,
-   };
-
-   for (let i = 0; i < args.length; i += 1) {
-     const arg = args[i];
-     if (arg === '--brokers') out.brokers = args[++i];
-     else if (arg === '--topic') out.topic = args[++i];
-     else if (arg === '--count') out.count = Number(args[++i]);
-     else if (arg === '--size') out.size = Number(args[++i]);
-     else if (arg === '--batch') out.batchSize = Number(args[++i]);
-     else if (arg === '--concurrency') out.concurrency = Number(args[++i]);
-     else if (arg === '--partitions') out.partitions = Number(args[++i]);
-     else if (arg === '--acks') out.acks = Number(args[++i]);
-     else if (arg === '--rdk-consume-batch') out.rdkConsumeBatch = Number(args[++i]);
-     else if (arg === '--rdk-fetch-min-bytes') out.rdkFetchMinBytes = Number(args[++i]);
-     else if (arg === '--rdk-fetch-wait-ms') out.rdkFetchWaitMs = Number(args[++i]);
-     else if (arg === '--rdk-max-partition-fetch-bytes') out.rdkMaxPartitionFetchBytes = Number(args[++i]);
-     else if (arg === '--rdk-queued-min-messages') out.rdkQueuedMinMessages = Number(args[++i]);
-     else if (arg === '--rdk-queued-max-kbytes') out.rdkQueuedMaxKbytes = Number(args[++i]);
-   }
-
-   return out;
- }
-
- function elapsedMs(start) {
-   const diff = process.hrtime.bigint() - start;
-   return Number(diff) / 1_000_000;
- }
-
- async function ensureTopicKafkaJS(brokers, topic, partitions) {
-   const kafka = new Kafka({
-     clientId: 'kcli-bench-admin',
-     brokers: brokers.split(','),
-     logLevel: logLevel.NOTHING,
-   });
-   const admin = kafka.admin();
-   await admin.connect();
-   try {
-     await admin.createTopics({
-       topics: [{ topic, numPartitions: partitions }],
-       waitForLeaders: true,
-     });
-   } catch (err) {
-     // topic may already exist or auto-create is enabled
-   } finally {
-     await admin.disconnect();
-   }
- }
-
- async function benchKafkaJSProduce({ brokers, topic, count, size, batchSize, concurrency, acks }) {
-   const kafka = new Kafka({
-     clientId: 'kcli-bench-producer',
-     brokers: brokers.split(','),
-     logLevel: logLevel.NOTHING,
-   });
-   const producer = kafka.producer({ allowAutoTopicCreation: true });
-   await producer.connect();
-
-   const payload = generatePayload(size);
-   const start = process.hrtime.bigint();
-   const inFlight = [];
-   const windowSize = Math.max(1, concurrency);
-
-   for (let i = 0; i < count; i += batchSize) {
-     const messages = [];
-     const end = Math.min(i + batchSize, count);
-     for (let j = i; j < end; j += 1) {
-       messages.push({ value: payload });
-     }
-     inFlight.push(producer.send({
-       topic,
-       messages,
-       compression: CompressionTypes.GZIP,
-       acks,
-     }));
-     if (inFlight.length >= windowSize) {
-       await Promise.all(inFlight);
-       inFlight.length = 0;
-     }
-   }
-
-   if (inFlight.length > 0) {
-     await Promise.all(inFlight);
-   }
-
-   const ms = elapsedMs(start);
-   await producer.disconnect();
-   return ms;
- }
-
- async function benchKafkaJSConsume({ brokers, topic, count }) {
-   const kafka = new Kafka({
-     clientId: 'kcli-bench-consumer',
-     brokers: brokers.split(','),
-     logLevel: logLevel.NOTHING,
-   });
-   const groupId = `kcli-bench-js-${Date.now()}`;
-   const consumer = kafka.consumer({ groupId });
-   await consumer.connect();
-   await consumer.subscribe({ topic, fromBeginning: true });
-
-   let firstMessageMs = null;
-   let seen = 0;
-   const start = process.hrtime.bigint();
-   let resolveDone;
-   let doneResolved = false;
-
-   const done = new Promise((resolve, reject) => {
-     resolveDone = resolve;
-     consumer.run({
-       autoCommit: false,
-       eachBatch: async ({ batch, resolveOffset, heartbeat, isRunning, isStale }) => {
-         for (const message of batch.messages) {
-           if (!isRunning() || isStale()) return;
-           if (firstMessageMs === null) {
-             firstMessageMs = elapsedMs(start);
-           }
-           seen += 1;
-           resolveOffset(message.offset);
-           if (seen >= count && !doneResolved) {
-             doneResolved = true;
-             resolveDone();
-             break;
-           }
-         }
-         await heartbeat();
-       },
-     }).catch(reject);
-   });
-
-   await done;
-   await consumer.stop();
-   await consumer.disconnect();
-
-   return { totalMs: elapsedMs(start), firstMessageMs };
- }
-
- function createRDKafkaProducer(brokers, acks) {
-   return new KafkaRDKafka.Producer({
-     'metadata.broker.list': brokers,
-     'request.required.acks': acks,
-     'dr_cb': true,
-     'queue.buffering.max.messages': 1000000,
-     'compression.codec': 'gzip',
-   });
- }
-
- async function benchRDKafkaProduce({ brokers, topic, count, size, acks }) {
-   const producer = createRDKafkaProducer(brokers, acks);
-   let delivered = 0;
-   let deliveryErrors = 0;
-
-   await new Promise((resolve, reject) => {
-     producer.on('ready', resolve);
-     producer.on('event.error', reject);
-     producer.connect();
-   });
-
-   producer.on('delivery-report', (err) => {
-     if (err) {
-       deliveryErrors += 1;
-       return;
-     }
-     delivered += 1;
-   });
-
-   producer.setPollInterval(100);
-   const payload = Buffer.from(generatePayload(size));
-   const start = process.hrtime.bigint();
-
-   for (let i = 0; i < count; i += 1) {
-     try {
-       producer.produce(topic, null, payload, null, Date.now());
-     } catch (err) {
-       if (err && err.code === -184) {
-         await new Promise((resolve) => producer.once('drain', resolve));
-         producer.produce(topic, null, payload, null, Date.now());
-       } else {
-         throw err;
-       }
-     }
-   }
-
-   await new Promise((resolve, reject) => {
-     producer.flush(30_000, (err) => {
-       if (err) return reject(err);
-       resolve();
-     });
-   });
-
-   await new Promise((resolve) => {
-     const waitStart = Date.now();
-     const check = () => {
-       if (delivered + deliveryErrors >= count) return resolve();
-       if (Date.now() - waitStart >= 60_000) return resolve();
-       setTimeout(check, 50);
-     };
-     check();
-   });
-
-   const ms = elapsedMs(start);
-   producer.disconnect();
-   return { ms, delivered, deliveryErrors };
- }
-
- async function benchRDKafkaConsume({
-   brokers,
-   topic,
-   count,
-   rdkConsumeBatch,
-   rdkFetchMinBytes,
-   rdkFetchWaitMs,
-   rdkMaxPartitionFetchBytes,
-   rdkQueuedMinMessages,
-   rdkQueuedMaxKbytes,
- }) {
-   const groupId = `kcli-bench-rdk-${Date.now()}`;
-
-   const consumer = new KafkaRDKafka.KafkaConsumer({
-     'metadata.broker.list': brokers,
-     'group.id': groupId,
-     'enable.auto.commit': false,
-     'auto.offset.reset': 'earliest',
-     'fetch.min.bytes': rdkFetchMinBytes,
-     'fetch.wait.max.ms': rdkFetchWaitMs,
-     'max.partition.fetch.bytes': rdkMaxPartitionFetchBytes,
-     'queued.min.messages': rdkQueuedMinMessages,
-     'queued.max.messages.kbytes': rdkQueuedMaxKbytes,
-   }, {});
-
-   await new Promise((resolve, reject) => {
-     consumer.on('ready', resolve);
-     consumer.on('event.error', reject);
-     consumer.connect();
-   });
-
-   const metadata = await new Promise((resolve, reject) => {
-     consumer.getMetadata({ topic, timeout: 10000 }, (err, data) => {
-       if (err) reject(err);
-       else resolve(data);
-     });
-   });
-
-   const topicMeta = metadata.topics.find((t) => t.name === topic);
-   if (!topicMeta) throw new Error(`Topic ${topic} not found in metadata`);
-
-   const RD_KAFKA_OFFSET_BEGINNING = -2;
-   const assignments = topicMeta.partitions.map((p) => ({
-     topic,
-     partition: p.id,
-     offset: RD_KAFKA_OFFSET_BEGINNING,
-   }));
-   consumer.assign(assignments);
-
-   let firstMessageMs = null;
-   let seen = 0;
-   const start = process.hrtime.bigint();
-   const progressInterval = Math.max(1, Math.floor(count / 10));
-   const batchSize = 5000;
-
-   await new Promise((resolve, reject) => {
-     consumer.on('event.error', reject);
-
-     const consumeNext = () => {
-       if (seen >= count) {
-         consumer.disconnect();
-         resolve();
-         return;
-       }
-       consumer.consume(batchSize, (err, messages) => {
-         if (err) return reject(err);
-         const len = messages ? messages.length : 0;
-         if (len > 0) {
-           if (firstMessageMs === null) firstMessageMs = elapsedMs(start);
-           seen += len;
-           if (seen % progressInterval < len) {
-             console.log(`node-rdkafka consume progress: ${seen}/${count}`);
-           }
-         }
-         setImmediate(consumeNext);
-       });
-     };
-     consumeNext();
-   });
-   return { totalMs: elapsedMs(start), firstMessageMs };
- }
-
- async function benchPlatformaticProduce({ brokers, topic, count, size, batchSize, acks }) {
-   const producer = new PlatformaticProducer({
-     clientId: 'kcli-bench-platformatic-producer',
-     bootstrapBrokers: brokers.split(','),
-     compression: CompressionAlgorithms.GZIP,
-     serializers: stringSerializers,
-   });
-
-   const payload = generatePayload(size);
-   const start = process.hrtime.bigint();
-
-   for (let i = 0; i < count; i += batchSize) {
-     const messages = [];
-     const end = Math.min(i + batchSize, count);
-     for (let j = i; j < end; j += 1) {
-       messages.push({ topic, value: payload });
-     }
-     await producer.send({
-       messages,
-       acks: acks === 1 ? ProduceAcks.LEADER : ProduceAcks.ALL,
-     });
-   }
-
-   const ms = elapsedMs(start);
-   await producer.close();
-   return ms;
- }
-
- async function benchPlatformaticConsume({ brokers, topic, count }) {
-   const groupId = `kcli-bench-platformatic-${Date.now()}`;
-
-   const consumer = new PlatformaticConsumer({
-     clientId: 'kcli-bench-platformatic-consumer',
-     bootstrapBrokers: brokers.split(','),
-     groupId,
-     autocommit: true,
-     deserializers: stringDeserializers,
-   });
-
-   // Get earliest offsets for all partitions
-   const offsetsMap = await consumer.listOffsets({ topics: [topic], timestamp: -2n });
-   const offsets = [];
-   const partitionOffsets = offsetsMap.get(topic);
-   if (partitionOffsets) {
-     for (let partition = 0; partition < partitionOffsets.length; partition++) {
-       offsets.push({ topic, partition, offset: partitionOffsets[partition] });
-     }
-   }
-   console.log(`@plt: using MANUAL mode with ${offsets.length} partitions`);
-
-   const stream = await consumer.consume({
-     topics: [topic],
-     mode: MessagesStreamModes.MANUAL,
-     offsets,
-     sessionTimeout: 10000,
-     heartbeatInterval: 500,
-   });
-
-   let firstMessageMs = null;
-   let seen = 0;
-   const start = process.hrtime.bigint();
-   const progressInterval = Math.max(1, Math.floor(count / 10));
-
-   const result = await new Promise((resolve, reject) => {
-     stream.on('error', (err) => {
-       console.error('@platformatic/kafka stream error:', err);
-       reject(err);
-     });
-
-     stream.on('data', (message) => {
-       if (firstMessageMs === null) {
-         firstMessageMs = elapsedMs(start);
-       }
-       seen += 1;
-       if (seen % progressInterval === 0) {
-         console.log(`platformatic consume progress: ${seen}/${count}`);
-       }
-       if (seen >= count) {
-         stream.destroy();
-         resolve({ totalMs: elapsedMs(start), firstMessageMs });
-       }
-     });
-
-     stream.on('end', () => {
-       console.log('@platformatic/kafka stream ended');
-       resolve({ totalMs: elapsedMs(start), firstMessageMs });
-     });
-
-     stream.resume();
-   });
-
-   await consumer.close(true);
-   return result;
- }
-
- (async () => {
-   const config = parseArgs();
-   const { brokers, topic } = config;
-   const jsTopic = `${topic}-js`;
-   const rdkTopic = `${topic}-rdk`;
-   const pltTopic = `${topic}-plt`;
-
-   console.log('Benchmark config:', { ...config, topics: { kafkaJs: jsTopic, rdkafka: rdkTopic, platformatic: pltTopic } });
-   await ensureTopicKafkaJS(brokers, jsTopic, config.partitions);
-   await ensureTopicKafkaJS(brokers, rdkTopic, config.partitions);
-   await ensureTopicKafkaJS(brokers, pltTopic, config.partitions);
-
-   console.log('\nKafkaJS produce...');
-   const jsProduceMs = await benchKafkaJSProduce({ ...config, topic: jsTopic });
-   console.log(`KafkaJS produce: ${jsProduceMs.toFixed(1)} ms (${(config.count / (jsProduceMs / 1000)).toFixed(1)} msg/s)`);
-
-   console.log('KafkaJS consume...');
-   const jsConsume = await benchKafkaJSConsume({ ...config, topic: jsTopic });
-   console.log(`KafkaJS first message: ${jsConsume.firstMessageMs.toFixed(1)} ms`);
-   console.log(`KafkaJS consume: ${jsConsume.totalMs.toFixed(1)} ms (${(config.count / (jsConsume.totalMs / 1000)).toFixed(1)} msg/s)`);
-
-   console.log('\nnode-rdkafka produce...');
-   const rdkProduce = await benchRDKafkaProduce({ ...config, topic: rdkTopic });
-   const rdkDelivered = rdkProduce.delivered > 0 ? rdkProduce.delivered : config.count;
-   console.log(`node-rdkafka produce: ${rdkProduce.ms.toFixed(1)} ms (${(config.count / (rdkProduce.ms / 1000)).toFixed(1)} msg/s)`);
-   console.log(`node-rdkafka delivered: ${rdkDelivered} (errors: ${rdkProduce.deliveryErrors})`);
-
-   console.log('node-rdkafka consume...');
-   const rdkConsume = await benchRDKafkaConsume({ ...config, topic: rdkTopic, count: rdkDelivered });
-   console.log(`node-rdkafka first message: ${rdkConsume.firstMessageMs.toFixed(1)} ms`);
-   console.log(`node-rdkafka consume: ${rdkConsume.totalMs.toFixed(1)} ms (${(rdkDelivered / (rdkConsume.totalMs / 1000)).toFixed(1)} msg/s)`);
-
-   console.log('\n@platformatic/kafka produce...');
-   const pltProduceMs = await benchPlatformaticProduce({ ...config, topic: pltTopic });
-   console.log(`@platformatic/kafka produce: ${pltProduceMs.toFixed(1)} ms (${(config.count / (pltProduceMs / 1000)).toFixed(1)} msg/s)`);
-
-   console.log('@platformatic/kafka consume...');
-   const pltConsume = await benchPlatformaticConsume({ ...config, topic: pltTopic });
-   console.log(`@platformatic/kafka first message: ${pltConsume.firstMessageMs?.toFixed(1) ?? 'N/A'} ms`);
-   console.log(`@platformatic/kafka consume: ${pltConsume.totalMs.toFixed(1)} ms (${(config.count / (pltConsume.totalMs / 1000)).toFixed(1)} msg/s)`);
- })().catch((err) => {
-   console.error(err);
-   process.exit(1);
- });
package/tsconfig.build.json
DELETED
package/vitest.config.e2e.ts
DELETED
package/vitest.config.ts
DELETED
@@ -1,17 +0,0 @@
- import { defineConfig } from 'vitest/config';
-
- export default defineConfig({
-   test: {
-     include: ['src/**/__tests__/**/*.ts'],
-     exclude: ['src/**/__tests__/**/*.e2e.ts'],
-     alias: {
-       '@platformatic/kafka': new URL('./src/__mocks__/@platformatic/kafka.ts', import.meta.url).pathname,
-       'commander': new URL('./src/__mocks__/commander.ts', import.meta.url).pathname,
-     },
-     coverage: {
-       enabled: !!process.env.CI || !!process.env.COVERAGE,
-       include: ['src/**/*.ts'],
-       exclude: ['src/__mocks__/**', 'src/**/__tests__/**'],
-     },
-   },
- });