kafka-ts 0.0.1-beta.2 → 0.0.1-beta.3

package/README.md CHANGED
@@ -105,32 +105,52 @@ The existing high-level libraries (e.g. kafkajs) are missing a few crucial featu
 
 ### `kafka.startConsumer()`
 
- | Name | Type | Required | Default | Description |
- | --- | --- | --- | --- | --- |
- | topics | string[] | true | | List of topics to subscribe to |
- | groupId | string | false | _null_ | Consumer group id |
- | groupInstanceId | string | false | _null_ | Consumer group instance id |
- | rackId | string | false | _null_ | Rack id |
- | isolationLevel | IsolationLevel | false | | Isolation level |
- | sessionTimeoutMs | number | false | 30000 | Session timeout in milliseconds |
- | rebalanceTimeoutMs | number | false | 60000 | Rebalance timeout in milliseconds |
- | maxWaitMs | number | false | 5000 | Fetch long poll timeout in milliseconds |
- | minBytes | number | false | 1 | Minimum number of bytes to wait for before returning a fetch response |
- | maxBytes | number | false | 1_048_576 | Maximum number of bytes to return in the fetch response |
- | partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response |
- | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
- | fromBeginning | boolean | false | false | Start consuming from the beginning of the topic |
- | batchGranularity | BatchGranularity | false | partition | Controls messages split from fetch response. Also controls how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single batch<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assigned topics and partitions |
- | concurrency | number | false | 1 | How many batches to process concurrently |
- | onMessage | (message: Message) => Promise<unknown> | true | | Callback executed on every message |
- | onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed on every batch of messages (based on **batchGranularity**) |
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | topics | string[] | true | | List of topics to subscribe to |
+ | groupId | string | false | _null_ | Consumer group id |
+ | groupInstanceId | string | false | _null_ | Consumer group instance id |
+ | rackId | string | false | _null_ | Rack id |
+ | isolationLevel | IsolationLevel | false | IsolationLevel.READ_UNCOMMITTED | Isolation level |
+ | sessionTimeoutMs | number | false | 30000 | Session timeout in milliseconds |
+ | rebalanceTimeoutMs | number | false | 60000 | Rebalance timeout in milliseconds |
+ | maxWaitMs | number | false | 5000 | Fetch long poll timeout in milliseconds |
+ | minBytes | number | false | 1 | Minimum number of bytes to wait for before returning a fetch response |
+ | maxBytes | number | false | 1_048_576 | Maximum number of bytes to return in the fetch response |
+ | partitionMaxBytes | number | false | 1_048_576 | Maximum number of bytes to return per partition in the fetch response |
+ | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+ | fromBeginning | boolean | false | false | Start consuming from the beginning of the topic |
+ | batchGranularity | BatchGranularity | false | partition | Controls messages split from fetch response. Also controls how often offsets are committed. **onBatch** will include messages:<br/>- **partition** - from a single batch<br/>- **topic** - from all topic partitions<br/>- **broker** - from all assigned topics and partitions |
+ | concurrency | number | false | 1 | How many batches to process concurrently |
+ | onMessage | (message: Message) => Promise<unknown> | true | | Callback executed on every message |
+ | onBatch | (batch: Message[]) => Promise<unknown> | true | | Callback executed on every batch of messages (based on **batchGranularity**) |
 
 ### `kafka.createProducer()`
 
- | Name | Type | Required | Default | Description |
- | --- | --- | --- | --- | --- |
- | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
- | partitioner | Partitioner | false | | Custom partitioner function. By default, it uses a default java-compatible partitioner. |
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | allowTopicAutoCreation | boolean | false | false | Allow kafka to auto-create topic when it doesn't exist |
+ | partitioner | Partitioner | false | defaultPartitioner | Custom partitioner function. By default, it uses a default java-compatible partitioner. |
+
+ ### `producer.send(messages: Message[])`
+
+ <!-- export type Message = {
+     topic: string;
+     partition?: number;
+     timestamp?: bigint;
+     key?: Buffer | null;
+     value: Buffer | null;
+     headers?: Record<string, string>;
+ }; -->
+
+ | Name | Type | Required | Default | Description |
+ | --- | --- | --- | --- | --- |
+ | topic | string | true | | Topic to send the message to |
+ | partition | number | false | _null_ | Partition to send the message to. By default partitioned by key. If key is also missing, partition is assigned round-robin |
+ | timestamp | bigint | false | _null_ | Message timestamp in milliseconds |
+ | key | Buffer \| null | false | _null_ | Message key |
+ | value | Buffer \| null | true | | Message value |
+ | headers | Record<string, string> | false | _null_ | Message headers |
 
 ### Supported SASL mechanisms
 
@@ -10,9 +10,7 @@ rl.on('line', async (line) => {
     await producer.send([
         {
             topic: 'example-topic-f',
-            key: null,
             value: Buffer.from(line),
-            partition: 0,
         },
     ]);
     process.stdout.write('> ');
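
Taken together, the README changes above describe the consumer and producer options as of beta.3. The following is a minimal, illustrative sketch of how they line up. It assumes an already-configured kafka-ts client instance (`kafka` below stands in for it; client construction is not part of this diff) and made-up topic and group names.

```ts
// Sketch only: `kafka` stands in for an already-configured kafka-ts client;
// how the client is constructed is not shown in this diff.
declare const kafka: any;

const producer = kafka.createProducer({ allowTopicAutoCreation: false });

// As of beta.3, `key` and `partition` are optional on Message, so a minimal
// message needs only `topic` and `value`; partitioning then falls back to
// key-based or round-robin assignment per the producer.send() table.
await producer.send([{ topic: 'example-topic-f', value: Buffer.from('hello') }]);

// Options mirror the kafka.startConsumer() table; anything omitted keeps its
// documented default (e.g. isolationLevel = IsolationLevel.READ_UNCOMMITTED).
// The table marks both onMessage and onBatch as required; only a batch
// handler is sketched here.
await kafka.startConsumer({
    topics: ['example-topic-f'],
    groupId: 'example-group', // hypothetical group id
    onBatch: async (batch: { value: Buffer | null }[]) => {
        for (const message of batch) {
            console.log(message.value?.toString());
        }
    },
});
```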
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "kafka-ts",
-    "version": "0.0.1-beta.2",
+    "version": "0.0.1-beta.3",
     "main": "dist/index.js",
     "author": "Priit Käärd",
     "license": "MIT",
@@ -85,7 +85,7 @@ export class Producer {
     attributes: 0,
     timestampDelta: (message.timestamp ?? defaultTimestamp) - (baseTimestamp ?? 0n),
     offsetDelta: index,
-    key: message.key,
+    key: message.key ?? null,
     value: message.value,
     headers: Object.entries(message.headers ?? {}).map(([key, value]) => ({
         key: Buffer.from(key),
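
The `?? null` here is the producer-side counterpart of the `Message` type change below: a key that is omitted entirely now arrives as `undefined`, and nullish coalescing normalizes it to `null`, which is how records without a key were already represented. A minimal sketch of that normalization:

```ts
// message.key has type Buffer | null | undefined once `key?` is optional;
// `?? null` collapses the undefined case into null.
const omittedKey: Buffer | null | undefined = undefined;
const normalizedKey: Buffer | null = omittedKey ?? null; // -> null
```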
package/src/types.ts CHANGED
@@ -1,9 +1,9 @@
 export type Message = {
     topic: string;
-    partition: number;
+    partition?: number;
     offset?: bigint;
     timestamp?: bigint;
-    key: Buffer | null;
+    key?: Buffer | null;
     value: Buffer | null;
     headers?: Record<string, string>;
 };
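
With the loosened `Message` type, callers no longer need the explicit `key: null` / `partition: 0` that the README example above dropped. A quick illustration, assuming the type is re-exported from the package entry point (it is defined and exported in `src/types.ts`):

```ts
import type { Message } from 'kafka-ts'; // assumption: Message is re-exported from the package root

// beta.3: partition and key may simply be omitted
const minimal: Message = { topic: 'example-topic-f', value: Buffer.from('hello') };

// the previous, fully explicit shape still type-checks
const explicit: Message = { topic: 'example-topic-f', partition: 0, key: null, value: Buffer.from('hello') };
```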