@platformatic/kafka 1.9.0 → 1.10.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -29,7 +29,7 @@ export interface FetchResponsePartition {
   logStartOffset: bigint;
   abortedTransactions: FetchResponsePartitionAbortedTransaction[];
   preferredReadReplica: number;
-  records?: RecordsBatch;
+  records?: RecordsBatch[];
 }
 export interface FetchResponseTopic {
   topicId: string;

@@ -105,7 +105,11 @@ export function parseResponse(_correlationId, apiKey, apiVersion, reader) {
   }
   if (recordsSize > 1) {
     recordsSize--;
-    partition.records = readRecordsBatch(Reader.from(r.buffer.subarray(r.position, r.position + recordsSize)));
+    const recordsBatchesReader = Reader.from(r.buffer.subarray(r.position, r.position + recordsSize));
+    partition.records = [];
+    do {
+      partition.records.push(readRecordsBatch(recordsBatchesReader));
+    } while (recordsBatchesReader.position < recordsSize);
     r.skip(recordsSize);
   }
   return partition;
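
Together these two changes mean a fetched partition no longer carries a single RecordsBatch but an array of them: the broker may pack several batches back to back into the records block, and the parser now loops until the dedicated reader has consumed the whole block. A minimal, self-contained sketch of what consuming the new shape looks like; the interfaces below are simplified stand-ins for the package's RecordsBatch and record types, modelling only the fields used here:

```ts
// Simplified stand-ins for the package's batch/record shapes (assumption:
// only firstOffset and per-record offsetDelta are modelled).
interface FetchedRecord {
  offsetDelta: number;
}

interface FetchedRecordsBatch {
  firstOffset: bigint;
  records: FetchedRecord[];
}

// Compute the absolute offset of every record in a partition, however many
// batches the broker packed into the response (records is now an array).
function collectOffsets (batches: FetchedRecordsBatch[] | undefined): bigint[] {
  const offsets: bigint[] = [];

  for (const batch of batches ?? []) {
    for (const record of batch.records) {
      offsets.push(batch.firstOffset + BigInt(record.offsetDelta));
    }
  }

  return offsets;
}
```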

@@ -29,7 +29,7 @@ export interface FetchResponsePartition {
   logStartOffset: bigint;
   abortedTransactions: FetchResponsePartitionAbortedTransaction[];
   preferredReadReplica: number;
-  records?: RecordsBatch;
+  records?: RecordsBatch[];
 }
 export interface FetchResponseTopic {
   topicId: string;

@@ -105,7 +105,11 @@ export function parseResponse(_correlationId, apiKey, apiVersion, reader) {
   }
   if (recordsSize > 1) {
     recordsSize--;
-    partition.records = readRecordsBatch(Reader.from(r.buffer.subarray(r.position, r.position + recordsSize)));
+    const recordsBatchesReader = Reader.from(r.buffer.subarray(r.position, r.position + recordsSize));
+    partition.records = [];
+    do {
+      partition.records.push(readRecordsBatch(recordsBatchesReader));
+    } while (recordsBatchesReader.position < recordsSize);
     r.skip(recordsSize);
   }
   return partition;

@@ -29,7 +29,7 @@ export interface FetchResponsePartition {
   logStartOffset: bigint;
   abortedTransactions: FetchResponsePartitionAbortedTransaction[];
   preferredReadReplica: number;
-  records?: RecordsBatch;
+  records?: RecordsBatch[];
 }
 export interface FetchResponseTopic {
   topicId: string;

@@ -105,7 +105,11 @@ export function parseResponse(_correlationId, apiKey, apiVersion, reader) {
   }
   if (recordsSize > 1) {
     recordsSize--;
-    partition.records = readRecordsBatch(Reader.from(r.buffer.subarray(r.position, r.position + recordsSize)));
+    const recordsBatchesReader = Reader.from(r.buffer.subarray(r.position, r.position + recordsSize));
+    partition.records = [];
+    do {
+      partition.records.push(readRecordsBatch(recordsBatchesReader));
+    } while (recordsBatchesReader.position < recordsSize);
     r.skip(recordsSize);
   }
   return partition;

@@ -9,7 +9,7 @@ export declare function defaultCorruptedMessageHandler(): boolean;
 export declare class MessagesStream<Key, Value, HeaderKey, HeaderValue> extends Readable {
   #private;
   constructor(consumer: Consumer<Key, Value, HeaderKey, HeaderValue>, options: ConsumeOptions<Key, Value, HeaderKey, HeaderValue>);
-  close(callback?: CallbackWithPromise<void>): void;
+  close(callback: CallbackWithPromise<void>): void;
   close(): Promise<void>;
   isActive(): boolean;
   isConnected(): boolean;
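
The declaration change only tightens the callback overload: close(callback) now requires the callback argument, while the zero-argument overload keeps returning a promise. Both call styles therefore still work. A hedged sketch using a structural stand-in for the stream (the callback signature is an assumption about CallbackWithPromise, not the package's exact type):

```ts
// Structural stand-in for the relevant part of MessagesStream's surface:
// the two close() overloads shown in the declaration above.
interface ClosableStream {
  close(callback: (error?: Error | null) => void): void;
  close(): Promise<void>;
}

// Promise style: calling close() with no arguments selects the
// close(): Promise<void> overload.
async function shutdown (stream: ClosableStream): Promise<void> {
  await stream.close();
}

// Callback style: under the new typings the callback is required,
// not optional, when you use this overload.
function shutdownWithCallback (stream: ClosableStream): void {
  stream.close(error => {
    if (error) {
      console.error('Failed to close the messages stream', error);
    }
  });
}
```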

@@ -291,70 +291,74 @@ export class MessagesStream extends Readable {
 // Parse results
 for (const topicResponse of response.responses) {
   const topic = topicIds.get(topicResponse.topicId);
-  for (const { records, partitionIndex: partition } of topicResponse.partitions) {
-    if (!records) {
+  for (const { records: recordsBatches, partitionIndex: partition } of topicResponse.partitions) {
+    if (!recordsBatches) {
       continue;
     }
-    const firstTimestamp = records.firstTimestamp;
-    const firstOffset = records.firstOffset;
-    const leaderEpoch = metadata.topics.get(topic).partitions[partition].leaderEpoch;
-    for (const record of records.records) {
-      const offset = records.firstOffset + BigInt(record.offsetDelta);
-      if (offset < requestedOffsets.get(`${topic}:${partition}`)) {
-        // Thi is a duplicate message, ignore it
-        continue;
-      }
-      diagnosticContext = createDiagnosticContext({
-        client: this.#consumer,
-        stream: this,
-        operation: 'receive',
-        raw: record
-      });
-      consumerReceivesChannel.start.publish(diagnosticContext);
-      const commit = autocommit ? noopCallback : this.#commit.bind(this, topic, partition, offset, leaderEpoch);
-      try {
-        const headers = new Map();
-        for (const [headerKey, headerValue] of record.headers) {
-          headers.set(headerKeyDeserializer(headerKey), headerValueDeserializer(headerValue));
+    for (const batch of recordsBatches) {
+      const firstTimestamp = batch.firstTimestamp;
+      const firstOffset = batch.firstOffset;
+      const leaderEpoch = metadata.topics.get(topic).partitions[partition].leaderEpoch;
+      for (const record of batch.records) {
+        const offset = batch.firstOffset + BigInt(record.offsetDelta);
+        if (offset < requestedOffsets.get(`${topic}:${partition}`)) {
+          // Thi is a duplicate message, ignore it
+          continue;
        }
-        const key = keyDeserializer(record.key, headers);
-        const value = valueDeserializer(record.value, headers);
-        this.#metricsConsumedMessages?.inc();
-        const message = {
-          key,
-          value,
-          headers,
-          topic,
-          partition,
-          timestamp: firstTimestamp + record.timestampDelta,
-          offset,
-          commit
-        };
-        diagnosticContext.result = message;
-        consumerReceivesChannel.asyncStart.publish(diagnosticContext);
-        canPush = this.push(message);
-        consumerReceivesChannel.asyncEnd.publish(diagnosticContext);
-      }
-      catch (error) {
-        const shouldDestroy = this.#corruptedMessageHandler(record, topic, partition, firstTimestamp, firstOffset, commit);
-        if (shouldDestroy) {
-          diagnosticContext.error = error;
-          consumerReceivesChannel.error.publish(diagnosticContext);
-          this.destroy(new UserError('Failed to deserialize a message.', { cause: error }));
-          return;
+        diagnosticContext = createDiagnosticContext({
+          client: this.#consumer,
+          stream: this,
+          operation: 'receive',
+          raw: record
+        });
+        consumerReceivesChannel.start.publish(diagnosticContext);
+        const commit = autocommit ? noopCallback : this.#commit.bind(this, topic, partition, offset, leaderEpoch);
+        try {
+          const headers = new Map();
+          for (const [headerKey, headerValue] of record.headers) {
+            headers.set(headerKeyDeserializer(headerKey), headerValueDeserializer(headerValue));
+          }
+          const key = keyDeserializer(record.key, headers);
+          const value = valueDeserializer(record.value, headers);
+          this.#metricsConsumedMessages?.inc();
+          const message = {
+            key,
+            value,
+            headers,
+            topic,
+            partition,
+            timestamp: firstTimestamp + record.timestampDelta,
+            offset,
+            commit
+          };
+          diagnosticContext.result = message;
+          consumerReceivesChannel.asyncStart.publish(diagnosticContext);
+          canPush = this.push(message);
+          consumerReceivesChannel.asyncEnd.publish(diagnosticContext);
+        }
+        catch (error) {
+          const shouldDestroy = this.#corruptedMessageHandler(record, topic, partition, firstTimestamp, firstOffset, commit);
+          if (shouldDestroy) {
+            diagnosticContext.error = error;
+            consumerReceivesChannel.error.publish(diagnosticContext);
+            this.destroy(new UserError('Failed to deserialize a message.', { cause: error }));
+            return;
+          }
+        }
+        finally {
+          consumerReceivesChannel.end.publish(diagnosticContext);
         }
       }
-      finally {
-        consumerReceivesChannel.end.publish(diagnosticContext);
+      if (batch === recordsBatches[recordsBatches.length - 1]) {
+        // Track the last read offset
+        const lastOffset = batch.firstOffset + BigInt(batch.lastOffsetDelta);
+        this.#offsetsToFetch.set(`${topic}:${partition}`, lastOffset + 1n);
+        // Autocommit if needed
+        if (autocommit) {
+          this.#offsetsToCommit.set(`${topic}:${partition}`, { topic, partition, offset: lastOffset, leaderEpoch });
+        }
       }
     }
-    // Track the last read offset
-    const lastOffset = records.firstOffset + BigInt(records.lastOffsetDelta);
-    this.#offsetsToFetch.set(`${topic}:${partition}`, lastOffset + 1n);
-    // Autocommit if needed
-    if (autocommit) {
-      this.#offsetsToCommit.set(`${topic}:${partition}`, { topic, partition, offset: lastOffset, leaderEpoch });
-    }
   }
 }
 if (this.#autocommitEnabled && !this.#autocommitInterval) {
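
The consumer loop now iterates every batch of a partition, but the offset bookkeeping runs only for the last batch, so the next fetch position and the autocommit entry still describe everything that was just consumed. A worked sketch of the arithmetic, using hypothetical values and the same fields the code above reads:

```ts
// Hypothetical last batch of a partition: it starts at offset 100 and
// holds five records (offset deltas 0 through 4).
const firstOffset = 100n;
const lastOffsetDelta = 4;

// Same computation as the stream: the last offset actually consumed...
const lastOffset = firstOffset + BigInt(lastOffsetDelta); // 104n

// ...the next fetch resumes one past it...
const nextFetchOffset = lastOffset + 1n; // 105n

// ...and, when autocommit is enabled, the committed offset is the last
// consumed one for that topic/partition pair.
console.log({ lastOffset, nextFetchOffset });
```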

@@ -269,46 +269,36 @@ export class Connection extends EventEmitter {
     request.callback(new NetworkError('Connection closed'), undefined);
     return false;
   }
-  let canWrite = true;
-  const { correlationId, apiKey, apiVersion, payload: payloadFn, hasRequestHeaderTaggedFields } = request;
-  const writer = Writer.create()
-    .appendInt16(apiKey)
-    .appendInt16(apiVersion)
-    .appendInt32(correlationId)
+  const writer = Writer.create();
+  writer
+    .appendInt16(request.apiKey)
+    .appendInt16(request.apiVersion)
+    .appendInt32(request.correlationId)
     .appendString(this.#clientId, false);
-  if (hasRequestHeaderTaggedFields) {
+  if (request.hasRequestHeaderTaggedFields) {
     writer.appendTaggedFields();
   }
-  const payload = payloadFn();
-  writer.appendFrom(payload);
-  writer.prependLength();
-  // Write the header
-  this.#socket.cork();
-  if (!payload.context.noResponse) {
-    this.#inflightRequests.set(correlationId, request);
-  }
-  loggers.protocol('Sending request.', { apiKey: protocolAPIsById[apiKey], correlationId, request });
-  for (const buf of writer.buffers) {
-    if (!this.#socket.write(buf)) {
-      canWrite = false;
-    }
-  }
-  if (!canWrite) {
+  const payload = request.payload();
+  writer.appendFrom(payload).prependLength();
+  const expectResponse = !payload.context.noResponse;
+  if (expectResponse)
+    this.#inflightRequests.set(request.correlationId, request);
+  const canWrite = this.#socket.write(writer.buffer);
+  if (!canWrite)
     this.#socketMustBeDrained = true;
-  }
-  this.#socket.uncork();
-  if (payload.context.noResponse) {
+  if (!expectResponse)
     request.callback(null, canWrite);
-  }
-  // debugDump(Date.now() % 100000, 'send', { owner: this.#ownerId, apiKey: protocolAPIsById[apiKey], correlationId })
+  loggers.protocol('Sending request.', {
+    apiKey: protocolAPIsById[request.apiKey],
+    correlationId: request.correlationId,
+    request
+  });
   return canWrite;
 }
-catch (error) {
-  request.diagnostic.error = error;
+catch (err) {
+  request.diagnostic.error = err;
   connectionsApiChannel.error.publish(request.diagnostic);
-  connectionsApiChannel.end.publish(request.diagnostic);
-  throw error;
-  /* c8 ignore next 3 - C8 does not detect these as covered */
+  throw err;
 }
 finally {
   connectionsApiChannel.end.publish(request.diagnostic);
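
The send path was reworked: instead of corking the socket and writing each of the writer's internal buffers separately, the frame is now assembled once (payload appended, length prepended) and handed to a single socket.write() call, whose return value doubles as the backpressure signal. The sketch below illustrates the same length-prefixed, single-write idea with plain Node.js buffers; it does not use the library's Writer, and the function name is made up for the example:

```ts
import type { Socket } from 'node:net';

// Illustrative only: build a length-prefixed frame and write it in one call,
// mirroring writer.appendFrom(payload).prependLength() followed by
// this.#socket.write(writer.buffer) in the connection code above.
function sendFrame (socket: Socket, header: Buffer, payload: Buffer): boolean {
  const body = Buffer.concat([header, payload]);
  const frame = Buffer.allocUnsafe(4 + body.length);

  frame.writeInt32BE(body.length, 0); // the prepended length field
  body.copy(frame, 4);

  // One write for the whole frame; a false return value means the socket
  // needs draining, like the canWrite / #socketMustBeDrained flag above.
  return socket.write(frame);
}
```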

@@ -60,15 +60,16 @@ export function readRecord(reader) {
 }
 export function createRecordsBatch(messages, options = {}) {
   const now = BigInt(Date.now());
-  const timestamps = [];
-  for (let i = 0; i < messages.length; i++) {
-    timestamps.push(messages[i].timestamp ?? now);
-  }
-  messages.sort();
-  const firstTimestamp = timestamps[0];
-  const maxTimestamp = timestamps[timestamps.length - 1];
+  const firstTimestamp = messages[0].timestamp ?? now;
+  let maxTimestamp = firstTimestamp;
   let buffer = new DynamicBuffer();
   for (let i = 0; i < messages.length; i++) {
+    let ts = messages[i].timestamp ?? now;
+    if (typeof ts === 'number')
+      ts = BigInt(ts);
+    messages[i].timestamp = ts;
+    if (ts > maxTimestamp)
+      maxTimestamp = ts;
     const record = createRecord(messages[i], i, firstTimestamp);
     buffer.appendFrom(record.dynamicBuffer);
   }
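
The batch builder no longer collects timestamps into a side array (and drops the stray messages.sort() call); instead it normalizes each timestamp in the same pass that serializes the records, coercing numbers to BigInt and tracking the maximum as it goes. A standalone sketch of that normalization with hypothetical messages (the coercion of firstTimestamp is added here for type consistency within the sketch):

```ts
// Hypothetical producer messages: timestamps may be numbers, bigints, or absent.
const now = BigInt(Date.now());
const messages: Array<{ value: string; timestamp?: number | bigint }> = [
  { value: 'a', timestamp: 1_700_000_000_000 },
  { value: 'b' },
  { value: 'c', timestamp: 1_700_000_005_000n }
];

let firstTimestamp = messages[0].timestamp ?? now;
if (typeof firstTimestamp === 'number') firstTimestamp = BigInt(firstTimestamp);
let maxTimestamp = firstTimestamp;

for (const message of messages) {
  let ts = message.timestamp ?? now;
  if (typeof ts === 'number') ts = BigInt(ts); // numbers are coerced to BigInt
  message.timestamp = ts;                      // written back, as the batch builder does
  if (ts > maxTimestamp) maxTimestamp = ts;    // max tracked incrementally
}

console.log({ firstTimestamp, maxTimestamp });
```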

package/dist/utils.d.ts CHANGED
@@ -25,7 +25,7 @@ export declare class NumericMap extends Map<string, number> {
 export declare function niceJoin(array: string[], lastSeparator?: string, separator?: string): string;
 export declare function listErrorMessage(type: string[]): string;
 export declare function enumErrorMessage(type: Record<string, unknown>, keysOnly?: boolean): string;
-export declare function groupByProperty<Key, Value>(entries: Value[], property: keyof Value): [Key, Value[]][];
+export declare function groupByProperty<Key extends PropertyKey, Value>(entries: readonly Value[], property: keyof Value): [Key, Value[]][];
 export declare function humanize(label: string, buffer: Buffer | DynamicBuffer): string;
 export declare function setDebugDumpLogger(logger: DebugDumpLogger): void;
 export declare function debugDump(...values: unknown[]): void;

package/dist/utils.js CHANGED
@@ -121,19 +121,13 @@ export function enumErrorMessage(type, keysOnly = false) {
   return `should be one of ${niceJoin(Object.entries(type).map(([k, v]) => `${v} (${k})`), ' or ')}`;
 }
 export function groupByProperty(entries, property) {
-  const grouped = new Map();
-  const result = [];
-  for (const entry of entries) {
-    const value = entry[property];
-    let values = grouped.get(value);
-    if (!values) {
-      values = [];
-      grouped.set(value, values);
-      result.push([value, values]);
-    }
-    values.push(entry);
+  const buckets = Object.create(null);
+  for (let i = 0, len = entries.length; i < len; ++i) {
+    const e = entries[i];
+    const key = e[property];
+    (buckets[key] ||= []).push(e);
   }
-  return result;
+  return Object.entries(buckets);
 }
 export function humanize(label, buffer) {
   const formatted = buffer
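
The new groupByProperty buckets entries on a prototype-less plain object and returns Object.entries() of it, which is why the declaration above now constrains Key to PropertyKey; note that at runtime the keys come back as strings regardless of the grouped property's original type. A self-contained sketch of the same approach plus a usage example, re-implemented here so it runs without relying on which symbols the package re-exports:

```ts
// Same bucketing strategy as the diff above: a null-prototype object keyed by
// the grouped property, returned via Object.entries().
function groupByProperty<Key extends PropertyKey, Value> (
  entries: readonly Value[],
  property: keyof Value
): [Key, Value[]][] {
  const buckets: Record<PropertyKey, Value[]> = Object.create(null);

  for (const entry of entries) {
    const key = entry[property] as unknown as PropertyKey;
    (buckets[key] ||= []).push(entry);
  }

  return Object.entries(buckets) as [Key, Value[]][];
}

// Usage: group partition assignments by topic; keys come back as the topic names.
const assignments = [
  { topic: 'orders', partition: 0 },
  { topic: 'orders', partition: 1 },
  { topic: 'payments', partition: 0 }
];

console.log(groupByProperty<string, (typeof assignments)[number]>(assignments, 'topic'));
// [ [ 'orders', [ ... ] ], [ 'payments', [ ... ] ] ]
```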

package/dist/version.js CHANGED
@@ -1,2 +1,2 @@
 export const name = "@platformatic/kafka";
-export const version = "1.9.0";
+export const version = "1.10.5";

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@platformatic/kafka",
-  "version": "1.9.0",
+  "version": "1.10.5",
   "description": "Modern and performant client for Apache Kafka",
   "homepage": "https://github.com/platformatic/kafka",
   "author": "Platformatic Inc. <oss@platformatic.dev> (https://platformatic.dev)",