@platformatic/kafka 1.14.0 → 1.15.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -99,12 +99,12 @@ export function parseResponse(_correlationId, apiKey, apiVersion, reader) {
       }),
       preferredReadReplica: r.readInt32()
     };
-    let recordsSize = r.readUnsignedVarInt();
     if (partition.errorCode !== 0) {
       errors.push([`/responses/${i}/partitions/${j}`, partition.errorCode]);
     }
-
-
+    // We need to reduce the size by one to follow the COMPACT_RECORDS specification
+    const recordsSize = r.readUnsignedVarInt() - 1;
+    if (recordsSize > 0) {
       const recordsBatchesReader = Reader.from(r.buffer.subarray(r.position, r.position + recordsSize));
       partition.records = [];
       do {
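The minus-one in this parser follows Kafka's COMPACT_RECORDS encoding: a compact field is prefixed with its byte length plus one as an unsigned varint, so an encoded 0 means null and 1 means empty. A minimal sketch of that rule, assuming a plain Node.js Buffer; readUnsignedVarInt here is a hypothetical standalone helper, not the library's Reader method:

// Hypothetical helper: decode an unsigned LEB128 varint from a Buffer.
function readUnsignedVarInt(buffer, offset = 0) {
  let value = 0;
  let shift = 0;
  let byte;
  do {
    byte = buffer[offset++];
    value |= (byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return { value, offset };
}

// COMPACT_RECORDS stores length + 1, so an encoded 1 means "empty".
const { value } = readUnsignedVarInt(Buffer.from([0x01]));
const recordsSize = value - 1; // 0: no record batches follow
console.log(recordsSize > 0);  // false - the batch parsing loop is skipped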
package/dist/protocol/compression.js
CHANGED

@@ -17,8 +17,8 @@ function ensureBuffer(data) {
 }
 let snappyCompressSync;
 let snappyDecompressSync;
-let lz4CompressSync;
-let lz4DecompressSync;
+let lz4CompressFrameSync;
+let lz4DecompressFrameSync;
 function loadSnappy() {
     try {
         const snappy = require('snappy');
@@ -33,8 +33,8 @@ function loadSnappy() {
 function loadLZ4() {
     try {
         const lz4 = require('lz4-napi');
-        lz4CompressSync = lz4.compressSync;
-        lz4DecompressSync = lz4.uncompressSync;
+        lz4CompressFrameSync = lz4.compressFrameSync;
+        lz4DecompressFrameSync = lz4.decompressFrameSync;
         /* c8 ignore next 5 - In tests lz4-napi is always available */
     }
     catch (e) {
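loadLZ4 (like loadSnappy above it) keeps the native codec an optional dependency: nothing is require()d until a compress or decompress call actually needs it. A minimal sketch of that lazy-loading pattern; the catch branch is illustrative, since this diff does not show the library's actual error handling:

// Sketch of the lazy optional-dependency pattern used in compression.js.
// The native module is require()d only on first use, so installs without
// lz4-napi still load; the failure surfaces when LZ4 is actually needed.
let lz4CompressFrameSync;

function loadLZ4() {
  try {
    const lz4 = require('lz4-napi');
    lz4CompressFrameSync = lz4.compressFrameSync;
  } catch (e) {
    // Illustrative error; the real catch body is not shown in this diff.
    throw new Error('lz4-napi is not available', { cause: e });
  }
}

function compressSync(data) {
  if (!lz4CompressFrameSync) {
    loadLZ4();
  }
  return lz4CompressFrameSync(data);
}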
@@ -84,17 +84,17 @@ export const compressionsAlgorithms = {
     lz4: {
         compressSync(data) {
             /* c8 ignore next 4 - In tests lz4-napi is always available */
-            if (!lz4CompressSync) {
+            if (!lz4CompressFrameSync) {
                 loadLZ4();
             }
-            return lz4CompressSync(ensureBuffer(data));
+            return lz4CompressFrameSync(ensureBuffer(data));
         },
         decompressSync(data) {
             /* c8 ignore next 4 - In tests lz4-napi is always available */
-            if (!lz4DecompressSync) {
+            if (!lz4DecompressFrameSync) {
                 loadLZ4();
             }
-            return lz4DecompressSync(ensureBuffer(data));
+            return lz4DecompressFrameSync(ensureBuffer(data));
         },
         bitmask: 3,
         available: true
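The substance of this change is the switch from lz4-napi's block-format functions to its frame-format ones: Kafka exchanges LZ4 payloads in the LZ4 frame format, which carries its own header and block structure. A round-trip usage sketch of the API adopted above, assuming lz4-napi is installed:

// Round trip through the synchronous frame-format functions.
const { compressFrameSync, decompressFrameSync } = require('lz4-napi');

const payload = Buffer.from('record batch bytes');
const framed = compressFrameSync(payload);    // LZ4 frame: header + blocks + end mark
const restored = decompressFrameSync(framed); // frame is self-describing, no size needed

console.log(restored.equals(payload)); // true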
package/dist/protocol/records.js
CHANGED

@@ -1,6 +1,7 @@
 import { UnsupportedCompressionError } from "../errors.js";
 import { compressionsAlgorithms, compressionsAlgorithmsByBitmask } from "./compression.js";
 import { crc32c } from "./crc32c.js";
+import { INT32_SIZE, INT64_SIZE } from "./definitions.js";
 import { DynamicBuffer } from "./dynamic-buffer.js";
 import { Reader } from "./reader.js";
 import { Writer } from "./writer.js";

@@ -117,6 +118,7 @@ export function createRecordsBatch(messages, options = {}) {
         .appendInt64(0n, false));
 }
 export function readRecordsBatch(reader) {
+    const initialPosition = reader.position;
     const batch = {
         firstOffset: reader.readInt64(),
         length: reader.readInt32(),

@@ -139,9 +141,12 @@ export function readRecordsBatch(reader) {
         if (!algorithm) {
             throw new UnsupportedCompressionError(`Unsupported compression algorithm with bitmask ${compression}`);
         }
-
-
-
+        // The length of all headers immediately following Length up to the length of the Records array
+        const headersLength = reader.position - initialPosition - INT32_SIZE - INT64_SIZE;
+        const compressedDataLen = batch.length - headersLength;
+        const buffer = algorithm.decompressSync(reader.buffer.slice(reader.position, reader.position + compressedDataLen));
+        // Move the original reader to the end of the compressed data
+        reader.skip(compressedDataLen);
         // Replace the reader with the decompressed buffer
         reader = Reader.from(buffer);
     }
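The new arithmetic leans on the v2 record batch layout: baseOffset (an INT64) and batchLength (an INT32) come first, and batchLength counts every byte after itself. Subtracting the header bytes consumed since the start of the batch, minus that 12-byte prefix, leaves exactly the length of the (possibly compressed) records payload. A worked sketch with illustrative numbers; the 61-byte position assumes the standard v2 header fields:

// Worked example of the header arithmetic in readRecordsBatch.
const INT32_SIZE = 4;
const INT64_SIZE = 8;

const initialPosition = 0; // reader.position when the batch starts
const position = 61;       // after baseOffset(8) + batchLength(4) + the
                           // 49 header bytes that batchLength also counts

const batchLength = 149;   // example value read from the wire

// Headers counted by batchLength = bytes consumed so far, minus the
// baseOffset/batchLength prefix that batchLength does not count.
const headersLength = position - initialPosition - INT32_SIZE - INT64_SIZE; // 49
const compressedDataLen = batchLength - headersLength;                      // 100

console.log(headersLength, compressedDataLen); // 49 100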
package/dist/version.js
CHANGED

@@ -1,2 +1,2 @@
 export const name = "@platformatic/kafka";
-export const version = "1.14.0";
+export const version = "1.15.1";
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@platformatic/kafka",
-  "version": "1.14.0",
+  "version": "1.15.1",
   "description": "Modern and performant client for Apache Kafka",
   "homepage": "https://github.com/platformatic/kafka",
   "author": "Platformatic Inc. <oss@platformatic.dev> (https://platformatic.dev)",