node-pkware 0.7.0 → 1.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/implode.js ADDED
@@ -0,0 +1,365 @@
1
+ const { has, repeat, clone, last, clamp } = require('ramda')
2
+ const { isFunction } = require('ramda-adjunct')
3
+ const ExpandingBuffer = require('./helpers/ExpandingBuffer.js')
4
+ const { toHex, getLowestNBits, nBitsOfOnes } = require('./helpers/functions.js')
5
+ const { ExpectedFunctionError, InvalidDictionarySizeError, InvalidCompressionTypeError } = require('./errors.js')
6
+ const {
7
+ ChBitsAsc,
8
+ ChCodeAsc,
9
+ LONGEST_ALLOWED_REPETITION,
10
+ DICTIONARY_SIZE_LARGE,
11
+ DICTIONARY_SIZE_MEDIUM,
12
+ DICTIONARY_SIZE_SMALL,
13
+ COMPRESSION_BINARY,
14
+ COMPRESSION_ASCII,
15
+ ExLenBits,
16
+ LenBits,
17
+ LenCode,
18
+ DistCode,
19
+ DistBits
20
+ } = require('./constants.js')
21
+
22
+ const setup = state => {
23
+ state.nChBits = repeat(0, 0x306)
24
+ state.nChCodes = repeat(0, 0x306)
25
+
26
+ switch (state.dictionarySizeBits) {
27
+ case DICTIONARY_SIZE_LARGE:
28
+ state.dictionarySizeMask = nBitsOfOnes(6)
29
+ break
30
+ case DICTIONARY_SIZE_MEDIUM:
31
+ state.dictionarySizeMask = nBitsOfOnes(5)
32
+ break
33
+ case DICTIONARY_SIZE_SMALL:
34
+ state.dictionarySizeMask = nBitsOfOnes(4)
35
+ break
36
+ default:
37
+ throw new InvalidDictionarySizeError()
38
+ }
39
+
40
+ switch (state.compressionType) {
41
+ case COMPRESSION_BINARY:
42
+ for (let nChCode = 0, nCount = 0; nCount < 0x100; nCount++) {
43
+ state.nChBits[nCount] = 9
44
+ state.nChCodes[nCount] = nChCode
45
+ nChCode = getLowestNBits(16, nChCode) + 2
46
+ }
47
+ break
48
+ case COMPRESSION_ASCII:
49
+ for (let nCount = 0; nCount < 0x100; nCount++) {
50
+ state.nChBits[nCount] = ChBitsAsc[nCount] + 1
51
+ state.nChCodes[nCount] = ChCodeAsc[nCount] * 2
52
+ }
53
+ break
54
+ default:
55
+ throw new InvalidCompressionTypeError()
56
+ }
57
+
58
+ let nCount = 0x100
59
+ for (let i = 0; i < 0x10; i++) {
60
+ for (let nCount2 = 0; nCount2 < 1 << ExLenBits[i]; nCount2++) {
61
+ state.nChBits[nCount] = ExLenBits[i] + LenBits[i] + 1
62
+ state.nChCodes[nCount] = (nCount2 << (LenBits[i] + 1)) | (LenCode[i] * 2) | 1
63
+ nCount++
64
+ }
65
+ }
66
+
67
+ state.outputBuffer.append(Buffer.from([state.compressionType, state.dictionarySizeBits, 0]))
68
+ state.outBits = 0
69
+ }
70
+
71
+ const outputBits = (state, nBits, bitBuffer) => {
72
+ if (nBits > 8) {
73
+ outputBits(state, 8, bitBuffer)
74
+ bitBuffer = bitBuffer >> 8
75
+ nBits = nBits - 8
76
+ }
77
+
78
+ const outBits = state.outBits
79
+
80
+ // in the original code bitBuffer is long, but is cast to char
81
+ const lastBytes = state.outputBuffer.read(state.outputBuffer.size() - 1, 1)
82
+ state.outputBuffer.dropEnd(1)
83
+ state.outputBuffer.append(Buffer.from([lastBytes | getLowestNBits(8, bitBuffer << outBits)]))
84
+
85
+ state.outBits = state.outBits + nBits
86
+
87
+ if (state.outBits > 8) {
88
+ bitBuffer = bitBuffer >> (8 - outBits)
89
+ state.outputBuffer.append(Buffer.from([getLowestNBits(8, bitBuffer)]))
90
+ state.outBits = getLowestNBits(3, state.outBits)
91
+ } else {
92
+ state.outBits = getLowestNBits(3, state.outBits)
93
+ if (state.outBits === 0) {
94
+ state.outputBuffer.append(Buffer.from([0]))
95
+ }
96
+ }
97
+ }
98
+
99
+ // ---------------------------------
100
+
101
+ const getSizeOfMatching = (inputBytes, a, b) => {
102
+ const limit = clamp(2, LONGEST_ALLOWED_REPETITION, b - a)
103
+
104
+ for (let i = 2; i <= limit; i++) {
105
+ if (inputBytes[a + i] !== inputBytes[b + i]) {
106
+ return i
107
+ }
108
+ }
109
+
110
+ return limit
111
+ }
112
+
113
+ // TODO: make sure that we find the most recent one, which in turn allows
114
+ // us to store backward length in less amount of bits
115
+ // currently the code goes from the furthest point
116
+ const findRepetitions = (inputBytes, endOfLastMatch, cursor) => {
117
+ const notEnoughBytes = inputBytes.length - cursor < 2
118
+ const tooClose = cursor === endOfLastMatch || cursor - endOfLastMatch < 2
119
+ if (notEnoughBytes || tooClose) {
120
+ return { size: 0, distance: 0 }
121
+ }
122
+
123
+ const haystack = inputBytes.slice(endOfLastMatch, cursor)
124
+ const needle = inputBytes.slice(cursor, cursor + 2)
125
+
126
+ const matchIndex = haystack.indexOf(needle)
127
+ if (matchIndex !== -1) {
128
+ const distance = cursor - endOfLastMatch - matchIndex
129
+ return {
130
+ distance: distance - 1,
131
+ size: distance > 2 ? getSizeOfMatching(inputBytes, endOfLastMatch + matchIndex, cursor) : 2
132
+ }
133
+ }
134
+
135
+ return { size: 0, distance: 0 }
136
+ }
137
+
138
+ // this function can return:
139
+ // false - not flushable
140
+ // true - flushable
141
+ // null - flushable, but there might be a better repetition
142
+ const isRepetitionFlushable = (size, distance, startIndex, inputBufferSize) => {
143
+ if (size === 0) {
144
+ return false
145
+ }
146
+
147
+ // If we found a repetition of 2 bytes that is 0x100 or further back,
148
+ // don't bother. Storing the distance of 0x100 bytes would actually
149
+ // take more space than storing the 2 bytes as-is.
150
+ if (size === 2 && distance >= 0x100) {
151
+ return false
152
+ }
153
+
154
+ if (size >= 8 || startIndex + 1 >= inputBufferSize) {
155
+ return true
156
+ }
157
+
158
+ return null
159
+ }
160
+
161
+ // ---------------------------------
162
+
163
+ // repetitions are at least 2 bytes long,
164
+ // so the initial 2 bytes can be moved to the output as is
165
+ const handleFirstTwoBytes = state => {
166
+ if (state.handledFirstTwoBytes) {
167
+ return
168
+ }
169
+
170
+ if (state.inputBuffer.size() < 3) {
171
+ return
172
+ }
173
+
174
+ const [byte1, byte2] = state.inputBuffer.read(0, 2)
175
+ outputBits(state, state.nChBits[byte1], state.nChCodes[byte1])
176
+ outputBits(state, state.nChBits[byte2], state.nChCodes[byte2])
177
+
178
+ state.handledFirstTwoBytes = true
179
+ state.startIndex += 2
180
+ }
181
+
182
+ const processChunkData = (state, debug = false) => {
183
+ if (!has('dictionarySizeMask', state)) {
184
+ setup(state)
185
+ }
186
+
187
+ if (!state.inputBuffer.isEmpty()) {
188
+ state.startIndex = 0
189
+
190
+ handleFirstTwoBytes(state)
191
+
192
+ // -------------------------------
193
+
194
+ /* eslint-disable prefer-const */
195
+
196
+ let endOfLastMatch = 0 // used when searching for longer repetitions later
197
+ while (state.startIndex < state.inputBuffer.size()) {
198
+ let { size, distance } = findRepetitions(state.inputBuffer.read(endOfLastMatch), endOfLastMatch, state.startIndex)
199
+
200
+ let isFlushable = isRepetitionFlushable(size, distance, state.startIndex, state.inputBuffer.size())
201
+
202
+ if (isFlushable === false) {
203
+ const byte = state.inputBuffer.read(state.startIndex, 1)
204
+ outputBits(state, state.nChBits[byte], state.nChCodes[byte])
205
+ state.startIndex += 1
206
+ } else {
207
+ if (isFlushable === null) {
208
+ /*
209
+ // Try to find better repetition 1 byte later.
210
+ // stormlib/implode.c L517
211
+ let cursor = state.startIndex
212
+ let newSize = size
213
+ let newDistance = distance
214
+ let currentSize
215
+ let currentDistance
216
+ while (newSize <= currentSize && isRepetitionFlushable(newSize, newDistance, state.startIndex, state.inputBuffer.size())) {
217
+ currentSize = newSize
218
+ currentDistance = newDistance
219
+ const reps = findRepetitions(state.inputBuffer.read(endOfLastMatch), endOfLastMatch, ++cursor)
220
+ newSize = reps.size
221
+ newDistance = reps.distance
222
+ }
223
+ size = newSize
224
+ distance = currentDistance
225
+ */
226
+ }
227
+
228
+ const byte = size + 0xfe
229
+ outputBits(state, state.nChBits[byte], state.nChCodes[byte])
230
+ if (size === 2) {
231
+ const byte = distance >> 2
232
+ outputBits(state, state.distBits[byte], state.distCodes[byte])
233
+ outputBits(state, 2, distance & 3)
234
+ } else {
235
+ const byte = distance >> state.dictionarySizeBits
236
+ outputBits(state, state.distBits[byte], state.distCodes[byte])
237
+ outputBits(state, state.dictionarySizeBits, state.dictionarySizeMask & distance)
238
+ }
239
+
240
+ state.startIndex += size
241
+ }
242
+
243
+ /*
244
+ state.inputBuffer.dropStart(endOfLastMatch)
245
+ state.startIndex -= endOfLastMatch
246
+ endOfLastMatch = 0
247
+ */
248
+
249
+ if (state.dictionarySizeBits === DICTIONARY_SIZE_SMALL && state.startIndex >= 0x400) {
250
+ state.inputBuffer.dropStart(0x400)
251
+ state.startIndex -= 0x400
252
+ } else if (state.dictionarySizeBits === DICTIONARY_SIZE_MEDIUM && state.startIndex >= 0x800) {
253
+ state.inputBuffer.dropStart(0x800)
254
+ state.startIndex -= 0x800
255
+ } else if (state.dictionarySizeBits === DICTIONARY_SIZE_LARGE && state.startIndex >= 0x1000) {
256
+ state.inputBuffer.dropStart(0x1000)
257
+ state.startIndex -= 0x1000
258
+ }
259
+ }
260
+
261
+ /* eslint-enable prefer-const */
262
+
263
+ // -------------------------------
264
+
265
+ state.inputBuffer.dropStart(state.inputBuffer.size())
266
+ }
267
+
268
+ if (state.streamEnded) {
269
+ // Write the termination literal
270
+ outputBits(state, last(state.nChBits), last(state.nChCodes))
271
+ }
272
+ }
273
+
274
+ const implode = (compressionType, dictionarySizeBits, config = {}) => {
275
+ const { debug = false, inputBufferSize = 0x0, outputBufferSize = 0x0 } = config
276
+
277
+ const handler = function (chunk, encoding, callback) {
278
+ if (!isFunction(callback)) {
279
+ // can't call callback to pass in data or errors, so we throw up
280
+ throw new ExpectedFunctionError()
281
+ }
282
+
283
+ const state = handler._state
284
+
285
+ try {
286
+ state.inputBuffer.append(chunk)
287
+ if (state.isFirstChunk) {
288
+ state.isFirstChunk = false
289
+ this._flush = state.onInputFinished
290
+ }
291
+
292
+ if (debug) {
293
+ console.log(`implode: reading ${toHex(chunk.length)} bytes from chunk #${state.stats.chunkCounter++}`)
294
+ }
295
+
296
+ processChunkData(state, debug)
297
+
298
+ const blockSize = 0x800
299
+ if (state.outputBuffer.size() > blockSize) {
300
+ const numberOfBytes = (Math.floor(state.outputBuffer.size() / blockSize) - 1) * blockSize
301
+ const output = Buffer.from(state.outputBuffer.read(0, numberOfBytes))
302
+ state.outputBuffer.flushStart(numberOfBytes)
303
+
304
+ if (state.outBits === 0) {
305
+ // set last byte to 0
306
+ state.outputBuffer.dropEnd(1)
307
+ state.outputBuffer.append(Buffer.from([0]))
308
+ }
309
+
310
+ callback(null, output)
311
+ } else {
312
+ callback(null, Buffer.from([]))
313
+ }
314
+ } catch (e) {
315
+ callback(e)
316
+ }
317
+ }
318
+
319
+ handler._state = {
320
+ isFirstChunk: true,
321
+ streamEnded: false,
322
+ compressionType: compressionType,
323
+ dictionarySizeBits: dictionarySizeBits,
324
+ distCodes: clone(DistCode),
325
+ distBits: clone(DistBits),
326
+ startIndex: 0,
327
+ inputBuffer: new ExpandingBuffer(inputBufferSize),
328
+ outputBuffer: new ExpandingBuffer(outputBufferSize),
329
+ handledFirstTwoBytes: false,
330
+ onInputFinished: callback => {
331
+ const state = handler._state
332
+ state.streamEnded = true
333
+ try {
334
+ processChunkData(state, debug)
335
+
336
+ if (debug) {
337
+ console.log('---------------')
338
+ console.log('implode: total number of chunks read:', state.stats.chunkCounter)
339
+ console.log('implode: inputBuffer heap size', toHex(state.inputBuffer.heapSize()))
340
+ console.log('implode: outputBuffer heap size', toHex(state.outputBuffer.heapSize()))
341
+ }
342
+
343
+ callback(null, state.outputBuffer.read())
344
+ } catch (e) {
345
+ callback(e)
346
+ }
347
+ },
348
+ stats: {
349
+ chunkCounter: 0
350
+ }
351
+ }
352
+
353
+ return handler
354
+ }
355
+
356
+ module.exports = {
357
+ setup,
358
+ outputBits,
359
+ getSizeOfMatching,
360
+ findRepetitions,
361
+ isRepetitionFlushable,
362
+ handleFirstTwoBytes,
363
+ processChunkData,
364
+ implode
365
+ }
package/src/index.js ADDED
@@ -0,0 +1,18 @@
1
+ const { implode } = require('./implode.js')
2
+ const { explode } = require('./explode.js')
3
+ const constants = require('./constants.js')
4
+ const errors = require('./errors.js')
5
+ const stream = require('./helpers/stream.js')
6
+
7
+ const compress = implode
8
+ const decompress = explode
9
+
10
+ module.exports = {
11
+ implode,
12
+ compress,
13
+ explode,
14
+ decompress,
15
+ constants,
16
+ errors,
17
+ stream
18
+ }
@@ -0,0 +1,41 @@
1
+ export const COMPRESSION_BINARY: 0
2
+ export const COMPRESSION_ASCII: 1
3
+ export const DICTIONARY_SIZE_SMALL: 4
4
+ export const DICTIONARY_SIZE_MEDIUM: 5
5
+ export const DICTIONARY_SIZE_LARGE: 6
6
+ export const LONGEST_ALLOWED_REPETITION: 0x204
7
+
8
+ export const PKDCL_OK: 'OK'
9
+ export const PKDCL_STREAM_END: 'All data from the input stream is read'
10
+ export const PKDCL_NEED_DICT: 'Need more data (dictionary)'
11
+ export const PKDCL_CONTINUE: 'Continue (internal flag)'
12
+ export const PKDCL_GET_INPUT: 'Get input (internal flag)'
13
+
14
+ export const LITERAL_END_STREAM: 0x305
15
+ export const LITERAL_STREAM_ABORTED: 0x306
16
+
17
+ export const DistCode: number[]
18
+ export const DistBits: number[]
19
+ export const LenBits: number[]
20
+ export const LenCode: number[]
21
+ export const ExLenBits: number[]
22
+ export const LenBase: number[]
23
+ export const ChBitsAsc: number[]
24
+ export const ChCodeAsc: number[]
25
+
26
+ // Additional types
27
+
28
+ /**
29
+ * Compression types for implode
30
+ */
31
+ export type CompressionType = typeof COMPRESSION_BINARY | typeof COMPRESSION_ASCII
32
+ /**
33
+ * Dictionary sizes for implode, determines how well the file gets compressed.
34
+ *
35
+ * Small dictionary size means less memory to lookback in data for repetitions, meaning it will be less effective, the file stays larger, less compressed.
36
+ * On the other hand, large compression allows more lookback allowing more effective compression, thus generating smaller, more compressed files.
37
+ */
38
+ export type DictionarySizeBits =
39
+ | typeof DICTIONARY_SIZE_SMALL
40
+ | typeof DICTIONARY_SIZE_MEDIUM
41
+ | typeof DICTIONARY_SIZE_LARGE
@@ -0,0 +1,30 @@
1
+ /**
2
+ * Thrown by
3
+ * - `implode` when invalid dictionary size was specified
4
+ * - `explode` when it encounters invalid data in the header section (the first 2 bytes of a compressed file)
5
+ */
6
+ export class InvalidDictionarySizeError extends Error {}
7
+
8
+ /**
9
+ * Thrown by
10
+ * - `implode` when invalid compression type was specified
11
+ * - `explode` when it encounters invalid data in the header section (the first 2 bytes of a compressed file)
12
+ */
13
+ export class InvalidCompressionTypeError extends Error {}
14
+
15
+ /**
16
+ * Thrown by
17
+ * - `explode`, when compressed data is less than `5` bytes long
18
+ *
19
+ * Pkware compressed files have a 2-byte header followed by at least 2 bytes of data and an end literal.
20
+ */
21
+ export class InvalidDataError extends Error {}
22
+
23
+ /**
24
+ * Thrown by
25
+ * - `explode` when compressed data ends without reaching the end literal or in mid decompression
26
+ */
27
+ export class AbortedError extends Error {}
28
+
29
+ export class ExpectedBufferError extends TypeError {}
30
+ export class ExpectedFunctionError extends TypeError {}
@@ -0,0 +1,56 @@
1
+ import ExpandingBuffer from './helpers/ExpandingBuffer'
2
+ import { Callback, Config, Handler, PrivateState } from './helpers/Shared'
3
+ import { CompressionType, DictionarySizeBits, PKDCL_STREAM_END, PKDCL_OK, LITERAL_STREAM_ABORTED } from './constants'
4
+
5
+ export function readHeader(buffer: Buffer): {
6
+ compressionType: CompressionType
7
+ dictionarySizeBits: DictionarySizeBits
8
+ }
9
+
10
+ type PrivateExplodeState = {
11
+ _backup: {
12
+ extraBits: number
13
+ bitBuffer: Buffer
14
+ }
15
+ needMoreInput: boolean
16
+ isFirstChunk: boolean
17
+ extraBits: number
18
+ bitBuffer: Buffer
19
+ chBitsAsc: number[] // DecodeLit and GenAscTabs uses this
20
+ lengthCodes: number[]
21
+ distPosCodes: number[]
22
+ inputBuffer: ExpandingBuffer
23
+ outputBuffer: ExpandingBuffer
24
+ onInputFinished(callback: Callback): void
25
+ backup(): void
26
+ restore(): void
27
+ stats: {
28
+ chunkCounter: number
29
+ }
30
+ compressionType: CompressionType
31
+ dictionarySizeBits: DictionarySizeBits
32
+ dictionarySizeMask: number
33
+ asciiTable2C34: number[]
34
+ asciiTable2D34: number[]
35
+ asciiTable2E34: number[]
36
+ asciiTable2EB4: number[]
37
+ }
38
+
39
+ /**
40
+ * Decompresses stream
41
+ * @returns a function, that you can use as a `transform._transform` method.
42
+ */
43
+ export function explode(config?: Config): PrivateState<PrivateExplodeState> & Handler
44
+ export function createPATIterator(limit: number, stepper: number): (n: number) => false | [number, number]
45
+ export function populateAsciiTable(value: number, index: number, bits: number, limit: number): number[]
46
+ export function generateAsciiTables(): {
47
+ asciiTable2C34: number[]
48
+ asciiTable2D34: number[]
49
+ asciiTable2E34: number[]
50
+ asciiTable2EB4: number[]
51
+ }
52
+ export function processChunkData(state: PrivateExplodeState, debug?: boolean): void
53
+ export function wasteBits(state: PrivateExplodeState, numberOfBits: number): typeof PKDCL_STREAM_END | typeof PKDCL_OK
54
+ export function decodeNextLiteral(state: PrivateExplodeState): typeof LITERAL_STREAM_ABORTED | number
55
+ export function decodeDistance(state: PrivateExplodeState, repeatLength: number): number
56
+ export function generateDecodeTables(startIndexes: number[], lengthBits: number[]): number[]
@@ -0,0 +1,25 @@
1
+ export = ExpandingBuffer
2
+ declare class ExpandingBuffer {
3
+ constructor(numberOfBytes?: number)
4
+ _heap: Buffer
5
+ _startIndex: number
6
+ _endIndex: number
7
+ _backup: {
8
+ _startIndex: number
9
+ _endIndex: number
10
+ }
11
+ _getActualData(offset?: number): Buffer
12
+ size(): number
13
+ isEmpty(): boolean
14
+ heapSize(): number
15
+ append(buffer: Buffer): void
16
+ read(offset?: number, limit?: number): number | Buffer
17
+ flushStart(numberOfBytes: number): void
18
+ flushEnd(numberOfBytes: number): void
19
+ dropStart(numberOfBytes: number): void
20
+ dropEnd(numberOfBytes: number): void
21
+ getHeap(): Buffer
22
+ clear(): void
23
+ _saveIndices(): void
24
+ _restoreIndices(): void
25
+ }
@@ -0,0 +1,46 @@
1
+ /**
2
+ * Shared type signatures used throughout the library.
3
+ */
4
+
5
+ /**
6
+ * A traditional "nodeback<Buffer>" callback type.
7
+ * @param error The error, if any.
8
+ * @param data The data, if any.
9
+ */
10
+ export type Callback = (error: Error | null, chunk: Buffer) => void
11
+
12
+ /**
13
+ * Handler for one chunk of bytes
14
+ * @param chunk The chunk of bytes
15
+ * @param encoding The encoding of the chunk (not used)
16
+ * @param callback The callback to call when done
17
+ */
18
+ export type Handler = (chunk: Buffer, encoding: unknown, callback: Callback) => void
19
+
20
+ /**
21
+ * How the implode & explode functions store their internal state.
22
+ */
23
+ export type PrivateState<T> = { _state: T }
24
+
25
+ /**
26
+ * Configuration options for the implode & explode functions.
27
+ */
28
+ export type Config = {
29
+ /**
30
+ * Whether the code should display debug messages on the console or not
31
+ * @default false
32
+ */
33
+ debug?: boolean
34
+ /**
35
+ * The starting size of the input buffer, may expand later as needed.
36
+ * Not having to expand may have performance impact.
37
+ * @default 0
38
+ */
39
+ inputBufferSize?: number
40
+ /**
41
+ * The starting size of the output buffer, may expand later as needed.
42
+ * Not having to expand may have performance impact.
43
+ * @default 0
44
+ */
45
+ outputBufferSize?: number
46
+ }
@@ -0,0 +1,10 @@
1
+ export function isBetween(min: number, max: number, num: number): boolean
2
+ export function nBitsOfOnes(numberOfBits: number): number
3
+ export function maskBits(numberOfBits: number, number: number): number
4
+ export function getLowestNBits(numberOfBits: number, number: number): number
5
+ export function isFullHexString(str: string): boolean
6
+ export function toHex(num: number, digits?: number, withoutPrefix?: boolean): string
7
+ export function mergeSparseArrays<T>(a: T[], b: T[]): T[]
8
+ export function parseNumberString(n: string, defaultValue?: number): number
9
+ export function getPackageVersion(): Promise<string>
10
+ export function fileExists(filename: string): Promise<boolean>
@@ -0,0 +1,61 @@
1
+ import { Transform, Writable } from 'stream'
2
+ import { Callback } from './Shared'
3
+
4
+ type QuasiTransformConstructorParameter = (
5
+ obj: {
6
+ handler: QuasiTransformConstructorParameter
7
+ handle(chunk: Buffer, encoding: string): Promise<void>
8
+ },
9
+ chunk: Buffer,
10
+ encoding: string
11
+ ) => void
12
+
13
+ /**
14
+ * Creates a "**predicate**" function, that awaits Buffers, keeps an internal counter of the bytes from them and splits the appropriate buffer at the given index.
15
+ * Splitting is done by returning an array with `[left: Buffer, right: Buffer, isLeftDone: bool]`.
16
+ * If you want to split data at the 100th byte and you keep feeding 60 byte long buffers to the function returned by `splitAt(100)`, then it will return arrays in the following manner:
17
+ * 1. `[inputBuffer, emptyBuffer, false]`
18
+ * 2. `[inputBuffer.slice(0, 40), inputBuffer.slice(40, 60), true]`
19
+ * 3. `[emptyBuffer, inputBuffer, true]`
20
+ * 4. `[emptyBuffer, inputBuffer, true]`
21
+ * 5. ... and so on
22
+ * @param index at which to split the buffer
23
+ */
24
+ export function splitAt(index: number): (chunk: Buffer) => null | [Buffer, Buffer, boolean]
25
+
26
+ /**
27
+ * A `transform._transform` type function, which lets the input chunks through without any change
28
+ */
29
+ export function transformIdentity(): (chunk: Buffer, encoding: unknown, callback: Callback) => void
30
+
31
+ /**
32
+ * A `transform._transform` type function, which for every input chunk will output an empty buffer
33
+ */
34
+ export function transformEmpty(): (chunk: unknown, encoding: unknown, callback: Callback) => void
35
+
36
+ /**
37
+ * Takes a `transform._transform` type function and turns it into a Transform stream instance
38
+ * @param handler a transform._transform type function
39
+ * @returns a Transform stream instance
40
+ */
41
+ export function through(handler: Exclude<ConstructorParameters<typeof Transform>[0], undefined>['transform']): Transform
42
+
43
+ /**
44
+ * Higher order function for introducing conditional logic to `transform._transform` functions.
45
+ * This is used internally to handle offsets for `explode()`.
46
+ * @param predicate
47
+ * @param leftHandler
48
+ * @param rightHandler
49
+ * @returns `transform._transform`
50
+ */
51
+ export function transformSplitBy(
52
+ predicate: (chunk: Buffer) => [Buffer, Buffer, boolean],
53
+ leftHandler: QuasiTransformConstructorParameter,
54
+ rightHandler: QuasiTransformConstructorParameter
55
+ ): (chunk: Buffer, encoding: string, callback: Callback) => void
56
+
57
+ /**
58
+ * Data can be piped to the returned function from a stream and it will concatenate all chunks into a single buffer.
59
+ * @param done a callback function, which will receive the concatenated buffer as a parameter
60
+ */
61
+ export function streamToBuffer(done: (heap: Buffer) => void): Writable
@@ -0,0 +1,6 @@
1
+ import { Handler } from './Shared'
2
+
3
+ export function isClass(obj: any): obj is Object
4
+ export function buffersShouldEqual(expected: Buffer, result: Buffer, offset?: number, displayAsHex?: boolean): void
5
+ export function bufferToString(buffer: Buffer, limit?: number): string
6
+ export function transformToABC(): Handler