node-pkware 0.8.1 → 1.0.0-beta.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/implode.mjs DELETED
@@ -1,285 +0,0 @@
1
- import { repeat, mergeRight, clone, last } from '../node_modules/ramda/src/index.mjs'
2
- import {
3
- DICTIONARY_SIZE1,
4
- DICTIONARY_SIZE2,
5
- DICTIONARY_SIZE3,
6
- ERROR_INVALID_DICTIONARY_SIZE,
7
- BINARY_COMPRESSION,
8
- ASCII_COMPRESSION,
9
- ERROR_INVALID_COMPRESSION_TYPE,
10
- ChBitsAsc,
11
- ChCodeAsc,
12
- ExLenBits,
13
- LenBits,
14
- LenCode,
15
- DistCode,
16
- DistBits
17
- } from './constants.mjs'
18
- import { nBitsOfOnes, getLowestNBits, toHex } from './helpers.mjs'
19
- import QuasiImmutableBuffer from './QuasiImmutableBuffer.mjs'
20
-
21
- const setup = (compressionType, dictionarySize) => {
22
- const state = {
23
- nChBits: repeat(0, 0x306),
24
- nChCodes: repeat(0, 0x306)
25
- }
26
-
27
- switch (dictionarySize) {
28
- case DICTIONARY_SIZE3:
29
- state.dictionarySizeBits = 6
30
- state.dictionarySizeMask = nBitsOfOnes(6)
31
- break
32
- case DICTIONARY_SIZE2:
33
- state.dictionarySizeBits = 5
34
- state.dictionarySizeMask = nBitsOfOnes(5)
35
- break
36
- case DICTIONARY_SIZE1:
37
- state.dictionarySizeBits = 4
38
- state.dictionarySizeMask = nBitsOfOnes(4)
39
- break
40
- default:
41
- throw new Error(ERROR_INVALID_DICTIONARY_SIZE)
42
- }
43
-
44
- switch (compressionType) {
45
- case BINARY_COMPRESSION:
46
- for (let nChCode = 0, nCount = 0; nCount < 0x100; nCount++) {
47
- state.nChBits[nCount] = 9
48
- state.nChCodes[nCount] = nChCode
49
- nChCode = getLowestNBits(16, nChCode) + 2
50
- }
51
- break
52
- case ASCII_COMPRESSION:
53
- for (let nCount = 0; nCount < 0x100; nCount++) {
54
- state.nChBits[nCount] = ChBitsAsc[nCount] + 1
55
- state.nChCodes[nCount] = ChCodeAsc[nCount] * 2
56
- }
57
- break
58
- default:
59
- throw new Error(ERROR_INVALID_COMPRESSION_TYPE)
60
- }
61
-
62
- let nCount = 0x100
63
- for (let i = 0; i < 0x10; i++) {
64
- for (let nCount2 = 0; nCount2 < 1 << ExLenBits[i]; nCount2++) {
65
- state.nChBits[nCount] = ExLenBits[i] + LenBits[i] + 1
66
- state.nChCodes[nCount] = (nCount2 << (LenBits[i] + 1)) | (LenCode[i] * 2) | 1
67
- nCount++
68
- }
69
- }
70
-
71
- state.initialData = Buffer.from([compressionType, state.dictionarySizeBits, 0])
72
- state.outBits = 0
73
-
74
- return state
75
- }
76
-
77
- const outputBits = (state, nBits, bitBuffer) => {
78
- if (nBits > 8) {
79
- outputBits(state, 8, bitBuffer)
80
- bitBuffer = bitBuffer >> 8
81
- nBits = nBits - 8
82
- }
83
-
84
- const outBits = state.outBits
85
-
86
- // in the original code bitBuffer is long, but is cast to char
87
- const lastBytes = state.outputBuffer.read(state.outputBuffer.size() - 1, 1)
88
- state.outputBuffer.dropEnd(1)
89
- state.outputBuffer.append(Buffer.from([lastBytes | getLowestNBits(8, bitBuffer << outBits)]))
90
-
91
- state.outBits = state.outBits + nBits
92
-
93
- if (state.outBits > 8) {
94
- bitBuffer = bitBuffer >> (8 - outBits)
95
- state.outputBuffer.append(Buffer.from([getLowestNBits(8, bitBuffer)]))
96
- state.outBits = getLowestNBits(3, state.outBits)
97
- } else {
98
- state.outBits = getLowestNBits(3, state.outBits)
99
- if (state.outBits === 0) {
100
- state.outputBuffer.append(Buffer.from([0]))
101
- }
102
- }
103
- }
104
-
105
- // TODO: only go till LONGEST_ALLOWED_REPETITION
106
- const getSizeOfMatching = (inputBytes, matchIndex, needleIndex) => {
107
- for (let i = 2; i <= needleIndex; i++) {
108
- if (inputBytes[matchIndex + i] !== inputBytes[needleIndex + i]) {
109
- return i
110
- }
111
- }
112
-
113
- return needleIndex
114
- }
115
-
116
- // TODO: make sure that we find the most recent one, which in turn allows
117
- // us to store backward length in less amount of bits
118
- // currently the code goes from the furthest point
119
- const findRepetitions = (inputBytes, startIndex) => {
120
- const needle = inputBytes.slice(startIndex, startIndex + 2)
121
- const haystack = inputBytes.slice(0, startIndex)
122
-
123
- const matchIndex = haystack.indexOf(needle)
124
- if (matchIndex !== -1) {
125
- return {
126
- distance: startIndex - matchIndex - 1,
127
- size: getSizeOfMatching(inputBytes, matchIndex, startIndex)
128
- }
129
- }
130
-
131
- return { size: 0, distance: 0 }
132
- }
133
-
134
- const processChunkData = (state, debug = false) => {
135
- if (state.inputBuffer.size() > 0x1000 || state.streamEnded) {
136
- state.needMoreInput = false
137
-
138
- let infLoopProtector = 20
139
- while (!state.inputBuffer.isEmpty()) {
140
- if (--infLoopProtector <= 0) {
141
- console.error('infinite loop detected, halting!')
142
- process.exit(1)
143
- }
144
-
145
- const inputBytes = state.inputBuffer.read(0, state.dictionarySizeBytes)
146
-
147
- let byte = inputBytes[0]
148
- outputBits(state, state.nChBits[byte], state.nChCodes[byte])
149
- byte = inputBytes[1]
150
- outputBits(state, state.nChBits[byte], state.nChCodes[byte])
151
-
152
- let startIndex = 2
153
- while (startIndex < inputBytes.length) {
154
- const { size, distance } = findRepetitions(inputBytes, startIndex)
155
-
156
- // TODO: remove side effects
157
- const isRepetitionFlushable = () => {
158
- if (size === 0) {
159
- return false
160
- }
161
-
162
- return false
163
-
164
- /*
165
- if (size === 2 && distance >= 0x100) {
166
- return false
167
- }
168
-
169
- if (size >= 8 || startIndex + 1 >= inputBytes.length) {
170
- return true
171
- }
172
-
173
- // TODO: try to find a better repetition 1 byte later
174
-
175
- return true
176
- */
177
- }
178
-
179
- if (isRepetitionFlushable()) {
180
- const byte = size + 0xfe
181
- outputBits(state, state.nChBits[byte], state.nChCodes[byte])
182
- if (size === 2) {
183
- const byte = distance >> 2
184
- outputBits(state, state.distBits[byte], state.distCodes[byte])
185
- outputBits(state, 2, distance & 3)
186
- } else {
187
- const byte = distance >> state.dictionarySizeBits
188
- outputBits(state, state.distBits[byte], state.distCodes[byte])
189
- outputBits(state, state.dictionarySizeBits, state.dictionarySizeMask & distance)
190
- }
191
- startIndex += size
192
- } else {
193
- const byte = inputBytes[startIndex]
194
- outputBits(state, state.nChBits[byte], state.nChCodes[byte])
195
- startIndex += 1
196
- }
197
- }
198
-
199
- state.inputBuffer.dropStart(inputBytes.length)
200
- }
201
- }
202
-
203
- if (state.streamEnded) {
204
- // Write the termination literal
205
- outputBits(state, last(state.nChBits), last(state.nChCodes))
206
- } else {
207
- state.needMoreInput = true
208
- }
209
- }
210
-
211
- const implode = (
212
- compressionType,
213
- dictionarySize,
214
- { debug = false, inputBufferSize = 0x0, outputBufferSize = 0x0 } = {}
215
- ) => {
216
- let state = {
217
- isFirstChunk: true,
218
- needMoreInput: true, // TODO: not sure, if we need this flag
219
- streamEnded: false,
220
- compressionType: compressionType,
221
- dictionarySizeBytes: dictionarySize,
222
- distCodes: clone(DistCode),
223
- distBits: clone(DistBits),
224
- inputBuffer: new QuasiImmutableBuffer(inputBufferSize),
225
- outputBuffer: new QuasiImmutableBuffer(outputBufferSize),
226
- onInputFinished: callback => {
227
- state.streamEnded = true
228
- try {
229
- processChunkData(state, debug)
230
-
231
- if (debug) {
232
- console.log('---------------')
233
- console.log('total number of chunks read:', state.stats.chunkCounter)
234
- console.log('inputBuffer heap size', toHex(state.inputBuffer.heapSize()))
235
- console.log('outputBuffer heap size', toHex(state.outputBuffer.heapSize()))
236
- }
237
-
238
- callback(null, state.outputBuffer.read())
239
- } catch (e) {
240
- callback(e)
241
- }
242
- },
243
- stats: {
244
- chunkCounter: 0
245
- }
246
- }
247
-
248
- return function (chunk, encoding, callback) {
249
- state.inputBuffer.append(chunk)
250
-
251
- try {
252
- if (state.isFirstChunk) {
253
- state.isFirstChunk = false
254
- this._flush = state.onInputFinished
255
- const { initialData, ...newState } = setup(compressionType, dictionarySize)
256
- state = mergeRight(state, newState)
257
- state.outputBuffer.append(initialData)
258
- }
259
-
260
- if (debug) {
261
- console.log(`reading ${toHex(chunk.length)} bytes from chunk #${state.stats.chunkCounter++}`)
262
- }
263
-
264
- processChunkData(state, debug)
265
-
266
- // output as much whole blocks of 0x800 bytes from the outputBuffer as possible
267
- const blockSize = 0x800
268
- const numberOfBytes = Math.floor(state.outputBuffer.size() / blockSize) * blockSize
269
- const output = Buffer.from(state.outputBuffer.read(0, numberOfBytes))
270
- state.outputBuffer.flushStart(numberOfBytes)
271
-
272
- if (state.outBits === 0) {
273
- // set last byte to 0
274
- state.outputBuffer.dropEnd(1)
275
- state.outputBuffer.append(Buffer.from([0]))
276
- }
277
-
278
- callback(null, output)
279
- } catch (e) {
280
- callback(e)
281
- }
282
- }
283
- }
284
-
285
- export default implode
package/src/index.mjs DELETED
@@ -1,25 +0,0 @@
1
- import implode from './implode.mjs'
2
- import explode from './explode.mjs'
3
- import {
4
- BINARY_COMPRESSION,
5
- ASCII_COMPRESSION,
6
- DICTIONARY_SIZE1,
7
- DICTIONARY_SIZE2,
8
- DICTIONARY_SIZE3
9
- } from './constants.mjs'
10
-
11
- // aliases
12
- const compress = implode
13
- const decompress = explode
14
-
15
- export {
16
- implode,
17
- explode,
18
- compress,
19
- decompress,
20
- BINARY_COMPRESSION,
21
- ASCII_COMPRESSION,
22
- DICTIONARY_SIZE1,
23
- DICTIONARY_SIZE2,
24
- DICTIONARY_SIZE3
25
- }