@loaders.gl/parquet 3.3.0 → 3.4.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/dist.min.js +26 -17
- package/dist/dist.min.js.map +3 -3
- package/dist/es5/index.js +3 -3
- package/dist/es5/index.js.map +1 -1
- package/dist/es5/lib/parse-parquet.js +25 -49
- package/dist/es5/lib/parse-parquet.js.map +1 -1
- package/dist/es5/parquet-loader.js +2 -3
- package/dist/es5/parquet-loader.js.map +1 -1
- package/dist/es5/parquet-wasm-loader.js +1 -1
- package/dist/es5/parquet-wasm-loader.js.map +1 -1
- package/dist/es5/parquet-wasm-writer.js +1 -1
- package/dist/es5/parquet-wasm-writer.js.map +1 -1
- package/dist/es5/parquet-writer.js +1 -1
- package/dist/es5/parquet-writer.js.map +1 -1
- package/dist/es5/parquetjs/compression.js +5 -15
- package/dist/es5/parquetjs/compression.js.map +1 -1
- package/dist/es5/parquetjs/encoder/{parquet-encoder.js → writer.js} +158 -70
- package/dist/es5/parquetjs/encoder/writer.js.map +1 -0
- package/dist/es5/parquetjs/file.js +94 -0
- package/dist/es5/parquetjs/file.js.map +1 -0
- package/dist/es5/parquetjs/parser/parquet-cursor.js +183 -0
- package/dist/es5/parquetjs/parser/parquet-cursor.js.map +1 -0
- package/dist/es5/parquetjs/parser/parquet-envelope-reader.js +327 -0
- package/dist/es5/parquetjs/parser/parquet-envelope-reader.js.map +1 -0
- package/dist/es5/parquetjs/parser/parquet-reader.js +222 -553
- package/dist/es5/parquetjs/parser/parquet-reader.js.map +1 -1
- package/dist/es5/parquetjs/schema/declare.js +1 -3
- package/dist/es5/parquetjs/schema/declare.js.map +1 -1
- package/dist/es5/parquetjs/schema/shred.js +33 -39
- package/dist/es5/parquetjs/schema/shred.js.map +1 -1
- package/dist/es5/parquetjs/schema/types.js.map +1 -1
- package/dist/es5/parquetjs/utils/buffer-utils.js +19 -0
- package/dist/es5/parquetjs/utils/buffer-utils.js.map +1 -0
- package/dist/es5/parquetjs/utils/file-utils.js +3 -2
- package/dist/es5/parquetjs/utils/file-utils.js.map +1 -1
- package/dist/esm/index.js +1 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/lib/parse-parquet.js +12 -6
- package/dist/esm/lib/parse-parquet.js.map +1 -1
- package/dist/esm/parquet-loader.js +2 -3
- package/dist/esm/parquet-loader.js.map +1 -1
- package/dist/esm/parquet-wasm-loader.js +1 -1
- package/dist/esm/parquet-wasm-loader.js.map +1 -1
- package/dist/esm/parquet-wasm-writer.js +1 -1
- package/dist/esm/parquet-wasm-writer.js.map +1 -1
- package/dist/esm/parquet-writer.js +1 -1
- package/dist/esm/parquet-writer.js.map +1 -1
- package/dist/esm/parquetjs/compression.js +1 -10
- package/dist/esm/parquetjs/compression.js.map +1 -1
- package/dist/esm/parquetjs/encoder/{parquet-encoder.js → writer.js} +37 -7
- package/dist/esm/parquetjs/encoder/writer.js.map +1 -0
- package/dist/esm/parquetjs/file.js +81 -0
- package/dist/esm/parquetjs/file.js.map +1 -0
- package/dist/esm/parquetjs/parser/parquet-cursor.js +78 -0
- package/dist/esm/parquetjs/parser/parquet-cursor.js.map +1 -0
- package/dist/esm/parquetjs/parser/parquet-envelope-reader.js +129 -0
- package/dist/esm/parquetjs/parser/parquet-envelope-reader.js.map +1 -0
- package/dist/esm/parquetjs/parser/parquet-reader.js +72 -158
- package/dist/esm/parquetjs/parser/parquet-reader.js.map +1 -1
- package/dist/esm/parquetjs/schema/declare.js +0 -1
- package/dist/esm/parquetjs/schema/declare.js.map +1 -1
- package/dist/esm/parquetjs/schema/shred.js +34 -42
- package/dist/esm/parquetjs/schema/shred.js.map +1 -1
- package/dist/esm/parquetjs/schema/types.js.map +1 -1
- package/dist/esm/parquetjs/utils/buffer-utils.js +13 -0
- package/dist/esm/parquetjs/utils/buffer-utils.js.map +1 -0
- package/dist/esm/parquetjs/utils/file-utils.js +1 -1
- package/dist/esm/parquetjs/utils/file-utils.js.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +4 -3
- package/dist/lib/parse-parquet.d.ts +2 -2
- package/dist/lib/parse-parquet.d.ts.map +1 -1
- package/dist/lib/parse-parquet.js +12 -24
- package/dist/parquet-loader.d.ts +0 -1
- package/dist/parquet-loader.d.ts.map +1 -1
- package/dist/parquet-loader.js +1 -2
- package/dist/parquet-worker.js +24 -15
- package/dist/parquet-worker.js.map +3 -3
- package/dist/parquetjs/compression.d.ts.map +1 -1
- package/dist/parquetjs/compression.js +5 -16
- package/dist/parquetjs/encoder/{parquet-encoder.d.ts → writer.d.ts} +19 -10
- package/dist/parquetjs/encoder/writer.d.ts.map +1 -0
- package/dist/parquetjs/encoder/{parquet-encoder.js → writer.js} +37 -39
- package/dist/parquetjs/file.d.ts +10 -0
- package/dist/parquetjs/file.d.ts.map +1 -0
- package/dist/parquetjs/file.js +99 -0
- package/dist/parquetjs/parser/parquet-cursor.d.ts +36 -0
- package/dist/parquetjs/parser/parquet-cursor.d.ts.map +1 -0
- package/dist/parquetjs/parser/parquet-cursor.js +74 -0
- package/dist/parquetjs/parser/parquet-envelope-reader.d.ts +40 -0
- package/dist/parquetjs/parser/parquet-envelope-reader.d.ts.map +1 -0
- package/dist/parquetjs/parser/parquet-envelope-reader.js +136 -0
- package/dist/parquetjs/parser/parquet-reader.d.ts +57 -47
- package/dist/parquetjs/parser/parquet-reader.d.ts.map +1 -1
- package/dist/parquetjs/parser/parquet-reader.js +102 -168
- package/dist/parquetjs/schema/declare.d.ts +7 -14
- package/dist/parquetjs/schema/declare.d.ts.map +1 -1
- package/dist/parquetjs/schema/declare.js +0 -2
- package/dist/parquetjs/schema/shred.d.ts +0 -115
- package/dist/parquetjs/schema/shred.d.ts.map +1 -1
- package/dist/parquetjs/schema/shred.js +43 -161
- package/dist/parquetjs/schema/types.d.ts +2 -2
- package/dist/parquetjs/schema/types.d.ts.map +1 -1
- package/dist/parquetjs/utils/buffer-utils.d.ts +10 -0
- package/dist/parquetjs/utils/buffer-utils.d.ts.map +1 -0
- package/dist/parquetjs/utils/buffer-utils.js +22 -0
- package/dist/parquetjs/utils/file-utils.d.ts +4 -3
- package/dist/parquetjs/utils/file-utils.d.ts.map +1 -1
- package/dist/parquetjs/utils/file-utils.js +5 -2
- package/package.json +5 -7
- package/src/index.ts +2 -2
- package/src/lib/parse-parquet.ts +12 -25
- package/src/parquet-loader.ts +1 -3
- package/src/parquetjs/compression.ts +1 -14
- package/src/parquetjs/encoder/{parquet-encoder.ts → writer.ts} +28 -22
- package/src/parquetjs/file.ts +90 -0
- package/src/parquetjs/parser/parquet-cursor.ts +94 -0
- package/src/parquetjs/parser/parquet-envelope-reader.ts +199 -0
- package/src/parquetjs/parser/parquet-reader.ts +122 -239
- package/src/parquetjs/schema/declare.ts +9 -17
- package/src/parquetjs/schema/shred.ts +28 -157
- package/src/parquetjs/schema/types.ts +27 -21
- package/src/parquetjs/utils/buffer-utils.ts +18 -0
- package/src/parquetjs/utils/file-utils.ts +4 -3
- package/dist/es5/lib/convert-schema-deep.ts.disabled +0 -910
- package/dist/es5/parquetjs/encoder/parquet-encoder.js.map +0 -1
- package/dist/esm/lib/convert-schema-deep.ts.disabled +0 -910
- package/dist/esm/parquetjs/encoder/parquet-encoder.js.map +0 -1
- package/dist/parquetjs/encoder/parquet-encoder.d.ts.map +0 -1
- package/src/lib/convert-schema-deep.ts.disabled +0 -910
package/src/parquetjs/encoder/{parquet-encoder.ts → writer.ts}

@@ -1,6 +1,6 @@
 // Forked from https://github.com/kbajalc/parquets under MIT license (Copyright (c) 2017 ironSource Ltd.)
 /* eslint-disable camelcase */
-import {
+import {Transform, Writable} from 'stream';
 import {ParquetCodecOptions, PARQUET_CODECS} from '../codecs';
 import * as Compression from '../compression';
 import {
@@ -55,7 +55,7 @@ const PARQUET_DEFAULT_ROW_GROUP_SIZE = 4096;
 const PARQUET_RDLVL_TYPE = 'INT32';
 const PARQUET_RDLVL_ENCODING = 'RLE';
 
-export interface
+export interface ParquetWriterOptions {
   baseOffset?: number;
   rowGroupSize?: number;
   pageSize?: number;
@@ -71,12 +71,12 @@ export interface ParquetEncoderOptions {
 }
 
 /**
- * Write a parquet file to an output stream. The
+ * Write a parquet file to an output stream. The ParquetWriter will perform
  * buffering/batching for performance, so close() must be called after all rows
  * are written.
  */
 // eslint-disable-next-line @typescript-eslint/no-unused-vars
-export class
+export class ParquetWriter<T> {
   /**
    * Convenience method to create a new buffered parquet writer that writes to
    * the specified file
@@ -84,10 +84,10 @@ export class ParquetEncoder<T> {
   static async openFile<T>(
     schema: ParquetSchema,
     path: string,
-    opts?:
-  ): Promise<
+    opts?: ParquetWriterOptions
+  ): Promise<ParquetWriter<T>> {
     const outputStream = await osopen(path, opts);
-    return
+    return ParquetWriter.openStream(schema, outputStream, opts);
   }
 
   /**
@@ -96,11 +96,17 @@ export class ParquetEncoder<T> {
    */
   static async openStream<T>(
     schema: ParquetSchema,
-    outputStream:
-    opts
-  ): Promise<
+    outputStream: Writable,
+    opts?: ParquetWriterOptions
+  ): Promise<ParquetWriter<T>> {
+    if (!opts) {
+      // tslint:disable-next-line:no-parameter-reassignment
+      opts = {};
+    }
+
     const envelopeWriter = await ParquetEnvelopeWriter.openStream(schema, outputStream, opts);
-
+
+    return new ParquetWriter(schema, envelopeWriter, opts);
   }
 
   public schema: ParquetSchema;
@@ -116,7 +122,7 @@ export class ParquetEncoder<T> {
   constructor(
     schema: ParquetSchema,
     envelopeWriter: ParquetEnvelopeWriter,
-    opts:
+    opts: ParquetWriterOptions
  ) {
     this.schema = schema;
     this.envelopeWriter = envelopeWriter;
@@ -221,8 +227,8 @@ export class ParquetEnvelopeWriter {
    */
   static async openStream(
     schema: ParquetSchema,
-    outputStream:
-    opts:
+    outputStream: Writable,
+    opts: ParquetWriterOptions
   ): Promise<ParquetEnvelopeWriter> {
     const writeFn = oswrite.bind(undefined, outputStream);
     const closeFn = osclose.bind(undefined, outputStream);
@@ -243,7 +249,7 @@ export class ParquetEnvelopeWriter {
     writeFn: (buf: Buffer) => Promise<void>,
     closeFn: () => Promise<void>,
     fileOffset: number,
-    opts:
+    opts: ParquetWriterOptions
   ) {
     this.schema = schema;
     this.write = writeFn;
@@ -308,10 +314,11 @@ export class ParquetEnvelopeWriter {
 
 /**
  * Create a parquet transform stream
-
-
+ */
+export class ParquetTransformer<T> extends Transform {
+  public writer: ParquetWriter<T>;
 
-  constructor(schema: ParquetSchema, opts:
+  constructor(schema: ParquetSchema, opts: ParquetWriterOptions = {}) {
     super({objectMode: true});
 
     const writeProxy = (function (t: ParquetTransformer<any>) {
@@ -320,7 +327,7 @@ export class ParquetTransformer<T> extends stream.Transform {
       };
     })(this);
 
-    this.writer = new
+    this.writer = new ParquetWriter(
       schema,
       new ParquetEnvelopeWriter(schema, writeProxy, async () => {}, 0, opts),
       opts
@@ -341,7 +348,6 @@ export class ParquetTransformer<T> extends stream.Transform {
     await this.writer.close(callback);
   }
 }
-*/
 
 /**
  * Encode a consecutive array of data using one of the parquet encodings
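The hunks above also restore the previously commented-out ParquetTransformer, an objectMode Transform stream backed by a ParquetWriter. A minimal usage sketch follows; the import paths, the ParquetSchema field definitions, and the output path are illustrative assumptions, not part of this diff:

```typescript
// Illustration only: import paths and the schema definition below are assumptions.
import {Readable} from 'stream';
import {createWriteStream} from 'fs';
import {ParquetSchema} from './schema/schema';
import {ParquetTransformer} from './encoder/writer';

// Hypothetical two-column schema (field spec shape assumed from the upstream parquets fork)
const schema = new ParquetSchema({
  name: {type: 'UTF8'},
  value: {type: 'DOUBLE'}
});

// Pipe plain objects through the transformer; it buffers rows via its internal
// ParquetWriter and emits encoded parquet bytes downstream.
Readable.from([
  {name: 'a', value: 1},
  {name: 'b', value: 2}
])
  .pipe(new ParquetTransformer(schema, {rowGroupSize: 4096}))
  .pipe(createWriteStream('/tmp/example.parquet'));
```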
@@ -484,7 +490,7 @@ async function encodeColumnChunk(
   column: ParquetField,
   buffer: ParquetBuffer,
   offset: number,
-  opts:
+  opts: ParquetWriterOptions
 ): Promise<{
   body: Buffer;
   metadata: ColumnMetaData;
@@ -540,7 +546,7 @@ async function encodeColumnChunk(
 async function encodeRowGroup(
   schema: ParquetSchema,
   data: ParquetBuffer,
-  opts:
+  opts: ParquetWriterOptions
 ): Promise<{
   body: Buffer;
   metadata: RowGroup;
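Taken together, the writer.ts hunks rename ParquetEncoder to ParquetWriter and its options interface to ParquetWriterOptions. A rough sketch of the resulting write path, using only the signatures visible in the diff; the import paths are assumptions, and appendRow() is assumed from the upstream parquets API rather than shown here:

```typescript
// Sketch of the renamed writer API. Import paths are assumptions; appendRow() is
// assumed from the upstream parquets fork and does not appear in this diff.
import {Writable} from 'stream';
import {ParquetSchema} from './schema/schema';
import {ParquetWriter, ParquetWriterOptions} from './encoder/writer';

async function writeRows(schema: ParquetSchema, output: Writable, rows: object[]): Promise<void> {
  const opts: ParquetWriterOptions = {rowGroupSize: 4096, pageSize: 8 * 1024};
  const writer = await ParquetWriter.openStream(schema, output, opts);
  for (const row of rows) {
    await writer.appendRow(row); // assumed API; rows are buffered into row groups
  }
  await writer.close(); // flushes remaining rows and writes the file footer
}
```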
package/src/parquetjs/file.ts

@@ -0,0 +1,90 @@
+// Forked from https://github.com/kbajalc/parquets under MIT license (Copyright (c) 2017 ironSource Ltd.)
+import fs from 'fs';
+
+export function fopen(filePath) {
+  return new Promise((resolve, reject) => {
+    fs.open(filePath, 'r', (err, fd) => {
+      if (err) {
+        reject(err);
+      } else {
+        resolve(fd);
+      }
+    });
+  });
+}
+
+export function fstat(filePath) {
+  return new Promise<fs.Stats>((resolve, reject) => {
+    fs.stat(filePath, (err, stat) => {
+      if (err) {
+        reject(err);
+      } else {
+        resolve(stat);
+      }
+    });
+  });
+}
+
+export function fread(fd, position, length) {
+  const buffer = Buffer.alloc(length);
+
+  return new Promise((resolve, reject) => {
+    fs.read(fd, buffer, 0, length, position, (err, bytesRead, buf) => {
+      if (err || bytesRead !== length) {
+        reject(err || Error('read failed'));
+      } else {
+        resolve(buf);
+      }
+    });
+  });
+}
+
+export function fclose(fd) {
+  return new Promise((resolve, reject) => {
+    fs.close(fd, (err) => {
+      if (err) {
+        reject(err);
+      } else {
+        resolve(err);
+      }
+    });
+  });
+}
+
+export function oswrite(os, buf): Promise<void> {
+  return new Promise((resolve, reject) => {
+    os.write(buf, (err) => {
+      if (err) {
+        reject(err);
+      } else {
+        resolve();
+      }
+    });
+  });
+}
+
+export function osclose(os): Promise<void> {
+  return new Promise((resolve, reject) => {
+    os.close((err) => {
+      if (err) {
+        reject(err);
+      } else {
+        resolve();
+      }
+    });
+  });
+}
+
+export function osopen(path, opts) {
+  return new Promise((resolve, reject) => {
+    const outputStream = fs.createWriteStream(path, opts);
+
+    outputStream.on('open', function (fd) {
+      resolve(outputStream);
+    });
+
+    outputStream.on('error', function (err) {
+      reject(err);
+    });
+  });
+}
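The new file.ts wraps Node's callback-style fs and stream APIs in promises. A small sketch of composing these helpers for a positional read; the helper names come straight from the diff, while the wrapper function itself is only an illustration:

```typescript
// Illustration only: composes the promisified helpers exported by file.ts.
import {fopen, fstat, fread, fclose} from './file';

async function readRange(filePath: string, position: number, length: number): Promise<Buffer> {
  const stat = await fstat(filePath); // fs.stat wrapped in Promise<fs.Stats>
  const fd = await fopen(filePath); // opens the file read-only, resolves with the descriptor
  try {
    const safeLength = Math.min(length, stat.size - position);
    // fread rejects if fewer than `safeLength` bytes could be read
    return (await fread(fd, position, safeLength)) as Buffer;
  } finally {
    await fclose(fd); // always release the descriptor
  }
}
```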
package/src/parquetjs/parser/parquet-cursor.ts

@@ -0,0 +1,94 @@
+// Forked from https://github.com/kbajalc/parquets under MIT license (Copyright (c) 2017 ironSource Ltd.)
+import {FileMetaData} from '../parquet-thrift';
+import {ParquetEnvelopeReader} from './parquet-envelope-reader';
+import {ParquetSchema} from '../schema/schema';
+import {ParquetRecord} from '../schema/declare';
+import {materializeRecords} from '../schema/shred';
+
+/**
+ * A parquet cursor is used to retrieve rows from a parquet file in order
+ */
+export class ParquetCursor<T> implements AsyncIterable<T> {
+  public metadata: FileMetaData;
+  public envelopeReader: ParquetEnvelopeReader;
+  public schema: ParquetSchema;
+  public columnList: string[][];
+  public rowGroup: ParquetRecord[] = [];
+  public rowGroupIndex: number;
+
+  /**
+   * Create a new parquet reader from the file metadata and an envelope reader.
+   * It is usually not recommended to call this constructor directly except for
+   * advanced and internal use cases. Consider using getCursor() on the
+   * ParquetReader instead
+   */
+  constructor(
+    metadata: FileMetaData,
+    envelopeReader: ParquetEnvelopeReader,
+    schema: ParquetSchema,
+    columnList: string[][]
+  ) {
+    this.metadata = metadata;
+    this.envelopeReader = envelopeReader;
+    this.schema = schema;
+    this.columnList = columnList;
+    this.rowGroupIndex = 0;
+  }
+
+  /**
+   * Retrieve the next row from the cursor. Returns a row or NULL if the end
+   * of the file was reached
+   */
+  async next<T = any>(): Promise<T> {
+    if (this.rowGroup.length === 0) {
+      if (this.rowGroupIndex >= this.metadata.row_groups.length) {
+        // @ts-ignore
+        return null;
+      }
+      const rowBuffer = await this.envelopeReader.readRowGroup(
+        this.schema,
+        this.metadata.row_groups[this.rowGroupIndex],
+        this.columnList
+      );
+      this.rowGroup = materializeRecords(this.schema, rowBuffer);
+      this.rowGroupIndex++;
+    }
+    return this.rowGroup.shift() as any;
+  }
+
+  /**
+   * Rewind the cursor the the beginning of the file
+   */
+  rewind(): void {
+    this.rowGroup = [];
+    this.rowGroupIndex = 0;
+  }
+
+  /**
+   * Implement AsyncIterable
+   */
+  // tslint:disable-next-line:function-name
+  [Symbol.asyncIterator](): AsyncIterator<T> {
+    let done = false;
+    return {
+      next: async () => {
+        if (done) {
+          return {done, value: null};
+        }
+        const value = await this.next();
+        if (value === null) {
+          return {done: true, value};
+        }
+        return {done: false, value};
+      },
+      return: async () => {
+        done = true;
+        return {done, value: null};
+      },
+      throw: async () => {
+        done = true;
+        return {done: true, value: null};
+      }
+    };
+  }
+}
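Because ParquetCursor implements AsyncIterable via the Symbol.asyncIterator member above, rows can be consumed with a for await loop. A minimal sketch; obtaining the cursor (for example through the reader's getCursor() mentioned in the class comment) is outside this diff:

```typescript
// Sketch: drain a cursor into an array. Works for any AsyncIterable, including
// the ParquetCursor added above; cursor construction is assumed to happen elsewhere.
async function collectRows<T>(cursor: AsyncIterable<T>): Promise<T[]> {
  const rows: T[] = [];
  for await (const row of cursor) {
    // next() returns null at end-of-file, which ends the iteration
    rows.push(row);
  }
  return rows;
}
```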
package/src/parquetjs/parser/parquet-envelope-reader.ts

@@ -0,0 +1,199 @@
+// Forked from https://github.com/kbajalc/parquets under MIT license (Copyright (c) 2017 ironSource Ltd.)
+import {ParquetSchema} from '../schema/schema';
+import {PARQUET_MAGIC, PARQUET_MAGIC_ENCRYPTED} from '../../constants';
+import {ColumnChunk, CompressionCodec, FileMetaData, RowGroup, Type} from '../parquet-thrift';
+import {
+  ParquetBuffer,
+  ParquetCompression,
+  ParquetData,
+  PrimitiveType,
+  ParquetOptions
+} from '../schema/declare';
+import {decodeFileMetadata, getThriftEnum, fieldIndexOf} from '../utils/read-utils';
+import {decodeDataPages, decodePage} from './decoders';
+
+const DEFAULT_DICTIONARY_SIZE = 1e6;
+
+/**
+ * The parquet envelope reader allows direct, unbuffered access to the individual
+ * sections of the parquet file, namely the header, footer and the row groups.
+ * This class is intended for advanced/internal users; if you just want to retrieve
+ * rows from a parquet file use the ParquetReader instead
+ */
+export class ParquetEnvelopeReader {
+  public read: (position: number, length: number) => Promise<Buffer>;
+  /**
+   * Close this parquet reader. You MUST call this method once you're finished
+   * reading rows
+   */
+  public close: () => Promise<void>;
+  public fileSize: number;
+  public defaultDictionarySize: number;
+
+  static async openBuffer(buffer: Buffer): Promise<ParquetEnvelopeReader> {
+    const readFn = (position: number, length: number) =>
+      Promise.resolve(buffer.slice(position, position + length));
+    const closeFn = () => Promise.resolve();
+    return new ParquetEnvelopeReader(readFn, closeFn, buffer.length);
+  }
+
+  constructor(
+    read: (position: number, length: number) => Promise<Buffer>,
+    close: () => Promise<void>,
+    fileSize: number,
+    options?: any
+  ) {
+    this.read = read;
+    this.close = close;
+    this.fileSize = fileSize;
+    this.defaultDictionarySize = options?.defaultDictionarySize || DEFAULT_DICTIONARY_SIZE;
+  }
+
+  async readHeader(): Promise<void> {
+    const buffer = await this.read(0, PARQUET_MAGIC.length);
+
+    const magic = buffer.toString();
+    switch (magic) {
+      case PARQUET_MAGIC:
+        break;
+      case PARQUET_MAGIC_ENCRYPTED:
+        throw new Error('Encrypted parquet file not supported');
+      default:
+        throw new Error(`Invalid parquet file (magic=${magic})`);
+    }
+  }
+
+  async readRowGroup(
+    schema: ParquetSchema,
+    rowGroup: RowGroup,
+    columnList: string[][]
+  ): Promise<ParquetBuffer> {
+    const buffer: ParquetBuffer = {
+      rowCount: Number(rowGroup.num_rows),
+      columnData: {}
+    };
+    for (const colChunk of rowGroup.columns) {
+      const colMetadata = colChunk.meta_data;
+      const colKey = colMetadata?.path_in_schema;
+      if (columnList.length > 0 && fieldIndexOf(columnList, colKey!) < 0) {
+        continue; // eslint-disable-line no-continue
+      }
+      buffer.columnData[colKey!.join()] = await this.readColumnChunk(schema, colChunk);
+    }
+    return buffer;
+  }
+
+  /**
+   * Do reading of parquet file's column chunk
+   * @param schema
+   * @param colChunk
+   */
+  async readColumnChunk(schema: ParquetSchema, colChunk: ColumnChunk): Promise<ParquetData> {
+    if (colChunk.file_path !== undefined && colChunk.file_path !== null) {
+      throw new Error('external references are not supported');
+    }
+
+    const field = schema.findField(colChunk.meta_data?.path_in_schema!);
+    const type: PrimitiveType = getThriftEnum(Type, colChunk.meta_data?.type!) as any;
+
+    if (type !== field.primitiveType) {
+      throw new Error(`chunk type not matching schema: ${type}`);
+    }
+
+    const compression: ParquetCompression = getThriftEnum(
+      CompressionCodec,
+      colChunk.meta_data?.codec!
+    ) as any;
+
+    const pagesOffset = Number(colChunk.meta_data?.data_page_offset!);
+    let pagesSize = Number(colChunk.meta_data?.total_compressed_size!);
+
+    if (!colChunk.file_path) {
+      pagesSize = Math.min(
+        this.fileSize - pagesOffset,
+        Number(colChunk.meta_data?.total_compressed_size)
+      );
+    }
+
+    const options: ParquetOptions = {
+      type,
+      rLevelMax: field.rLevelMax,
+      dLevelMax: field.dLevelMax,
+      compression,
+      column: field,
+      numValues: colChunk.meta_data?.num_values,
+      dictionary: []
+    };
+
+    let dictionary;
+
+    const dictionaryPageOffset = colChunk?.meta_data?.dictionary_page_offset;
+
+    if (dictionaryPageOffset) {
+      const dictionaryOffset = Number(dictionaryPageOffset);
+      // Getting dictionary from column chunk to iterate all over indexes to get dataPage values.
+      dictionary = await this.getDictionary(dictionaryOffset, options, pagesOffset);
+    }
+
+    dictionary = options.dictionary?.length ? options.dictionary : dictionary;
+    const pagesBuf = await this.read(pagesOffset, pagesSize);
+    return await decodeDataPages(pagesBuf, {...options, dictionary});
+  }
+
+  /**
+   * Getting dictionary for allows to flatten values by indices.
+   * @param dictionaryPageOffset
+   * @param options
+   * @param pagesOffset
+   * @returns
+   */
+  async getDictionary(
+    dictionaryPageOffset: number,
+    options: ParquetOptions,
+    pagesOffset: number
+  ): Promise<string[]> {
+    if (dictionaryPageOffset === 0) {
+      // dictionarySize = Math.min(this.fileSize - pagesOffset, this.defaultDictionarySize);
+      // pagesBuf = await this.read(pagesOffset, dictionarySize);
+
+      // In this case we are working with parquet-mr files format. Problem is described below:
+      // https://stackoverflow.com/questions/55225108/why-is-dictionary-page-offset-0-for-plain-dictionary-encoding
+      // We need to get dictionary page from column chunk if it exists.
+      // Now if we use code commented above we don't get DICTIONARY_PAGE we get DATA_PAGE instead.
+      return [];
+    }
+
+    const dictionarySize = Math.min(
+      this.fileSize - dictionaryPageOffset,
+      this.defaultDictionarySize
+    );
+    const pagesBuf = await this.read(dictionaryPageOffset, dictionarySize);
+
+    const cursor = {buffer: pagesBuf, offset: 0, size: pagesBuf.length};
+    const decodedPage = await decodePage(cursor, options);
+
+    return decodedPage.dictionary!;
+  }
+
+  async readFooter(): Promise<FileMetaData> {
+    const trailerLen = PARQUET_MAGIC.length + 4;
+    const trailerBuf = await this.read(this.fileSize - trailerLen, trailerLen);
+
+    const magic = trailerBuf.slice(4).toString();
+    if (magic !== PARQUET_MAGIC) {
+      throw new Error(`Not a valid parquet file (magic="${magic})`);
+    }
+
+    const metadataSize = trailerBuf.readUInt32LE(0);
+    const metadataOffset = this.fileSize - metadataSize - trailerLen;
+    if (metadataOffset < PARQUET_MAGIC.length) {
+      throw new Error(`Invalid metadata size ${metadataOffset}`);
+    }
+
+    const metadataBuf = await this.read(metadataOffset, metadataSize);
+    // let metadata = new parquet_thrift.FileMetaData();
+    // parquet_util.decodeThrift(metadata, metadataBuf);
+    const {metadata} = decodeFileMetadata(metadataBuf);
+    return metadata;
+  }
+}
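readFooter() locates the thrift-encoded FileMetaData by reading the 8-byte trailer: a 4-byte little-endian metadata length followed by the PAR1 magic. A sketch of driving the envelope reader directly over an in-memory buffer, using only methods defined above; the import path is an assumption and, per the class comment, this is advanced/internal usage:

```typescript
// Sketch using only methods shown in this diff; the import path is an assumption.
import {ParquetEnvelopeReader} from './parser/parquet-envelope-reader';

async function inspect(buffer: Buffer): Promise<void> {
  const reader = await ParquetEnvelopeReader.openBuffer(buffer);
  try {
    await reader.readHeader(); // validates the magic, rejects encrypted files
    const metadata = await reader.readFooter(); // decodes FileMetaData from the trailer
    console.log(`row groups: ${metadata.row_groups.length}`);
  } finally {
    await reader.close();
  }
}
```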