@loaders.gl/parquet 3.3.0 → 3.4.0-alpha.1
This diff compares the contents of package versions as published to their respective public registries. It is provided for informational purposes only.
- package/dist/dist.min.js +26 -17
- package/dist/dist.min.js.map +3 -3
- package/dist/es5/index.js +3 -3
- package/dist/es5/index.js.map +1 -1
- package/dist/es5/lib/parse-parquet.js +25 -49
- package/dist/es5/lib/parse-parquet.js.map +1 -1
- package/dist/es5/parquet-loader.js +2 -3
- package/dist/es5/parquet-loader.js.map +1 -1
- package/dist/es5/parquet-wasm-loader.js +1 -1
- package/dist/es5/parquet-wasm-loader.js.map +1 -1
- package/dist/es5/parquet-wasm-writer.js +1 -1
- package/dist/es5/parquet-wasm-writer.js.map +1 -1
- package/dist/es5/parquet-writer.js +1 -1
- package/dist/es5/parquet-writer.js.map +1 -1
- package/dist/es5/parquetjs/compression.js +5 -15
- package/dist/es5/parquetjs/compression.js.map +1 -1
- package/dist/es5/parquetjs/encoder/{parquet-encoder.js → writer.js} +158 -70
- package/dist/es5/parquetjs/encoder/writer.js.map +1 -0
- package/dist/es5/parquetjs/file.js +94 -0
- package/dist/es5/parquetjs/file.js.map +1 -0
- package/dist/es5/parquetjs/parser/parquet-cursor.js +183 -0
- package/dist/es5/parquetjs/parser/parquet-cursor.js.map +1 -0
- package/dist/es5/parquetjs/parser/parquet-envelope-reader.js +327 -0
- package/dist/es5/parquetjs/parser/parquet-envelope-reader.js.map +1 -0
- package/dist/es5/parquetjs/parser/parquet-reader.js +222 -553
- package/dist/es5/parquetjs/parser/parquet-reader.js.map +1 -1
- package/dist/es5/parquetjs/schema/declare.js +1 -3
- package/dist/es5/parquetjs/schema/declare.js.map +1 -1
- package/dist/es5/parquetjs/schema/shred.js +33 -39
- package/dist/es5/parquetjs/schema/shred.js.map +1 -1
- package/dist/es5/parquetjs/schema/types.js.map +1 -1
- package/dist/es5/parquetjs/utils/buffer-utils.js +19 -0
- package/dist/es5/parquetjs/utils/buffer-utils.js.map +1 -0
- package/dist/es5/parquetjs/utils/file-utils.js +3 -2
- package/dist/es5/parquetjs/utils/file-utils.js.map +1 -1
- package/dist/esm/index.js +1 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/lib/parse-parquet.js +12 -6
- package/dist/esm/lib/parse-parquet.js.map +1 -1
- package/dist/esm/parquet-loader.js +2 -3
- package/dist/esm/parquet-loader.js.map +1 -1
- package/dist/esm/parquet-wasm-loader.js +1 -1
- package/dist/esm/parquet-wasm-loader.js.map +1 -1
- package/dist/esm/parquet-wasm-writer.js +1 -1
- package/dist/esm/parquet-wasm-writer.js.map +1 -1
- package/dist/esm/parquet-writer.js +1 -1
- package/dist/esm/parquet-writer.js.map +1 -1
- package/dist/esm/parquetjs/compression.js +1 -10
- package/dist/esm/parquetjs/compression.js.map +1 -1
- package/dist/esm/parquetjs/encoder/{parquet-encoder.js → writer.js} +37 -7
- package/dist/esm/parquetjs/encoder/writer.js.map +1 -0
- package/dist/esm/parquetjs/file.js +81 -0
- package/dist/esm/parquetjs/file.js.map +1 -0
- package/dist/esm/parquetjs/parser/parquet-cursor.js +78 -0
- package/dist/esm/parquetjs/parser/parquet-cursor.js.map +1 -0
- package/dist/esm/parquetjs/parser/parquet-envelope-reader.js +129 -0
- package/dist/esm/parquetjs/parser/parquet-envelope-reader.js.map +1 -0
- package/dist/esm/parquetjs/parser/parquet-reader.js +72 -158
- package/dist/esm/parquetjs/parser/parquet-reader.js.map +1 -1
- package/dist/esm/parquetjs/schema/declare.js +0 -1
- package/dist/esm/parquetjs/schema/declare.js.map +1 -1
- package/dist/esm/parquetjs/schema/shred.js +34 -42
- package/dist/esm/parquetjs/schema/shred.js.map +1 -1
- package/dist/esm/parquetjs/schema/types.js.map +1 -1
- package/dist/esm/parquetjs/utils/buffer-utils.js +13 -0
- package/dist/esm/parquetjs/utils/buffer-utils.js.map +1 -0
- package/dist/esm/parquetjs/utils/file-utils.js +1 -1
- package/dist/esm/parquetjs/utils/file-utils.js.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +4 -3
- package/dist/lib/parse-parquet.d.ts +2 -2
- package/dist/lib/parse-parquet.d.ts.map +1 -1
- package/dist/lib/parse-parquet.js +12 -24
- package/dist/parquet-loader.d.ts +0 -1
- package/dist/parquet-loader.d.ts.map +1 -1
- package/dist/parquet-loader.js +1 -2
- package/dist/parquet-worker.js +24 -15
- package/dist/parquet-worker.js.map +3 -3
- package/dist/parquetjs/compression.d.ts.map +1 -1
- package/dist/parquetjs/compression.js +5 -16
- package/dist/parquetjs/encoder/{parquet-encoder.d.ts → writer.d.ts} +19 -10
- package/dist/parquetjs/encoder/writer.d.ts.map +1 -0
- package/dist/parquetjs/encoder/{parquet-encoder.js → writer.js} +37 -39
- package/dist/parquetjs/file.d.ts +10 -0
- package/dist/parquetjs/file.d.ts.map +1 -0
- package/dist/parquetjs/file.js +99 -0
- package/dist/parquetjs/parser/parquet-cursor.d.ts +36 -0
- package/dist/parquetjs/parser/parquet-cursor.d.ts.map +1 -0
- package/dist/parquetjs/parser/parquet-cursor.js +74 -0
- package/dist/parquetjs/parser/parquet-envelope-reader.d.ts +40 -0
- package/dist/parquetjs/parser/parquet-envelope-reader.d.ts.map +1 -0
- package/dist/parquetjs/parser/parquet-envelope-reader.js +136 -0
- package/dist/parquetjs/parser/parquet-reader.d.ts +57 -47
- package/dist/parquetjs/parser/parquet-reader.d.ts.map +1 -1
- package/dist/parquetjs/parser/parquet-reader.js +102 -168
- package/dist/parquetjs/schema/declare.d.ts +7 -14
- package/dist/parquetjs/schema/declare.d.ts.map +1 -1
- package/dist/parquetjs/schema/declare.js +0 -2
- package/dist/parquetjs/schema/shred.d.ts +0 -115
- package/dist/parquetjs/schema/shred.d.ts.map +1 -1
- package/dist/parquetjs/schema/shred.js +43 -161
- package/dist/parquetjs/schema/types.d.ts +2 -2
- package/dist/parquetjs/schema/types.d.ts.map +1 -1
- package/dist/parquetjs/utils/buffer-utils.d.ts +10 -0
- package/dist/parquetjs/utils/buffer-utils.d.ts.map +1 -0
- package/dist/parquetjs/utils/buffer-utils.js +22 -0
- package/dist/parquetjs/utils/file-utils.d.ts +4 -3
- package/dist/parquetjs/utils/file-utils.d.ts.map +1 -1
- package/dist/parquetjs/utils/file-utils.js +5 -2
- package/package.json +5 -7
- package/src/index.ts +2 -2
- package/src/lib/parse-parquet.ts +12 -25
- package/src/parquet-loader.ts +1 -3
- package/src/parquetjs/compression.ts +1 -14
- package/src/parquetjs/encoder/{parquet-encoder.ts → writer.ts} +28 -22
- package/src/parquetjs/file.ts +90 -0
- package/src/parquetjs/parser/parquet-cursor.ts +94 -0
- package/src/parquetjs/parser/parquet-envelope-reader.ts +199 -0
- package/src/parquetjs/parser/parquet-reader.ts +122 -239
- package/src/parquetjs/schema/declare.ts +9 -17
- package/src/parquetjs/schema/shred.ts +28 -157
- package/src/parquetjs/schema/types.ts +27 -21
- package/src/parquetjs/utils/buffer-utils.ts +18 -0
- package/src/parquetjs/utils/file-utils.ts +4 -3
- package/dist/es5/lib/convert-schema-deep.ts.disabled +0 -910
- package/dist/es5/parquetjs/encoder/parquet-encoder.js.map +0 -1
- package/dist/esm/lib/convert-schema-deep.ts.disabled +0 -910
- package/dist/esm/parquetjs/encoder/parquet-encoder.js.map +0 -1
- package/dist/parquetjs/encoder/parquet-encoder.d.ts.map +0 -1
- package/src/lib/convert-schema-deep.ts.disabled +0 -910
package/src/parquetjs/parser/parquet-reader.ts
@@ -1,280 +1,163 @@
 // Forked from https://github.com/kbajalc/parquets under MIT license (Copyright (c) 2017 ironSource Ltd.)
-import
-
+import {ParquetEnvelopeReader} from './parquet-envelope-reader';
+import {FileMetaData} from '../parquet-thrift';
 import {ParquetSchema} from '../schema/schema';
+import {ParquetCursor} from './parquet-cursor';
+import {PARQUET_VERSION} from '../../constants';
 import {decodeSchema} from './decoders';
-import {materializeRecords} from '../schema/shred';
-
-import {PARQUET_MAGIC, PARQUET_MAGIC_ENCRYPTED} from '../../constants';
-import {ColumnChunk, CompressionCodec, FileMetaData, RowGroup, Type} from '../parquet-thrift';
-import {
-  ParquetBuffer,
-  ParquetCompression,
-  ParquetData,
-  PrimitiveType,
-  ParquetOptions
-} from '../schema/declare';
-import {decodeFileMetadata, getThriftEnum, fieldIndexOf} from '../utils/read-utils';
-import {decodeDataPages, decodePage} from './decoders';
-
-export type ParquetReaderProps = {
-  defaultDictionarySize?: number;
-};
-
-/** Properties for initializing a ParquetRowGroupReader */
-export type ParquetIterationProps = {
-  /** Filter allowing some columns to be dropped */
-  columnList?: string[] | string[][];
-};
-
-const DEFAULT_PROPS: Required<ParquetReaderProps> = {
-  defaultDictionarySize: 1e6
-};
 
 /**
- *
- *
- *
- *
+ * A parquet reader allows retrieving the rows from a parquet file in order.
+ * The basic usage is to create a reader and then retrieve a cursor/iterator
+ * which allows you to consume row after row until all rows have been read. It is
+ * important that you call close() after you are finished reading the file to
+ * avoid leaking file descriptors.
  */
-export class ParquetReader {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    for await (const rows of this.rowBatchIterator(props)) {
-      // yield *rows
-      for (const row of rows) {
-        yield row;
-      }
+export class ParquetReader<T> implements AsyncIterable<T> {
+  /**
+   * return a new parquet reader initialized with a read function
+   */
+  static async openBlob<T>(blob: Blob): Promise<ParquetReader<T>> {
+    const readFn = async (start: number, length: number) => {
+      const arrayBuffer = await blob.slice(start, start + length).arrayBuffer();
+      return Buffer.from(arrayBuffer);
+    };
+    const closeFn = async () => {};
+    const size = blob.size;
+    const envelopeReader = new ParquetEnvelopeReader(readFn, closeFn, size);
+    try {
+      await envelopeReader.readHeader();
+      const metadata = await envelopeReader.readFooter();
+      return new ParquetReader(metadata, envelopeReader);
+    } catch (err) {
+      await envelopeReader.close();
+      throw err;
     }
   }
 
-  /**
-
-
-
-
+  /**
+   * return a new parquet reader initialized with a read function
+   */
+  static async openArrayBuffer<T>(arrayBuffer: ArrayBuffer): Promise<ParquetReader<T>> {
+    const readFn = async (start: number, length: number) => Buffer.from(arrayBuffer, start, length);
+    const closeFn = async () => {};
+    const size = arrayBuffer.byteLength;
+    const envelopeReader = new ParquetEnvelopeReader(readFn, closeFn, size);
+    try {
+      await envelopeReader.readHeader();
+      const metadata = await envelopeReader.readFooter();
+      return new ParquetReader(metadata, envelopeReader);
+    } catch (err) {
+      await envelopeReader.close();
+      throw err;
     }
   }
 
-
-
-
-
-
-
-
-
-
-
-    const rowGroupCount = metadata?.row_groups.length || 0;
-
-    for (let rowGroupIndex = 0; rowGroupIndex < rowGroupCount; rowGroupIndex++) {
-      const rowGroup = await this.readRowGroup(
-        schema,
-        metadata.row_groups[rowGroupIndex],
-        columnList
-      );
-      yield rowGroup;
+  static async openBuffer<T>(buffer: Buffer): Promise<ParquetReader<T>> {
+    const envelopeReader = await ParquetEnvelopeReader.openBuffer(buffer);
+    try {
+      await envelopeReader.readHeader();
+      const metadata = await envelopeReader.readFooter();
+      return new ParquetReader<T>(metadata, envelopeReader);
+    } catch (err) {
+      await envelopeReader.close();
+      throw err;
     }
   }
 
-
-
-
-  }
-
-  async getSchema(): Promise<ParquetSchema> {
-    const metadata = await this.getFileMetadata();
-    const root = metadata.schema[0];
-    const {schema: schemaDefinition} = decodeSchema(metadata.schema, 1, root.num_children!);
-    const schema = new ParquetSchema(schemaDefinition);
-    return schema;
-  }
+  public metadata: FileMetaData;
+  public envelopeReader: ParquetEnvelopeReader;
+  public schema: ParquetSchema;
 
   /**
-   *
-   *
+   * Create a new parquet reader from the file metadata and an envelope reader.
+   * It is not recommended to call this constructor directly except for advanced
+   * and internal use cases. Consider using one of the open{File,Buffer} methods
+   * instead
   */
-
-
-
-    for (const kv of metadata.key_value_metadata!) {
-      md[kv.key] = kv.value!;
+  constructor(metadata: FileMetaData, envelopeReader: ParquetEnvelopeReader) {
+    if (metadata.version !== PARQUET_VERSION) {
+      throw new Error('invalid parquet version');
     }
-    return md;
-  }
 
-
-
-
-
-
-    return this.metadata;
+    this.metadata = metadata;
+    this.envelopeReader = envelopeReader;
+    const root = this.metadata.schema[0];
+    const {schema} = decodeSchema(this.metadata.schema, 1, root.num_children!);
+    this.schema = new ParquetSchema(schema);
   }
 
-
-
-
-
-
-
-
-
-        break;
-      case PARQUET_MAGIC_ENCRYPTED:
-        throw new Error('Encrypted parquet file not supported');
-      default:
-        throw new Error(`Invalid parquet file (magic=${magic})`);
-    }
+  /**
+   * Close this parquet reader. You MUST call this method once you're finished
+   * reading rows
+   */
+  async close(): Promise<void> {
+    await this.envelopeReader.close();
+    // this.envelopeReader = null;
+    // this.metadata = null;
   }
 
-  /**
-
-
-
-
-
-
-
+  /**
+   * Return a cursor to the file. You may open more than one cursor and use
+   * them concurrently. All cursors become invalid once close() is called on
+   * the reader object.
+   *
+   * The required_columns parameter controls which columns are actually read
+   * from disk. An empty array or no value implies all columns. A list of column
+   * names means that only those columns should be loaded from disk.
+   */
+  getCursor(): ParquetCursor<T>;
+  // @ts-ignore
+  getCursor<K extends keyof T>(columnList: (K | K[])[]): ParquetCursor<Pick<T, K>>;
+  getCursor(columnList: (string | string[])[]): ParquetCursor<Partial<T>>;
+  getCursor(columnList?: (string | string[])[]): ParquetCursor<Partial<T>> {
+    if (!columnList) {
+      // tslint:disable-next-line:no-parameter-reassignment
+      columnList = [];
     }
 
-
-
-    if (metadataOffset < PARQUET_MAGIC.length) {
-      throw new Error(`Invalid metadata size ${metadataOffset}`);
-    }
+    // tslint:disable-next-line:no-parameter-reassignment
+    columnList = columnList.map((x) => (Array.isArray(x) ? x : [x]));
 
-
-
-
-
-
+    return new ParquetCursor<T>(
+      this.metadata,
+      this.envelopeReader,
+      this.schema,
+      columnList as string[][]
+    );
   }
 
-  /**
-
-
-
-
-
-    const buffer: ParquetBuffer = {
-      rowCount: Number(rowGroup.num_rows),
-      columnData: {}
-    };
-    for (const colChunk of rowGroup.columns) {
-      const colMetadata = colChunk.meta_data;
-      const colKey = colMetadata?.path_in_schema;
-      if (columnList.length > 0 && fieldIndexOf(columnList, colKey!) < 0) {
-        continue; // eslint-disable-line no-continue
-      }
-      buffer.columnData[colKey!.join()] = await this.readColumnChunk(schema, colChunk);
-    }
-    return buffer;
+  /**
+   * Return the number of rows in this file. Note that the number of rows is
+   * not neccessarily equal to the number of rows in each column.
+   */
+  getRowCount(): number {
+    return Number(this.metadata.num_rows);
   }
 
   /**
-   *
+   * Returns the ParquetSchema for this file
   */
-
-
-      throw new Error('external references are not supported');
-    }
-
-    const field = schema.findField(colChunk.meta_data?.path_in_schema!);
-    const type: PrimitiveType = getThriftEnum(Type, colChunk.meta_data?.type!) as any;
-
-    if (type !== field.primitiveType) {
-      throw new Error(`chunk type not matching schema: ${type}`);
-    }
-
-    const compression: ParquetCompression = getThriftEnum(
-      CompressionCodec,
-      colChunk.meta_data?.codec!
-    ) as any;
-
-    const pagesOffset = Number(colChunk.meta_data?.data_page_offset!);
-    let pagesSize = Number(colChunk.meta_data?.total_compressed_size!);
-
-    if (!colChunk.file_path) {
-      pagesSize = Math.min(
-        this.file.size - pagesOffset,
-        Number(colChunk.meta_data?.total_compressed_size)
-      );
-    }
-
-    const options: ParquetOptions = {
-      type,
-      rLevelMax: field.rLevelMax,
-      dLevelMax: field.dLevelMax,
-      compression,
-      column: field,
-      numValues: colChunk.meta_data?.num_values,
-      dictionary: []
-    };
-
-    let dictionary;
-
-    const dictionaryPageOffset = colChunk?.meta_data?.dictionary_page_offset;
-
-    if (dictionaryPageOffset) {
-      const dictionaryOffset = Number(dictionaryPageOffset);
-      // Getting dictionary from column chunk to iterate all over indexes to get dataPage values.
-      dictionary = await this.getDictionary(dictionaryOffset, options, pagesOffset);
-    }
-
-    dictionary = options.dictionary?.length ? options.dictionary : dictionary;
-    const pagesBuf = await this.file.read(pagesOffset, pagesSize);
-    return await decodeDataPages(pagesBuf, {...options, dictionary});
+  getSchema(): ParquetSchema {
+    return this.schema;
   }
 
   /**
-   *
-   * @param dictionaryPageOffset
-   * @param options
-   * @param pagesOffset
-   * @returns
+   * Returns the user (key/value) metadata for this file
   */
-
-
-
-
-  ): Promise<string[]> {
-    if (dictionaryPageOffset === 0) {
-      // dictionarySize = Math.min(this.fileSize - pagesOffset, this.defaultDictionarySize);
-      // pagesBuf = await this.read(pagesOffset, dictionarySize);
-
-      // In this case we are working with parquet-mr files format. Problem is described below:
-      // https://stackoverflow.com/questions/55225108/why-is-dictionary-page-offset-0-for-plain-dictionary-encoding
-      // We need to get dictionary page from column chunk if it exists.
-      // Now if we use code commented above we don't get DICTIONARY_PAGE we get DATA_PAGE instead.
-      return [];
+  getMetadata(): Record<string, string> {
+    const md: Record<string, string> = {};
+    for (const kv of this.metadata.key_value_metadata!) {
+      md[kv.key] = kv.value!;
     }
+    return md;
+  }
 
-
-
-
-
-
-
-    const cursor = {buffer: pagesBuf, offset: 0, size: pagesBuf.length};
-    const decodedPage = await decodePage(cursor, options);
-
-    return decodedPage.dictionary!;
+  /**
+   * Implement AsyncIterable
+   */
+  // tslint:disable-next-line:function-name
+  [Symbol.asyncIterator](): AsyncIterator<T> {
+    return this.getCursor()[Symbol.asyncIterator]();
   }
 }
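The hunk above replaces the 3.3.0 monolithic reader with the upstream parquets-style split across ParquetReader, ParquetEnvelopeReader, and ParquetCursor. Below is a minimal usage sketch based only on the signatures visible in this diff; the import path and the Row type are illustrative assumptions, since the diff does not show what the package index re-exports.

```ts
// Sketch only: based on the signatures shown in the diff above.
// The import path is an assumption; the diff does not show how (or whether)
// the package index re-exports ParquetReader.
import {ParquetReader} from './parquetjs/parser/parquet-reader';

type Row = {id: number; name: string}; // illustrative row type

async function readAllRows(arrayBuffer: ArrayBuffer): Promise<Row[]> {
  // openArrayBuffer() wraps the buffer in a ParquetEnvelopeReader and
  // validates the header and footer before returning a reader.
  const reader = await ParquetReader.openArrayBuffer<Row>(arrayBuffer);
  const rows: Row[] = [];
  try {
    // ParquetReader<T> implements AsyncIterable<T>; iterating the reader
    // opens a default cursor over all columns.
    for await (const row of reader) {
      rows.push(row);
    }
    // To read only a subset of columns instead:
    //   for await (const partial of reader.getCursor(['id'])) { ... }
  } finally {
    // Per the new doc comment, close() must be called to avoid leaking
    // file descriptors.
    await reader.close();
  }
  return rows;
}
```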
package/src/parquetjs/schema/declare.ts
@@ -98,7 +98,6 @@ export interface ParquetField {
   fields?: Record<string, ParquetField>;
 }
 
-/** @todo better name, this is an internal type? */
 export interface ParquetOptions {
   type: ParquetType;
   rLevelMax: number;
@@ -109,14 +108,20 @@ export interface ParquetOptions {
   dictionary?: ParquetDictionary;
 }
 
+export interface ParquetData {
+  dlevels: number[];
+  rlevels: number[];
+  values: any[];
+  count: number;
+  pageHeaders: PageHeader[];
+}
+
 export interface ParquetPageData {
   dlevels: number[];
   rlevels: number[];
-
-  values: any[]; // ArrayLike<any>;
+  values: any[];
   count: number;
   dictionary?: ParquetDictionary;
-  /** The "raw" page header from the file */
   pageHeader: PageHeader;
 }
 
@@ -124,24 +129,11 @@ export interface ParquetRecord {
   [key: string]: any;
 }
 
-/** @
- * Holds data for one row group (column chunks) */
 export class ParquetBuffer {
-  /** Number of rows in this page */
   rowCount: number;
-
   columnData: Record<string, ParquetData>;
   constructor(rowCount: number = 0, columnData: Record<string, ParquetData> = {}) {
     this.rowCount = rowCount;
     this.columnData = columnData;
   }
 }
-
-/** Holds the data for one column chunk */
-export interface ParquetData {
-  dlevels: number[];
-  rlevels: number[];
-  values: any[];
-  count: number;
-  pageHeaders: PageHeader[];
-}
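In declare.ts, ParquetData (the shredded data for one column chunk) now sits beside ParquetPageData instead of trailing the file, and ParquetBuffer still holds one row group of such chunks keyed by column path. Below is a small hand-written sketch of how these shapes fit together; it is not package code, and PageHeader is stubbed out so the snippet is self-contained (in the package it comes from the generated thrift types).

```ts
// Hand-written illustration of the declare.ts shapes above; not package code.
type PageHeader = Record<string, unknown>; // stub for the thrift-generated type

interface ParquetData {
  dlevels: number[]; // definition levels, one per value slot
  rlevels: number[]; // repetition levels, one per value slot
  values: any[];
  count: number;
  pageHeaders: PageHeader[];
}

class ParquetBuffer {
  rowCount: number;
  columnData: Record<string, ParquetData>;
  constructor(rowCount: number = 0, columnData: Record<string, ParquetData> = {}) {
    this.rowCount = rowCount;
    this.columnData = columnData;
  }
}

// Two rows of a flat, required 'id' column: no repetition and no optional
// ancestors, so both level arrays are all zeros.
const idColumn: ParquetData = {
  dlevels: [0, 0],
  rlevels: [0, 0],
  values: [1, 2],
  count: 2,
  pageHeaders: []
};

const rowGroup = new ParquetBuffer(2, {id: idColumn});
console.log(rowGroup.rowCount, Object.keys(rowGroup.columnData));
```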