@loaders.gl/parquet 3.3.0 → 3.4.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dist/dist.min.js +26 -17
  2. package/dist/dist.min.js.map +3 -3
  3. package/dist/es5/index.js +3 -3
  4. package/dist/es5/index.js.map +1 -1
  5. package/dist/es5/lib/parse-parquet.js +25 -49
  6. package/dist/es5/lib/parse-parquet.js.map +1 -1
  7. package/dist/es5/parquet-loader.js +2 -3
  8. package/dist/es5/parquet-loader.js.map +1 -1
  9. package/dist/es5/parquet-wasm-loader.js +1 -1
  10. package/dist/es5/parquet-wasm-loader.js.map +1 -1
  11. package/dist/es5/parquet-wasm-writer.js +1 -1
  12. package/dist/es5/parquet-wasm-writer.js.map +1 -1
  13. package/dist/es5/parquet-writer.js +1 -1
  14. package/dist/es5/parquet-writer.js.map +1 -1
  15. package/dist/es5/parquetjs/compression.js +5 -15
  16. package/dist/es5/parquetjs/compression.js.map +1 -1
  17. package/dist/es5/parquetjs/encoder/{parquet-encoder.js → writer.js} +158 -70
  18. package/dist/es5/parquetjs/encoder/writer.js.map +1 -0
  19. package/dist/es5/parquetjs/file.js +94 -0
  20. package/dist/es5/parquetjs/file.js.map +1 -0
  21. package/dist/es5/parquetjs/parser/parquet-cursor.js +183 -0
  22. package/dist/es5/parquetjs/parser/parquet-cursor.js.map +1 -0
  23. package/dist/es5/parquetjs/parser/parquet-envelope-reader.js +327 -0
  24. package/dist/es5/parquetjs/parser/parquet-envelope-reader.js.map +1 -0
  25. package/dist/es5/parquetjs/parser/parquet-reader.js +222 -553
  26. package/dist/es5/parquetjs/parser/parquet-reader.js.map +1 -1
  27. package/dist/es5/parquetjs/schema/declare.js +1 -3
  28. package/dist/es5/parquetjs/schema/declare.js.map +1 -1
  29. package/dist/es5/parquetjs/schema/shred.js +33 -39
  30. package/dist/es5/parquetjs/schema/shred.js.map +1 -1
  31. package/dist/es5/parquetjs/schema/types.js.map +1 -1
  32. package/dist/es5/parquetjs/utils/buffer-utils.js +19 -0
  33. package/dist/es5/parquetjs/utils/buffer-utils.js.map +1 -0
  34. package/dist/es5/parquetjs/utils/file-utils.js +3 -2
  35. package/dist/es5/parquetjs/utils/file-utils.js.map +1 -1
  36. package/dist/esm/index.js +1 -1
  37. package/dist/esm/index.js.map +1 -1
  38. package/dist/esm/lib/parse-parquet.js +12 -6
  39. package/dist/esm/lib/parse-parquet.js.map +1 -1
  40. package/dist/esm/parquet-loader.js +2 -3
  41. package/dist/esm/parquet-loader.js.map +1 -1
  42. package/dist/esm/parquet-wasm-loader.js +1 -1
  43. package/dist/esm/parquet-wasm-loader.js.map +1 -1
  44. package/dist/esm/parquet-wasm-writer.js +1 -1
  45. package/dist/esm/parquet-wasm-writer.js.map +1 -1
  46. package/dist/esm/parquet-writer.js +1 -1
  47. package/dist/esm/parquet-writer.js.map +1 -1
  48. package/dist/esm/parquetjs/compression.js +1 -10
  49. package/dist/esm/parquetjs/compression.js.map +1 -1
  50. package/dist/esm/parquetjs/encoder/{parquet-encoder.js → writer.js} +37 -7
  51. package/dist/esm/parquetjs/encoder/writer.js.map +1 -0
  52. package/dist/esm/parquetjs/file.js +81 -0
  53. package/dist/esm/parquetjs/file.js.map +1 -0
  54. package/dist/esm/parquetjs/parser/parquet-cursor.js +78 -0
  55. package/dist/esm/parquetjs/parser/parquet-cursor.js.map +1 -0
  56. package/dist/esm/parquetjs/parser/parquet-envelope-reader.js +129 -0
  57. package/dist/esm/parquetjs/parser/parquet-envelope-reader.js.map +1 -0
  58. package/dist/esm/parquetjs/parser/parquet-reader.js +72 -158
  59. package/dist/esm/parquetjs/parser/parquet-reader.js.map +1 -1
  60. package/dist/esm/parquetjs/schema/declare.js +0 -1
  61. package/dist/esm/parquetjs/schema/declare.js.map +1 -1
  62. package/dist/esm/parquetjs/schema/shred.js +34 -42
  63. package/dist/esm/parquetjs/schema/shred.js.map +1 -1
  64. package/dist/esm/parquetjs/schema/types.js.map +1 -1
  65. package/dist/esm/parquetjs/utils/buffer-utils.js +13 -0
  66. package/dist/esm/parquetjs/utils/buffer-utils.js.map +1 -0
  67. package/dist/esm/parquetjs/utils/file-utils.js +1 -1
  68. package/dist/esm/parquetjs/utils/file-utils.js.map +1 -1
  69. package/dist/index.d.ts +1 -1
  70. package/dist/index.d.ts.map +1 -1
  71. package/dist/index.js +4 -3
  72. package/dist/lib/parse-parquet.d.ts +2 -2
  73. package/dist/lib/parse-parquet.d.ts.map +1 -1
  74. package/dist/lib/parse-parquet.js +12 -24
  75. package/dist/parquet-loader.d.ts +0 -1
  76. package/dist/parquet-loader.d.ts.map +1 -1
  77. package/dist/parquet-loader.js +1 -2
  78. package/dist/parquet-worker.js +24 -15
  79. package/dist/parquet-worker.js.map +3 -3
  80. package/dist/parquetjs/compression.d.ts.map +1 -1
  81. package/dist/parquetjs/compression.js +5 -16
  82. package/dist/parquetjs/encoder/{parquet-encoder.d.ts → writer.d.ts} +19 -10
  83. package/dist/parquetjs/encoder/writer.d.ts.map +1 -0
  84. package/dist/parquetjs/encoder/{parquet-encoder.js → writer.js} +37 -39
  85. package/dist/parquetjs/file.d.ts +10 -0
  86. package/dist/parquetjs/file.d.ts.map +1 -0
  87. package/dist/parquetjs/file.js +99 -0
  88. package/dist/parquetjs/parser/parquet-cursor.d.ts +36 -0
  89. package/dist/parquetjs/parser/parquet-cursor.d.ts.map +1 -0
  90. package/dist/parquetjs/parser/parquet-cursor.js +74 -0
  91. package/dist/parquetjs/parser/parquet-envelope-reader.d.ts +40 -0
  92. package/dist/parquetjs/parser/parquet-envelope-reader.d.ts.map +1 -0
  93. package/dist/parquetjs/parser/parquet-envelope-reader.js +136 -0
  94. package/dist/parquetjs/parser/parquet-reader.d.ts +57 -47
  95. package/dist/parquetjs/parser/parquet-reader.d.ts.map +1 -1
  96. package/dist/parquetjs/parser/parquet-reader.js +102 -168
  97. package/dist/parquetjs/schema/declare.d.ts +7 -14
  98. package/dist/parquetjs/schema/declare.d.ts.map +1 -1
  99. package/dist/parquetjs/schema/declare.js +0 -2
  100. package/dist/parquetjs/schema/shred.d.ts +0 -115
  101. package/dist/parquetjs/schema/shred.d.ts.map +1 -1
  102. package/dist/parquetjs/schema/shred.js +43 -161
  103. package/dist/parquetjs/schema/types.d.ts +2 -2
  104. package/dist/parquetjs/schema/types.d.ts.map +1 -1
  105. package/dist/parquetjs/utils/buffer-utils.d.ts +10 -0
  106. package/dist/parquetjs/utils/buffer-utils.d.ts.map +1 -0
  107. package/dist/parquetjs/utils/buffer-utils.js +22 -0
  108. package/dist/parquetjs/utils/file-utils.d.ts +4 -3
  109. package/dist/parquetjs/utils/file-utils.d.ts.map +1 -1
  110. package/dist/parquetjs/utils/file-utils.js +5 -2
  111. package/package.json +5 -7
  112. package/src/index.ts +2 -2
  113. package/src/lib/parse-parquet.ts +12 -25
  114. package/src/parquet-loader.ts +1 -3
  115. package/src/parquetjs/compression.ts +1 -14
  116. package/src/parquetjs/encoder/{parquet-encoder.ts → writer.ts} +28 -22
  117. package/src/parquetjs/file.ts +90 -0
  118. package/src/parquetjs/parser/parquet-cursor.ts +94 -0
  119. package/src/parquetjs/parser/parquet-envelope-reader.ts +199 -0
  120. package/src/parquetjs/parser/parquet-reader.ts +122 -239
  121. package/src/parquetjs/schema/declare.ts +9 -17
  122. package/src/parquetjs/schema/shred.ts +28 -157
  123. package/src/parquetjs/schema/types.ts +27 -21
  124. package/src/parquetjs/utils/buffer-utils.ts +18 -0
  125. package/src/parquetjs/utils/file-utils.ts +4 -3
  126. package/dist/es5/lib/convert-schema-deep.ts.disabled +0 -910
  127. package/dist/es5/parquetjs/encoder/parquet-encoder.js.map +0 -1
  128. package/dist/esm/lib/convert-schema-deep.ts.disabled +0 -910
  129. package/dist/esm/parquetjs/encoder/parquet-encoder.js.map +0 -1
  130. package/dist/parquetjs/encoder/parquet-encoder.d.ts.map +0 -1
  131. package/src/lib/convert-schema-deep.ts.disabled +0 -910
package/src/parquetjs/parser/parquet-reader.ts

@@ -1,280 +1,163 @@
  // Forked from https://github.com/kbajalc/parquets under MIT license (Copyright (c) 2017 ironSource Ltd.)
- import type {ReadableFile} from '@loaders.gl/loader-utils';
-
+ import {ParquetEnvelopeReader} from './parquet-envelope-reader';
+ import {FileMetaData} from '../parquet-thrift';
  import {ParquetSchema} from '../schema/schema';
+ import {ParquetCursor} from './parquet-cursor';
+ import {PARQUET_VERSION} from '../../constants';
  import {decodeSchema} from './decoders';
- import {materializeRecords} from '../schema/shred';
-
- import {PARQUET_MAGIC, PARQUET_MAGIC_ENCRYPTED} from '../../constants';
- import {ColumnChunk, CompressionCodec, FileMetaData, RowGroup, Type} from '../parquet-thrift';
- import {
-   ParquetBuffer,
-   ParquetCompression,
-   ParquetData,
-   PrimitiveType,
-   ParquetOptions
- } from '../schema/declare';
- import {decodeFileMetadata, getThriftEnum, fieldIndexOf} from '../utils/read-utils';
- import {decodeDataPages, decodePage} from './decoders';
-
- export type ParquetReaderProps = {
-   defaultDictionarySize?: number;
- };
-
- /** Properties for initializing a ParquetRowGroupReader */
- export type ParquetIterationProps = {
-   /** Filter allowing some columns to be dropped */
-   columnList?: string[] | string[][];
- };
-
- const DEFAULT_PROPS: Required<ParquetReaderProps> = {
-   defaultDictionarySize: 1e6
- };

  /**
-  * The parquet envelope reader allows direct, unbuffered access to the individual
-  * sections of the parquet file, namely the header, footer and the row groups.
-  * This class is intended for advanced/internal users; if you just want to retrieve
-  * rows from a parquet file use the ParquetReader instead
+  * A parquet reader allows retrieving the rows from a parquet file in order.
+  * The basic usage is to create a reader and then retrieve a cursor/iterator
+  * which allows you to consume row after row until all rows have been read. It is
+  * important that you call close() after you are finished reading the file to
+  * avoid leaking file descriptors.
   */
- export class ParquetReader {
-   props: Required<ParquetReaderProps>;
-   file: ReadableFile;
-   metadata: Promise<FileMetaData> | null = null;
-
-   constructor(file: ReadableFile, props?: ParquetReaderProps) {
-     this.file = file;
-     this.props = {...DEFAULT_PROPS, ...props};
-   }
-
-   close(): void {
-     // eslint-disable-next-line @typescript-eslint/no-floating-promises
-     this.file.close();
-   }
-
-   // HIGH LEVEL METHODS
-
-   /** Yield one row at a time */
-   async *rowIterator(props?: ParquetIterationProps) {
-     for await (const rows of this.rowBatchIterator(props)) {
-       // yield *rows
-       for (const row of rows) {
-         yield row;
-       }
+ export class ParquetReader<T> implements AsyncIterable<T> {
+   /**
+    * return a new parquet reader initialized with a read function
+    */
+   static async openBlob<T>(blob: Blob): Promise<ParquetReader<T>> {
+     const readFn = async (start: number, length: number) => {
+       const arrayBuffer = await blob.slice(start, start + length).arrayBuffer();
+       return Buffer.from(arrayBuffer);
+     };
+     const closeFn = async () => {};
+     const size = blob.size;
+     const envelopeReader = new ParquetEnvelopeReader(readFn, closeFn, size);
+     try {
+       await envelopeReader.readHeader();
+       const metadata = await envelopeReader.readFooter();
+       return new ParquetReader(metadata, envelopeReader);
+     } catch (err) {
+       await envelopeReader.close();
+       throw err;
      }
    }

-   /** Yield one batch of rows at a time */
-   async *rowBatchIterator(props?: ParquetIterationProps) {
-     const schema = await this.getSchema();
-     for await (const rowGroup of this.rowGroupIterator(props)) {
-       yield materializeRecords(schema, rowGroup);
+   /**
+    * return a new parquet reader initialized with a read function
+    */
+   static async openArrayBuffer<T>(arrayBuffer: ArrayBuffer): Promise<ParquetReader<T>> {
+     const readFn = async (start: number, length: number) => Buffer.from(arrayBuffer, start, length);
+     const closeFn = async () => {};
+     const size = arrayBuffer.byteLength;
+     const envelopeReader = new ParquetEnvelopeReader(readFn, closeFn, size);
+     try {
+       await envelopeReader.readHeader();
+       const metadata = await envelopeReader.readFooter();
+       return new ParquetReader(metadata, envelopeReader);
+     } catch (err) {
+       await envelopeReader.close();
+       throw err;
      }
    }

-   /** Iterate over the raw row groups */
-   async *rowGroupIterator(props?: ParquetIterationProps) {
-     // Ensure strings are nested in arrays
-     const columnList: string[][] = (props?.columnList || []).map((x) =>
-       Array.isArray(x) ? x : [x]
-     );
-
-     const metadata = await this.getFileMetadata();
-     const schema = await this.getSchema();
-
-     const rowGroupCount = metadata?.row_groups.length || 0;
-
-     for (let rowGroupIndex = 0; rowGroupIndex < rowGroupCount; rowGroupIndex++) {
-       const rowGroup = await this.readRowGroup(
-         schema,
-         metadata.row_groups[rowGroupIndex],
-         columnList
-       );
-       yield rowGroup;
+   static async openBuffer<T>(buffer: Buffer): Promise<ParquetReader<T>> {
+     const envelopeReader = await ParquetEnvelopeReader.openBuffer(buffer);
+     try {
+       await envelopeReader.readHeader();
+       const metadata = await envelopeReader.readFooter();
+       return new ParquetReader<T>(metadata, envelopeReader);
+     } catch (err) {
+       await envelopeReader.close();
+       throw err;
      }
    }

-   async getRowCount(): Promise<number> {
-     const metadata = await this.getFileMetadata();
-     return Number(metadata.num_rows);
-   }
-
-   async getSchema(): Promise<ParquetSchema> {
-     const metadata = await this.getFileMetadata();
-     const root = metadata.schema[0];
-     const {schema: schemaDefinition} = decodeSchema(metadata.schema, 1, root.num_children!);
-     const schema = new ParquetSchema(schemaDefinition);
-     return schema;
-   }
+   public metadata: FileMetaData;
+   public envelopeReader: ParquetEnvelopeReader;
+   public schema: ParquetSchema;

    /**
-    * Returns the user (key/value) metadata for this file
-    * In parquet this is not stored on the schema like it is in arrow
+    * Create a new parquet reader from the file metadata and an envelope reader.
+    * It is not recommended to call this constructor directly except for advanced
+    * and internal use cases. Consider using one of the open{File,Buffer} methods
+    * instead
     */
-   async getSchemaMetadata(): Promise<Record<string, string>> {
-     const metadata = await this.getFileMetadata();
-     const md: Record<string, string> = {};
-     for (const kv of metadata.key_value_metadata!) {
-       md[kv.key] = kv.value!;
+   constructor(metadata: FileMetaData, envelopeReader: ParquetEnvelopeReader) {
+     if (metadata.version !== PARQUET_VERSION) {
+       throw new Error('invalid parquet version');
      }
-     return md;
-   }

-   async getFileMetadata(): Promise<FileMetaData> {
-     if (!this.metadata) {
-       await this.readHeader();
-       this.metadata = this.readFooter();
-     }
-     return this.metadata;
+     this.metadata = metadata;
+     this.envelopeReader = envelopeReader;
+     const root = this.metadata.schema[0];
+     const {schema} = decodeSchema(this.metadata.schema, 1, root.num_children!);
+     this.schema = new ParquetSchema(schema);
    }

-   // LOW LEVEL METHODS
-
-   /** Metadata is stored in the footer */
-   async readHeader(): Promise<void> {
-     const buffer = await this.file.read(0, PARQUET_MAGIC.length);
-     const magic = buffer.toString();
-     switch (magic) {
-       case PARQUET_MAGIC:
-         break;
-       case PARQUET_MAGIC_ENCRYPTED:
-         throw new Error('Encrypted parquet file not supported');
-       default:
-         throw new Error(`Invalid parquet file (magic=${magic})`);
-     }
+   /**
+    * Close this parquet reader. You MUST call this method once you're finished
+    * reading rows
+    */
+   async close(): Promise<void> {
+     await this.envelopeReader.close();
+     // this.envelopeReader = null;
+     // this.metadata = null;
    }

-   /** Metadata is stored in the footer */
-   async readFooter(): Promise<FileMetaData> {
-     const trailerLen = PARQUET_MAGIC.length + 4;
-     const trailerBuf = await this.file.read(this.file.size - trailerLen, trailerLen);
-
-     const magic = trailerBuf.slice(4).toString();
-     if (magic !== PARQUET_MAGIC) {
-       throw new Error(`Not a valid parquet file (magic="${magic})`);
+   /**
+    * Return a cursor to the file. You may open more than one cursor and use
+    * them concurrently. All cursors become invalid once close() is called on
+    * the reader object.
+    *
+    * The required_columns parameter controls which columns are actually read
+    * from disk. An empty array or no value implies all columns. A list of column
+    * names means that only those columns should be loaded from disk.
+    */
+   getCursor(): ParquetCursor<T>;
+   // @ts-ignore
+   getCursor<K extends keyof T>(columnList: (K | K[])[]): ParquetCursor<Pick<T, K>>;
+   getCursor(columnList: (string | string[])[]): ParquetCursor<Partial<T>>;
+   getCursor(columnList?: (string | string[])[]): ParquetCursor<Partial<T>> {
+     if (!columnList) {
+       // tslint:disable-next-line:no-parameter-reassignment
+       columnList = [];
      }

-     const metadataSize = trailerBuf.readUInt32LE(0);
-     const metadataOffset = this.file.size - metadataSize - trailerLen;
-     if (metadataOffset < PARQUET_MAGIC.length) {
-       throw new Error(`Invalid metadata size ${metadataOffset}`);
-     }
+     // tslint:disable-next-line:no-parameter-reassignment
+     columnList = columnList.map((x) => (Array.isArray(x) ? x : [x]));

-     const metadataBuf = await this.file.read(metadataOffset, metadataSize);
-     // let metadata = new parquet_thrift.FileMetaData();
-     // parquet_util.decodeThrift(metadata, metadataBuf);
-     const {metadata} = decodeFileMetadata(metadataBuf);
-     return metadata;
+     return new ParquetCursor<T>(
+       this.metadata,
+       this.envelopeReader,
+       this.schema,
+       columnList as string[][]
+     );
    }

-   /** Data is stored in row groups (similar to Apache Arrow record batches) */
-   async readRowGroup(
-     schema: ParquetSchema,
-     rowGroup: RowGroup,
-     columnList: string[][]
-   ): Promise<ParquetBuffer> {
-     const buffer: ParquetBuffer = {
-       rowCount: Number(rowGroup.num_rows),
-       columnData: {}
-     };
-     for (const colChunk of rowGroup.columns) {
-       const colMetadata = colChunk.meta_data;
-       const colKey = colMetadata?.path_in_schema;
-       if (columnList.length > 0 && fieldIndexOf(columnList, colKey!) < 0) {
-         continue; // eslint-disable-line no-continue
-       }
-       buffer.columnData[colKey!.join()] = await this.readColumnChunk(schema, colChunk);
-     }
-     return buffer;
+   /**
+    * Return the number of rows in this file. Note that the number of rows is
+    * not neccessarily equal to the number of rows in each column.
+    */
+   getRowCount(): number {
+     return Number(this.metadata.num_rows);
    }

    /**
-    * Each row group contains column chunks for all the columns.
+    * Returns the ParquetSchema for this file
     */
-   async readColumnChunk(schema: ParquetSchema, colChunk: ColumnChunk): Promise<ParquetData> {
-     if (colChunk.file_path !== undefined && colChunk.file_path !== null) {
-       throw new Error('external references are not supported');
-     }
-
-     const field = schema.findField(colChunk.meta_data?.path_in_schema!);
-     const type: PrimitiveType = getThriftEnum(Type, colChunk.meta_data?.type!) as any;
-
-     if (type !== field.primitiveType) {
-       throw new Error(`chunk type not matching schema: ${type}`);
-     }
-
-     const compression: ParquetCompression = getThriftEnum(
-       CompressionCodec,
-       colChunk.meta_data?.codec!
-     ) as any;
-
-     const pagesOffset = Number(colChunk.meta_data?.data_page_offset!);
-     let pagesSize = Number(colChunk.meta_data?.total_compressed_size!);
-
-     if (!colChunk.file_path) {
-       pagesSize = Math.min(
-         this.file.size - pagesOffset,
-         Number(colChunk.meta_data?.total_compressed_size)
-       );
-     }
-
-     const options: ParquetOptions = {
-       type,
-       rLevelMax: field.rLevelMax,
-       dLevelMax: field.dLevelMax,
-       compression,
-       column: field,
-       numValues: colChunk.meta_data?.num_values,
-       dictionary: []
-     };
-
-     let dictionary;
-
-     const dictionaryPageOffset = colChunk?.meta_data?.dictionary_page_offset;
-
-     if (dictionaryPageOffset) {
-       const dictionaryOffset = Number(dictionaryPageOffset);
-       // Getting dictionary from column chunk to iterate all over indexes to get dataPage values.
-       dictionary = await this.getDictionary(dictionaryOffset, options, pagesOffset);
-     }
-
-     dictionary = options.dictionary?.length ? options.dictionary : dictionary;
-     const pagesBuf = await this.file.read(pagesOffset, pagesSize);
-     return await decodeDataPages(pagesBuf, {...options, dictionary});
+   getSchema(): ParquetSchema {
+     return this.schema;
    }

    /**
-    * Getting dictionary for allows to flatten values by indices.
-    * @param dictionaryPageOffset
-    * @param options
-    * @param pagesOffset
-    * @returns
+    * Returns the user (key/value) metadata for this file
     */
-   async getDictionary(
-     dictionaryPageOffset: number,
-     options: ParquetOptions,
-     pagesOffset: number
-   ): Promise<string[]> {
-     if (dictionaryPageOffset === 0) {
-       // dictionarySize = Math.min(this.fileSize - pagesOffset, this.defaultDictionarySize);
-       // pagesBuf = await this.read(pagesOffset, dictionarySize);
-
-       // In this case we are working with parquet-mr files format. Problem is described below:
-       // https://stackoverflow.com/questions/55225108/why-is-dictionary-page-offset-0-for-plain-dictionary-encoding
-       // We need to get dictionary page from column chunk if it exists.
-       // Now if we use code commented above we don't get DICTIONARY_PAGE we get DATA_PAGE instead.
-       return [];
+   getMetadata(): Record<string, string> {
+     const md: Record<string, string> = {};
+     for (const kv of this.metadata.key_value_metadata!) {
+       md[kv.key] = kv.value!;
      }
+     return md;
+   }

-     const dictionarySize = Math.min(
-       this.file.size - dictionaryPageOffset,
-       this.props.defaultDictionarySize
-     );
-     const pagesBuf = await this.file.read(dictionaryPageOffset, dictionarySize);
-
-     const cursor = {buffer: pagesBuf, offset: 0, size: pagesBuf.length};
-     const decodedPage = await decodePage(cursor, options);
-
-     return decodedPage.dictionary!;
+   /**
+    * Implement AsyncIterable
+    */
+   // tslint:disable-next-line:function-name
+   [Symbol.asyncIterator](): AsyncIterator<T> {
+     return this.getCursor()[Symbol.asyncIterator]();
    }
  }
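
The hunk above replaces the row-group iterator API with a cursor-based reader. For orientation, a minimal consumption sketch follows, assuming only the API visible in this diff; the relative import path and the Row type are illustrative, not part of the package's documented exports:

// Usage sketch for the cursor-based reader introduced in this hunk.
// The import path and the Row shape are assumptions for illustration;
// only openArrayBuffer/getCursor/close/AsyncIterable appear in the diff.
import {ParquetReader} from './parquetjs/parser/parquet-reader';

type Row = {id: number; name: string}; // hypothetical row type

async function readRows(arrayBuffer: ArrayBuffer): Promise<Row[]> {
  // openArrayBuffer reads the magic bytes and footer up front; the
  // constructor above throws on a parquet version mismatch
  const reader = await ParquetReader.openArrayBuffer<Row>(arrayBuffer);
  try {
    const rows: Row[] = [];
    // ParquetReader<T> implements AsyncIterable<T> by delegating to
    // getCursor(); use reader.getCursor(['id']) to read a column subset
    for await (const row of reader) {
      rows.push(row);
    }
    return rows;
  } finally {
    await reader.close(); // required per the close() doc comment
  }
}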

package/src/parquetjs/schema/declare.ts

@@ -98,7 +98,6 @@ export interface ParquetField
    fields?: Record<string, ParquetField>;
  }

- /** @todo better name, this is an internal type? */
  export interface ParquetOptions {
    type: ParquetType;
    rLevelMax: number;
@@ -109,14 +108,20 @@ export interface ParquetOptions
    dictionary?: ParquetDictionary;
  }

+ export interface ParquetData {
+   dlevels: number[];
+   rlevels: number[];
+   values: any[];
+   count: number;
+   pageHeaders: PageHeader[];
+ }
+
  export interface ParquetPageData {
    dlevels: number[];
    rlevels: number[];
-   /** Actual column chunks */
-   values: any[]; // ArrayLike<any>;
+   values: any[];
    count: number;
    dictionary?: ParquetDictionary;
-   /** The "raw" page header from the file */
    pageHeader: PageHeader;
  }

@@ -124,24 +129,11 @@ export interface ParquetRecord
    [key: string]: any;
  }

- /** @
-  * Holds data for one row group (column chunks) */
  export class ParquetBuffer {
-   /** Number of rows in this page */
    rowCount: number;
-
    columnData: Record<string, ParquetData>;
    constructor(rowCount: number = 0, columnData: Record<string, ParquetData> = {}) {
      this.rowCount = rowCount;
      this.columnData = columnData;
    }
  }
-
- /** Holds the data for one column chunk */
- export interface ParquetData {
-   dlevels: number[];
-   rlevels: number[];
-   values: any[];
-   count: number;
-   pageHeaders: PageHeader[];
- }
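
The net effect of these declare.ts hunks is that ParquetData (one shredded column chunk) now sits next to ParquetPageData instead of trailing the file, and a ParquetBuffer keys one ParquetData per column path. A small sketch of how the types relate, with fabricated values and an illustrative import path:

// Sketch: one row group with two rows and a single flat 'id' column.
// Import path is an assumption; ParquetBuffer/ParquetData are as declared above.
import {ParquetBuffer, ParquetData} from './parquetjs/schema/declare';

const idColumn: ParquetData = {
  dlevels: [0, 0], // definition levels, one per value slot
  rlevels: [0, 0], // repetition levels; all zero for non-repeated columns
  values: [1, 2],
  count: 2,
  pageHeaders: [] // raw thrift PageHeader structs; empty in this sketch
};

// ParquetBuffer pairs the row count with columnData keyed by column path
const rowGroup = new ParquetBuffer(2, {id: idColumn});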