@firebaseextensions/firestore-bigquery-change-tracker 1.1.13 → 1.1.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/bigquery/clustering.js +49 -0
- package/lib/bigquery/handleFailedTransactions.js +31 -0
- package/lib/bigquery/index.js +151 -38
- package/lib/bigquery/partitioning.js +232 -0
- package/lib/bigquery/schema.js +17 -1
- package/lib/bigquery/snapshot.js +6 -5
- package/lib/bigquery/validateProject.js +18 -0
- package/lib/logs.js +59 -3
- package/lib/types.js +9 -0
- package/package.json +4 -2
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.Clustering = void 0;
|
|
4
|
+
const logs = require("../logs");
|
|
5
|
+
class Clustering {
|
|
6
|
+
constructor(config, table, schema) {
|
|
7
|
+
this.updateCluster = async (metaData) => {
|
|
8
|
+
const clustering = { fields: this.config.clustering };
|
|
9
|
+
metaData.clustering = clustering;
|
|
10
|
+
logs.updatedClustering(this.config.clustering.join(","));
|
|
11
|
+
return Promise.resolve();
|
|
12
|
+
};
|
|
13
|
+
this.removeCluster = async (metaData) => {
|
|
14
|
+
metaData.clustering = null;
|
|
15
|
+
logs.removedClustering(this.table.id);
|
|
16
|
+
return Promise.resolve();
|
|
17
|
+
};
|
|
18
|
+
this.updateClustering = async (metaData) => {
|
|
19
|
+
/** Return if invalid config */
|
|
20
|
+
if (await this.hasInvalidFields(metaData))
|
|
21
|
+
return Promise.resolve();
|
|
22
|
+
return !!this.config.clustering && !!this.config.clustering.length
|
|
23
|
+
? this.updateCluster(metaData)
|
|
24
|
+
: this.removeCluster(metaData);
|
|
25
|
+
};
|
|
26
|
+
this.config = config;
|
|
27
|
+
this.table = table;
|
|
28
|
+
this.schema = schema;
|
|
29
|
+
}
|
|
30
|
+
hasValidTableReference() {
|
|
31
|
+
logs.invalidTableReference();
|
|
32
|
+
return !!this.table;
|
|
33
|
+
}
|
|
34
|
+
async hasInvalidFields(metaData) {
|
|
35
|
+
const { clustering = [] } = this.config;
|
|
36
|
+
if (!clustering)
|
|
37
|
+
return Promise.resolve(false);
|
|
38
|
+
const fieldNames = metaData
|
|
39
|
+
? metaData.schema.fields.map(($) => $.name)
|
|
40
|
+
: [];
|
|
41
|
+
const invalidFields = clustering.filter(($) => !fieldNames.includes($));
|
|
42
|
+
if (invalidFields.length) {
|
|
43
|
+
logs.invalidClustering(invalidFields.join(","));
|
|
44
|
+
return Promise.resolve(true);
|
|
45
|
+
}
|
|
46
|
+
return Promise.resolve(false);
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
exports.Clustering = Clustering;
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const firebase = require("firebase-admin");
|
|
4
|
+
if (!firebase.apps.length) {
|
|
5
|
+
firebase.initializeApp();
|
|
6
|
+
firebase.firestore().settings({ ignoreUndefinedProperties: true });
|
|
7
|
+
}
|
|
8
|
+
exports.default = async (rows, config, e) => {
|
|
9
|
+
const db = firebase.firestore();
|
|
10
|
+
const batchArray = [db.batch()];
|
|
11
|
+
let operationCounter = 0;
|
|
12
|
+
let batchIndex = 0;
|
|
13
|
+
rows.forEach((row) => {
|
|
14
|
+
var ref = db.collection(config.backupTableId).doc(row.insertId);
|
|
15
|
+
batchArray[batchIndex].set(ref, {
|
|
16
|
+
...row,
|
|
17
|
+
error_details: e.message,
|
|
18
|
+
});
|
|
19
|
+
operationCounter++;
|
|
20
|
+
// Check if max limit for batch has been met.
|
|
21
|
+
if (operationCounter === 499) {
|
|
22
|
+
batchArray.push(db.batch());
|
|
23
|
+
batchIndex++;
|
|
24
|
+
operationCounter = 0;
|
|
25
|
+
}
|
|
26
|
+
});
|
|
27
|
+
for (let batch of batchArray) {
|
|
28
|
+
await batch.commit();
|
|
29
|
+
}
|
|
30
|
+
return Promise.resolve();
|
|
31
|
+
};
|
package/lib/bigquery/index.js
CHANGED
|
@@ -19,10 +19,14 @@ exports.FirestoreBigQueryEventHistoryTracker = void 0;
|
|
|
19
19
|
const bigquery = require("@google-cloud/bigquery");
|
|
20
20
|
const firebase = require("firebase-admin");
|
|
21
21
|
const traverse = require("traverse");
|
|
22
|
+
const node_fetch_1 = require("node-fetch");
|
|
22
23
|
const schema_1 = require("./schema");
|
|
23
24
|
const snapshot_1 = require("./snapshot");
|
|
25
|
+
const handleFailedTransactions_1 = require("./handleFailedTransactions");
|
|
24
26
|
const tracker_1 = require("../tracker");
|
|
25
27
|
const logs = require("../logs");
|
|
28
|
+
const partitioning_1 = require("./partitioning");
|
|
29
|
+
const clustering_1 = require("./clustering");
|
|
26
30
|
var schema_2 = require("./schema");
|
|
27
31
|
Object.defineProperty(exports, "RawChangelogSchema", { enumerable: true, get: function () { return schema_2.RawChangelogSchema; } });
|
|
28
32
|
Object.defineProperty(exports, "RawChangelogViewSchema", { enumerable: true, get: function () { return schema_2.RawChangelogViewSchema; } });
|
|
@@ -38,15 +42,19 @@ Object.defineProperty(exports, "RawChangelogViewSchema", { enumerable: true, get
|
|
|
38
42
|
class FirestoreBigQueryEventHistoryTracker {
|
|
39
43
|
constructor(config) {
|
|
40
44
|
this.config = config;
|
|
41
|
-
this.
|
|
45
|
+
this._initialized = false;
|
|
42
46
|
this.bq = new bigquery.BigQuery();
|
|
47
|
+
this.bq.projectId = config.bqProjectId || process.env.PROJECT_ID;
|
|
43
48
|
if (!this.config.datasetLocation) {
|
|
44
49
|
this.config.datasetLocation = "us";
|
|
45
50
|
}
|
|
46
51
|
}
|
|
47
52
|
async record(events) {
|
|
48
53
|
await this.initialize();
|
|
54
|
+
const partitionHandler = new partitioning_1.Partitioning(this.config);
|
|
49
55
|
const rows = events.map((event) => {
|
|
56
|
+
const partitionValue = partitionHandler.getPartitionValue(event);
|
|
57
|
+
const { documentId, ...pathParams } = event.pathParams || {};
|
|
50
58
|
return {
|
|
51
59
|
insertId: event.eventId,
|
|
52
60
|
json: {
|
|
@@ -56,10 +64,26 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
56
64
|
document_id: event.documentId,
|
|
57
65
|
operation: tracker_1.ChangeType[event.operation],
|
|
58
66
|
data: JSON.stringify(this.serializeData(event.data)),
|
|
67
|
+
...partitionValue,
|
|
68
|
+
...(this.config.wildcardIds &&
|
|
69
|
+
event.pathParams && { path_params: JSON.stringify(pathParams) }),
|
|
59
70
|
},
|
|
60
71
|
};
|
|
61
72
|
});
|
|
62
|
-
await this.
|
|
73
|
+
const transformedRows = await this.transformRows(rows);
|
|
74
|
+
await this.insertData(transformedRows);
|
|
75
|
+
}
|
|
76
|
+
async transformRows(rows) {
|
|
77
|
+
if (this.config.transformFunction && this.config.transformFunction !== "") {
|
|
78
|
+
const response = await node_fetch_1.default(this.config.transformFunction, {
|
|
79
|
+
method: "post",
|
|
80
|
+
body: JSON.stringify({ data: rows }),
|
|
81
|
+
headers: { "Content-Type": "application/json" },
|
|
82
|
+
});
|
|
83
|
+
const responseJson = await response.json();
|
|
84
|
+
return responseJson.data;
|
|
85
|
+
}
|
|
86
|
+
return rows;
|
|
63
87
|
}
|
|
64
88
|
serializeData(eventData) {
|
|
65
89
|
if (typeof eventData === "undefined") {
|
|
@@ -87,10 +111,8 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
87
111
|
async isRetryableInsertionError(e) {
|
|
88
112
|
let isRetryable = true;
|
|
89
113
|
const expectedErrors = [
|
|
90
|
-
{
|
|
91
|
-
|
|
92
|
-
location: "document_id",
|
|
93
|
-
},
|
|
114
|
+
{ message: "no such field.", location: schema_1.documentIdField.name },
|
|
115
|
+
{ message: "no such field.", location: schema_1.documentPathParams.name },
|
|
94
116
|
];
|
|
95
117
|
if (e.response &&
|
|
96
118
|
e.response.insertErrors &&
|
|
@@ -111,6 +133,32 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
111
133
|
}
|
|
112
134
|
return isRetryable;
|
|
113
135
|
}
|
|
136
|
+
/**
|
|
137
|
+
* Tables can often take time to create and propagate.
|
|
138
|
+
* A half a second delay is added per check while the function
|
|
139
|
+
* continually re-checks until the referenced dataset and table become available.
|
|
140
|
+
*/
|
|
141
|
+
async waitForInitialization() {
|
|
142
|
+
return new Promise((resolve) => {
|
|
143
|
+
let handle = setInterval(async () => {
|
|
144
|
+
try {
|
|
145
|
+
const dataset = this.bigqueryDataset();
|
|
146
|
+
const changelogName = this.rawChangeLogTableName();
|
|
147
|
+
const table = dataset.table(changelogName);
|
|
148
|
+
const [datasetExists] = await dataset.exists();
|
|
149
|
+
const [tableExists] = await table.exists();
|
|
150
|
+
if (datasetExists && tableExists) {
|
|
151
|
+
clearInterval(handle);
|
|
152
|
+
return resolve(table);
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
catch (ex) {
|
|
156
|
+
clearInterval(handle);
|
|
157
|
+
logs.failedToInitializeWait(ex.message);
|
|
158
|
+
}
|
|
159
|
+
}, 5000);
|
|
160
|
+
});
|
|
161
|
+
}
|
|
114
162
|
/**
|
|
115
163
|
* Inserts rows of data into the BigQuery raw change log table.
|
|
116
164
|
*/
|
|
@@ -134,8 +182,13 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
134
182
|
logs.dataInsertRetried(rows.length);
|
|
135
183
|
return this.insertData(rows, { ...overrideOptions, ignoreUnknownValues: true }, retry);
|
|
136
184
|
}
|
|
185
|
+
// Exceeded number of retires, save in failed collection
|
|
186
|
+
if (!retry && this.config.backupTableId) {
|
|
187
|
+
await handleFailedTransactions_1.default(rows, this.config, e);
|
|
188
|
+
}
|
|
137
189
|
// Reinitializing in case the destintation table is modified.
|
|
138
|
-
this.
|
|
190
|
+
this._initialized = false;
|
|
191
|
+
logs.bigQueryTableInsertErrors(e.errors);
|
|
139
192
|
throw e;
|
|
140
193
|
}
|
|
141
194
|
}
|
|
@@ -144,13 +197,19 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
144
197
|
* After the first invokation, it skips initialization assuming these resources are still there.
|
|
145
198
|
*/
|
|
146
199
|
async initialize() {
|
|
147
|
-
|
|
148
|
-
|
|
200
|
+
try {
|
|
201
|
+
if (this._initialized) {
|
|
202
|
+
return;
|
|
203
|
+
}
|
|
204
|
+
await this.initializeDataset();
|
|
205
|
+
await this.initializeRawChangeLogTable();
|
|
206
|
+
await this.initializeLatestView();
|
|
207
|
+
this._initialized = true;
|
|
208
|
+
}
|
|
209
|
+
catch (ex) {
|
|
210
|
+
await this.waitForInitialization();
|
|
211
|
+
this._initialized = true;
|
|
149
212
|
}
|
|
150
|
-
await this.initializeDataset();
|
|
151
|
-
await this.initializeRawChangeLogTable();
|
|
152
|
-
await this.initializeLatestView();
|
|
153
|
-
this.initialized = true;
|
|
154
213
|
}
|
|
155
214
|
/**
|
|
156
215
|
* Creates the specified dataset if it doesn't already exists.
|
|
@@ -162,9 +221,14 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
162
221
|
logs.bigQueryDatasetExists(this.config.datasetId);
|
|
163
222
|
}
|
|
164
223
|
else {
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
224
|
+
try {
|
|
225
|
+
logs.bigQueryDatasetCreating(this.config.datasetId);
|
|
226
|
+
await dataset.create();
|
|
227
|
+
logs.bigQueryDatasetCreated(this.config.datasetId);
|
|
228
|
+
}
|
|
229
|
+
catch (ex) {
|
|
230
|
+
logs.tableCreationError(this.config.datasetId, ex.message);
|
|
231
|
+
}
|
|
168
232
|
}
|
|
169
233
|
return dataset;
|
|
170
234
|
}
|
|
@@ -176,30 +240,47 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
176
240
|
const dataset = this.bigqueryDataset();
|
|
177
241
|
const table = dataset.table(changelogName);
|
|
178
242
|
const [tableExists] = await table.exists();
|
|
243
|
+
const partitioning = new partitioning_1.Partitioning(this.config, table);
|
|
244
|
+
const clustering = new clustering_1.Clustering(this.config, table);
|
|
179
245
|
if (tableExists) {
|
|
180
246
|
logs.bigQueryTableAlreadyExists(table.id, dataset.id);
|
|
181
247
|
const [metadata] = await table.getMetadata();
|
|
182
|
-
const fields = metadata.schema.fields;
|
|
248
|
+
const fields = metadata.schema ? metadata.schema.fields : [];
|
|
249
|
+
await clustering.updateClustering(metadata);
|
|
183
250
|
const documentIdColExists = fields.find((column) => column.name === "document_id");
|
|
251
|
+
const pathParamsColExists = fields.find((column) => column.name === "path_params");
|
|
184
252
|
if (!documentIdColExists) {
|
|
185
253
|
fields.push(schema_1.documentIdField);
|
|
254
|
+
logs.addNewColumn(this.rawChangeLogTableName(), schema_1.documentIdField.name);
|
|
255
|
+
}
|
|
256
|
+
if (!pathParamsColExists && this.config.wildcardIds) {
|
|
257
|
+
fields.push(schema_1.documentPathParams);
|
|
258
|
+
logs.addNewColumn(this.rawChangeLogTableName(), schema_1.documentPathParams.name);
|
|
259
|
+
}
|
|
260
|
+
await partitioning.addPartitioningToSchema(metadata.schema.fields);
|
|
261
|
+
if (!documentIdColExists || !pathParamsColExists) {
|
|
186
262
|
await table.setMetadata(metadata);
|
|
187
|
-
logs.addDocumentIdColumn(this.rawChangeLogTableName());
|
|
188
263
|
}
|
|
189
264
|
}
|
|
190
265
|
else {
|
|
191
266
|
logs.bigQueryTableCreating(changelogName);
|
|
192
|
-
const
|
|
193
|
-
|
|
194
|
-
schema
|
|
195
|
-
}
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
267
|
+
const schema = { fields: [...schema_1.RawChangelogSchema.fields] };
|
|
268
|
+
if (this.config.wildcardIds) {
|
|
269
|
+
schema.fields.push(schema_1.documentPathParams);
|
|
270
|
+
}
|
|
271
|
+
const options = { friendlyName: changelogName, schema };
|
|
272
|
+
//Add partitioning
|
|
273
|
+
await partitioning.addPartitioningToSchema(schema.fields);
|
|
274
|
+
await partitioning.updateTableMetadata(options);
|
|
275
|
+
// Add clustering
|
|
276
|
+
await clustering.updateClustering(options);
|
|
277
|
+
try {
|
|
278
|
+
await table.create(options);
|
|
279
|
+
logs.bigQueryTableCreated(changelogName);
|
|
280
|
+
}
|
|
281
|
+
catch (ex) {
|
|
282
|
+
logs.tableCreationError(changelogName, ex.message);
|
|
200
283
|
}
|
|
201
|
-
await table.create(options);
|
|
202
|
-
logs.bigQueryTableCreated(changelogName);
|
|
203
284
|
}
|
|
204
285
|
return table;
|
|
205
286
|
}
|
|
@@ -211,32 +292,64 @@ class FirestoreBigQueryEventHistoryTracker {
|
|
|
211
292
|
const dataset = this.bigqueryDataset();
|
|
212
293
|
const view = dataset.table(this.rawLatestView());
|
|
213
294
|
const [viewExists] = await view.exists();
|
|
295
|
+
const schema = schema_1.RawChangelogViewSchema;
|
|
296
|
+
const partitioning = new partitioning_1.Partitioning(this.config, view);
|
|
214
297
|
if (viewExists) {
|
|
215
298
|
logs.bigQueryViewAlreadyExists(view.id, dataset.id);
|
|
216
299
|
const [metadata] = await view.getMetadata();
|
|
217
|
-
const fields = metadata.schema.fields;
|
|
300
|
+
const fields = metadata.schema ? metadata.schema.fields : [];
|
|
301
|
+
if (this.config.wildcardIds) {
|
|
302
|
+
schema.fields.push(schema_1.documentPathParams);
|
|
303
|
+
}
|
|
218
304
|
const documentIdColExists = fields.find((column) => column.name === "document_id");
|
|
305
|
+
const pathParamsColExists = fields.find((column) => column.name === "path_params");
|
|
219
306
|
if (!documentIdColExists) {
|
|
220
|
-
metadata.view = snapshot_1.latestConsistentSnapshotView(this.config.datasetId, this.rawChangeLogTableName());
|
|
221
|
-
|
|
222
|
-
|
|
307
|
+
metadata.view = snapshot_1.latestConsistentSnapshotView(this.config.datasetId, this.rawChangeLogTableName(), schema);
|
|
308
|
+
logs.addNewColumn(this.rawLatestView(), schema_1.documentIdField.name);
|
|
309
|
+
}
|
|
310
|
+
if (!pathParamsColExists && this.config.wildcardIds) {
|
|
311
|
+
metadata.view = snapshot_1.latestConsistentSnapshotView(this.config.datasetId, this.rawChangeLogTableName(), schema);
|
|
312
|
+
logs.addNewColumn(this.rawLatestView(), schema_1.documentPathParams.name);
|
|
223
313
|
}
|
|
314
|
+
//Add partitioning
|
|
315
|
+
await partitioning.addPartitioningToSchema(schema.fields);
|
|
316
|
+
//TODO: Tidy up and format / add test cases?
|
|
317
|
+
// if (
|
|
318
|
+
// !documentIdColExists ||
|
|
319
|
+
// (!pathParamsColExists && this.config.wildcardIds) ||
|
|
320
|
+
// partition.isValidPartitionForExistingTable(partitionColExists)
|
|
321
|
+
// ) {
|
|
322
|
+
await view.setMetadata(metadata);
|
|
323
|
+
// }
|
|
224
324
|
}
|
|
225
325
|
else {
|
|
226
|
-
const
|
|
326
|
+
const schema = { fields: [...schema_1.RawChangelogViewSchema.fields] };
|
|
327
|
+
//Add partitioning field
|
|
328
|
+
await partitioning.addPartitioningToSchema(schema.fields);
|
|
329
|
+
//TODO Create notification for a user that View cannot be Time Partitioned by the field.
|
|
330
|
+
// await partitioning.updateTableMetadata(options);
|
|
331
|
+
if (this.config.wildcardIds) {
|
|
332
|
+
schema.fields.push(schema_1.documentPathParams);
|
|
333
|
+
}
|
|
334
|
+
const latestSnapshot = snapshot_1.latestConsistentSnapshotView(this.config.datasetId, this.rawChangeLogTableName(), schema, this.bq.projectId);
|
|
227
335
|
logs.bigQueryViewCreating(this.rawLatestView(), latestSnapshot.query);
|
|
228
336
|
const options = {
|
|
229
337
|
friendlyName: this.rawLatestView(),
|
|
230
338
|
view: latestSnapshot,
|
|
231
339
|
};
|
|
232
|
-
if (this.config.
|
|
340
|
+
if (this.config.timePartitioning) {
|
|
233
341
|
options.timePartitioning = {
|
|
234
|
-
type: this.config.
|
|
342
|
+
type: this.config.timePartitioning,
|
|
235
343
|
};
|
|
236
344
|
}
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
345
|
+
try {
|
|
346
|
+
await view.create(options);
|
|
347
|
+
await view.setMetadata({ schema: schema_1.RawChangelogViewSchema });
|
|
348
|
+
logs.bigQueryViewCreated(this.rawLatestView());
|
|
349
|
+
}
|
|
350
|
+
catch (ex) {
|
|
351
|
+
logs.tableCreationError(this.rawLatestView(), ex.message);
|
|
352
|
+
}
|
|
240
353
|
}
|
|
241
354
|
return view;
|
|
242
355
|
}
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.Partitioning = void 0;
|
|
4
|
+
const firebase = require("firebase-admin");
|
|
5
|
+
const logs = require("../logs");
|
|
6
|
+
const schema_1 = require("./schema");
|
|
7
|
+
const bigquery_1 = require("@google-cloud/bigquery");
|
|
8
|
+
const types_1 = require("../types");
|
|
9
|
+
class Partitioning {
|
|
10
|
+
constructor(config, table, schema) {
|
|
11
|
+
this.config = config;
|
|
12
|
+
this.table = table;
|
|
13
|
+
this.schema = schema;
|
|
14
|
+
}
|
|
15
|
+
isPartitioningEnabled() {
|
|
16
|
+
const { timePartitioning } = this.config;
|
|
17
|
+
return !!timePartitioning;
|
|
18
|
+
}
|
|
19
|
+
isValidPartitionTypeString(value) {
|
|
20
|
+
return typeof value === "string";
|
|
21
|
+
}
|
|
22
|
+
async metaDataSchemaFields() {
|
|
23
|
+
let metadata;
|
|
24
|
+
try {
|
|
25
|
+
[metadata] = await this.table.getMetadata();
|
|
26
|
+
}
|
|
27
|
+
catch {
|
|
28
|
+
console.log("No metadata found");
|
|
29
|
+
return null;
|
|
30
|
+
}
|
|
31
|
+
/** Return null if no valid schema on table **/
|
|
32
|
+
if (!metadata.schema)
|
|
33
|
+
return null;
|
|
34
|
+
return metadata.schema.fields;
|
|
35
|
+
}
|
|
36
|
+
isValidPartitionTypeDate(value) {
|
|
37
|
+
/* Check if valid timestamp value from sdk */
|
|
38
|
+
if (value instanceof firebase.firestore.Timestamp)
|
|
39
|
+
return true;
|
|
40
|
+
/* Check if valid date/time value from console */
|
|
41
|
+
return Object.prototype.toString.call(value) === "[object Date]";
|
|
42
|
+
}
|
|
43
|
+
hasHourAndDatePartitionConfig() {
|
|
44
|
+
if (this.config.timePartitioning === "HOUR" &&
|
|
45
|
+
this.config.timePartitioningFieldType === "DATE") {
|
|
46
|
+
logs.hourAndDatePartitioningWarning();
|
|
47
|
+
return true;
|
|
48
|
+
}
|
|
49
|
+
return false;
|
|
50
|
+
}
|
|
51
|
+
hasValidCustomPartitionConfig() {
|
|
52
|
+
/* Return false if partition type option has not been set*/
|
|
53
|
+
if (!this.isPartitioningEnabled())
|
|
54
|
+
return false;
|
|
55
|
+
const { timePartitioningField, timePartitioningFieldType, timePartitioningFirestoreField, } = this.config;
|
|
56
|
+
const hasNoCustomOptions = !timePartitioningField &&
|
|
57
|
+
!timePartitioningFieldType &&
|
|
58
|
+
!timePartitioningFirestoreField;
|
|
59
|
+
/* No custom congig has been set, use partition value option only */
|
|
60
|
+
if (hasNoCustomOptions)
|
|
61
|
+
return true;
|
|
62
|
+
/* check if all options have been provided to be */
|
|
63
|
+
return (!!timePartitioningField &&
|
|
64
|
+
!!timePartitioningFieldType &&
|
|
65
|
+
!!timePartitioningFirestoreField);
|
|
66
|
+
}
|
|
67
|
+
hasValidTimePartitionOption() {
|
|
68
|
+
const { timePartitioning } = this.config;
|
|
69
|
+
return ["HOUR", "DAY", "MONTH", "YEAR"].includes(timePartitioning);
|
|
70
|
+
}
|
|
71
|
+
hasValidTimePartitionType() {
|
|
72
|
+
const { timePartitioningFieldType } = this.config;
|
|
73
|
+
if (!timePartitioningFieldType || timePartitioningFieldType === undefined)
|
|
74
|
+
return true;
|
|
75
|
+
return ["TIMESTAMP", "DATE", "DATETIME"].includes(timePartitioningFieldType);
|
|
76
|
+
}
|
|
77
|
+
async hasExistingSchema() {
|
|
78
|
+
const [metadata] = await this.table.getMetadata();
|
|
79
|
+
return !!metadata.schema;
|
|
80
|
+
}
|
|
81
|
+
hasValidTableReference() {
|
|
82
|
+
logs.invalidTableReference();
|
|
83
|
+
return !!this.table;
|
|
84
|
+
}
|
|
85
|
+
async isTablePartitioned() {
|
|
86
|
+
if (!this.table)
|
|
87
|
+
return Promise.resolve(false);
|
|
88
|
+
// No table provided, cannot evaluate
|
|
89
|
+
if (this.table.exists()) {
|
|
90
|
+
logs.cannotPartitionExistingTable(this.table);
|
|
91
|
+
return Promise.resolve(false);
|
|
92
|
+
}
|
|
93
|
+
/*** No table exists, return */
|
|
94
|
+
const [tableExists] = await this.table.exists();
|
|
95
|
+
if (!tableExists)
|
|
96
|
+
return Promise.resolve(false);
|
|
97
|
+
/* Check if partition metadata already exists */
|
|
98
|
+
const [metadata] = await this.table.getMetadata();
|
|
99
|
+
if (!!metadata.timePartitioning)
|
|
100
|
+
return Promise.resolve(true);
|
|
101
|
+
/** Find schema fields **/
|
|
102
|
+
const schemaFields = await this.metaDataSchemaFields();
|
|
103
|
+
/** No Schema exists, return */
|
|
104
|
+
if (!schemaFields)
|
|
105
|
+
return Promise.resolve(false);
|
|
106
|
+
/* Return false if time partition field not found */
|
|
107
|
+
return schemaFields.some((column) => column.name === this.config.timePartitioningField);
|
|
108
|
+
}
|
|
109
|
+
async isValidPartitionForExistingTable() {
|
|
110
|
+
if (this.isTablePartitioned())
|
|
111
|
+
return false;
|
|
112
|
+
return this.hasValidCustomPartitionConfig();
|
|
113
|
+
}
|
|
114
|
+
isValidPartitionForNewTable() {
|
|
115
|
+
if (!this.isPartitioningEnabled())
|
|
116
|
+
return false;
|
|
117
|
+
return this.hasValidCustomPartitionConfig();
|
|
118
|
+
}
|
|
119
|
+
convertDateValue(fieldValue) {
|
|
120
|
+
const { timePartitioningFieldType } = this.config;
|
|
121
|
+
/* Return as Datetime value */
|
|
122
|
+
if (timePartitioningFieldType === types_1.PartitionFieldType.DATETIME) {
|
|
123
|
+
return bigquery_1.BigQuery.datetime(fieldValue.toISOString()).value;
|
|
124
|
+
}
|
|
125
|
+
/* Return as Date value */
|
|
126
|
+
if (timePartitioningFieldType === types_1.PartitionFieldType.DATE) {
|
|
127
|
+
return bigquery_1.BigQuery.date(fieldValue.toISOString().substring(0, 10)).value;
|
|
128
|
+
}
|
|
129
|
+
/* Return as Timestamp */
|
|
130
|
+
return bigquery_1.BigQuery.timestamp(fieldValue).value;
|
|
131
|
+
}
|
|
132
|
+
/*
|
|
133
|
+
Extracts a valid Partition field from the Document Change Event.
|
|
134
|
+
Matches result based on a pre-defined Firestore field matching the event data object.
|
|
135
|
+
Return an empty object if no field name or value provided.
|
|
136
|
+
Returns empty object if not a string or timestamp
|
|
137
|
+
Logs warning if not a valid datatype
|
|
138
|
+
Delete changes events have no data, return early as cannot partition on empty data.
|
|
139
|
+
**/
|
|
140
|
+
getPartitionValue(event) {
|
|
141
|
+
if (!event.data)
|
|
142
|
+
return {};
|
|
143
|
+
const firestoreFieldName = this.config.timePartitioningFirestoreField;
|
|
144
|
+
const fieldName = this.config.timePartitioningField;
|
|
145
|
+
const fieldValue = event.data[firestoreFieldName];
|
|
146
|
+
if (!fieldName || !fieldValue) {
|
|
147
|
+
return {};
|
|
148
|
+
}
|
|
149
|
+
if (this.isValidPartitionTypeString(fieldValue)) {
|
|
150
|
+
return { [fieldName]: fieldValue };
|
|
151
|
+
}
|
|
152
|
+
if (this.isValidPartitionTypeDate(fieldValue)) {
|
|
153
|
+
/* Return converted console value */
|
|
154
|
+
if (fieldValue.toDate) {
|
|
155
|
+
return { [fieldName]: this.convertDateValue(fieldValue.toDate()) };
|
|
156
|
+
}
|
|
157
|
+
/* Return standard date value */
|
|
158
|
+
return { [fieldName]: fieldValue };
|
|
159
|
+
}
|
|
160
|
+
logs.firestoreTimePartitionFieldError(event.documentName, fieldName, firestoreFieldName, fieldValue);
|
|
161
|
+
return {};
|
|
162
|
+
}
|
|
163
|
+
customFieldExists(fields = []) {
|
|
164
|
+
if (!fields.length)
|
|
165
|
+
return false;
|
|
166
|
+
const { timePartitioningField } = this.config;
|
|
167
|
+
return fields.map(($) => $.name).includes(timePartitioningField);
|
|
168
|
+
}
|
|
169
|
+
async addPartitioningToSchema(fields = []) {
|
|
170
|
+
/** check if class has valid table reference */
|
|
171
|
+
if (!this.hasValidTableReference())
|
|
172
|
+
return Promise.resolve();
|
|
173
|
+
/** return if table is already partitioned **/
|
|
174
|
+
if (await this.isTablePartitioned())
|
|
175
|
+
return Promise.resolve();
|
|
176
|
+
/** return if an invalid partition type has been requested**/
|
|
177
|
+
if (!this.hasValidTimePartitionType())
|
|
178
|
+
return Promise.resolve();
|
|
179
|
+
/** Return if invalid partitioning and field type combination */
|
|
180
|
+
if (this.hasHourAndDatePartitionConfig())
|
|
181
|
+
return Promise.resolve();
|
|
182
|
+
/** return if an invalid partition type has been requested**/
|
|
183
|
+
if (!this.hasValidCustomPartitionConfig())
|
|
184
|
+
return Promise.resolve();
|
|
185
|
+
/** return if an invalid partition type has been requested**/
|
|
186
|
+
if (!this.hasValidCustomPartitionConfig())
|
|
187
|
+
return Promise.resolve();
|
|
188
|
+
/** update fields with new schema option ** */
|
|
189
|
+
if (!this.hasValidTimePartitionOption())
|
|
190
|
+
return Promise.resolve();
|
|
191
|
+
/* Check if partition field has been provided */
|
|
192
|
+
if (!this.config.timePartitioningField)
|
|
193
|
+
return Promise.resolve();
|
|
194
|
+
// if (await !this.hasExistingSchema) return Promise.resolve();
|
|
195
|
+
// Field already exists on schema, skip
|
|
196
|
+
if (this.customFieldExists(fields))
|
|
197
|
+
return Promise.resolve();
|
|
198
|
+
fields.push(schema_1.getNewPartitionField(this.config));
|
|
199
|
+
/** log successful addition of partition column */
|
|
200
|
+
logs.addPartitionFieldColumn(this.table.id, this.config.timePartitioningField);
|
|
201
|
+
return Promise.resolve();
|
|
202
|
+
}
|
|
203
|
+
async updateTableMetadata(options) {
|
|
204
|
+
/** return if table is already partitioned **/
|
|
205
|
+
if (await this.isTablePartitioned())
|
|
206
|
+
return Promise.resolve();
|
|
207
|
+
/** return if an invalid partition type has been requested**/
|
|
208
|
+
if (!this.hasValidTimePartitionType())
|
|
209
|
+
return Promise.resolve();
|
|
210
|
+
/** update fields with new schema option ** */
|
|
211
|
+
if (!this.hasValidTimePartitionOption())
|
|
212
|
+
return Promise.resolve();
|
|
213
|
+
/** Return if invalid partitioning and field type combination */
|
|
214
|
+
if (this.hasHourAndDatePartitionConfig())
|
|
215
|
+
return Promise.resolve();
|
|
216
|
+
/** return if an invalid partition type has been requested**/
|
|
217
|
+
if (!this.hasValidCustomPartitionConfig())
|
|
218
|
+
return Promise.resolve();
|
|
219
|
+
// if (await !this.hasExistingSchema) return Promise.resolve();
|
|
220
|
+
if (this.config.timePartitioning) {
|
|
221
|
+
options.timePartitioning = { type: this.config.timePartitioning };
|
|
222
|
+
}
|
|
223
|
+
//TODO: Add check for skipping adding views partition field, this is not a feature that can be added .
|
|
224
|
+
if (this.config.timePartitioningField) {
|
|
225
|
+
options.timePartitioning = {
|
|
226
|
+
...options.timePartitioning,
|
|
227
|
+
field: this.config.timePartitioningField,
|
|
228
|
+
};
|
|
229
|
+
}
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
exports.Partitioning = Partitioning;
|
package/lib/bigquery/schema.js
CHANGED
|
@@ -15,7 +15,7 @@
|
|
|
15
15
|
* limitations under the License.
|
|
16
16
|
*/
|
|
17
17
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
18
|
-
exports.RawChangelogSchema = exports.RawChangelogViewSchema = exports.documentIdField = exports.longitudeField = exports.latitudeField = exports.timestampField = exports.operationField = exports.eventIdField = exports.documentNameField = exports.dataField = void 0;
|
|
18
|
+
exports.getNewPartitionField = exports.RawChangelogSchema = exports.RawChangelogViewSchema = exports.documentPathParams = exports.documentIdField = exports.longitudeField = exports.latitudeField = exports.timestampField = exports.operationField = exports.eventIdField = exports.documentNameField = exports.dataField = void 0;
|
|
19
19
|
const bigQueryField = (name, type, mode, fields) => ({
|
|
20
20
|
fields,
|
|
21
21
|
mode: mode || "NULLABLE",
|
|
@@ -37,6 +37,12 @@ exports.documentIdField = {
|
|
|
37
37
|
type: "STRING",
|
|
38
38
|
description: "The document id as defined in the firestore database.",
|
|
39
39
|
};
|
|
40
|
+
exports.documentPathParams = {
|
|
41
|
+
name: "path_params",
|
|
42
|
+
mode: "NULLABLE",
|
|
43
|
+
type: "STRING",
|
|
44
|
+
description: "JSON string representing wildcard params with Firestore Document ids",
|
|
45
|
+
};
|
|
40
46
|
/*
|
|
41
47
|
* We cannot specify a schema for view creation, and all view columns default
|
|
42
48
|
* to the NULLABLE mode.
|
|
@@ -111,3 +117,13 @@ exports.RawChangelogSchema = {
|
|
|
111
117
|
exports.documentIdField,
|
|
112
118
|
],
|
|
113
119
|
};
|
|
120
|
+
// Helper function for Partitioned Changelogs field
|
|
121
|
+
exports.getNewPartitionField = (config) => {
|
|
122
|
+
const { timePartitioningField, timePartitioningFieldType } = config;
|
|
123
|
+
return {
|
|
124
|
+
name: timePartitioningField,
|
|
125
|
+
mode: "NULLABLE",
|
|
126
|
+
type: timePartitioningFieldType,
|
|
127
|
+
description: "The document TimePartition partition field selected by user",
|
|
128
|
+
};
|
|
129
|
+
};
|
package/lib/bigquery/snapshot.js
CHANGED
|
@@ -19,13 +19,13 @@ exports.buildLatestSnapshotViewQuery = exports.latestConsistentSnapshotView = vo
|
|
|
19
19
|
const sqlFormatter = require("sql-formatter");
|
|
20
20
|
const schema_1 = require("./schema");
|
|
21
21
|
const excludeFields = ["document_name", "document_id"];
|
|
22
|
-
exports.latestConsistentSnapshotView = (datasetId, tableName) => ({
|
|
23
|
-
query: buildLatestSnapshotViewQuery(datasetId, tableName, schema_1.timestampField.name,
|
|
22
|
+
exports.latestConsistentSnapshotView = (datasetId, tableName, schema, bqProjectId) => ({
|
|
23
|
+
query: buildLatestSnapshotViewQuery(datasetId, tableName, schema_1.timestampField.name, schema["fields"]
|
|
24
24
|
.map((field) => field.name)
|
|
25
|
-
.filter((name) => excludeFields.indexOf(name) === -1)),
|
|
25
|
+
.filter((name) => excludeFields.indexOf(name) === -1), bqProjectId),
|
|
26
26
|
useLegacySql: false,
|
|
27
27
|
});
|
|
28
|
-
function buildLatestSnapshotViewQuery(datasetId, tableName, timestampColumnName, groupByColumns) {
|
|
28
|
+
function buildLatestSnapshotViewQuery(datasetId, tableName, timestampColumnName, groupByColumns, bqProjectId) {
|
|
29
29
|
if (datasetId === "" || tableName === "" || timestampColumnName === "") {
|
|
30
30
|
throw Error(`Missing some query parameters!`);
|
|
31
31
|
}
|
|
@@ -56,7 +56,8 @@ function buildLatestSnapshotViewQuery(datasetId, tableName, timestampColumnName,
|
|
|
56
56
|
FIRST_VALUE(operation)
|
|
57
57
|
OVER(PARTITION BY document_name ORDER BY ${timestampColumnName} DESC) = "DELETE"
|
|
58
58
|
AS is_deleted
|
|
59
|
-
FROM \`${
|
|
59
|
+
FROM \`${bqProjectId ||
|
|
60
|
+
process.env.PROJECT_ID}.${datasetId}.${tableName}\`
|
|
60
61
|
ORDER BY document_name, ${timestampColumnName} DESC
|
|
61
62
|
)
|
|
62
63
|
WHERE NOT is_deleted
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.validateProject = void 0;
|
|
4
|
+
const { ProjectsClient } = require("@google-cloud/resource-manager");
|
|
5
|
+
/* TODO: searchProjectsAsync sometimes returns {}.
|
|
6
|
+
* Could be resource intensive, if checked on every records insert.
|
|
7
|
+
*/
|
|
8
|
+
exports.validateProject = async (id) => {
|
|
9
|
+
let isValid = false;
|
|
10
|
+
const client = new ProjectsClient();
|
|
11
|
+
const projects = client.searchProjectsAsync();
|
|
12
|
+
for await (const project of projects) {
|
|
13
|
+
if (project.projectId === id) {
|
|
14
|
+
isValid = true;
|
|
15
|
+
}
|
|
16
|
+
}
|
|
17
|
+
return isValid;
|
|
18
|
+
};
|
package/lib/logs.js
CHANGED
|
@@ -15,7 +15,7 @@
|
|
|
15
15
|
* limitations under the License.
|
|
16
16
|
*/
|
|
17
17
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
18
|
-
exports.
|
|
18
|
+
exports.failedToInitializeWait = exports.tableCreationError = exports.invalidClustering = exports.hourAndDatePartitioningWarning = exports.invalidTableReference = exports.invalidProjectIdWarning = exports.cannotPartitionExistingTable = exports.removedClustering = exports.updatedClustering = exports.bigQueryTableInsertErrors = exports.firestoreTimePartitioningParametersWarning = exports.firestoreTimePartitionFieldError = exports.addPartitionFieldColumn = exports.addNewColumn = exports.timestampMissingValue = exports.error = exports.dataTypeInvalid = exports.dataInserting = exports.dataInsertRetried = exports.dataInserted = exports.complete = exports.bigQueryViewValidating = exports.bigQueryViewValidated = exports.bigQueryViewUpToDate = exports.bigQueryViewUpdating = exports.bigQueryViewUpdated = exports.bigQueryViewAlreadyExists = exports.bigQueryViewCreating = exports.bigQueryViewCreated = exports.bigQueryUserDefinedFunctionCreated = exports.bigQueryUserDefinedFunctionCreating = exports.bigQueryTableValidating = exports.bigQueryTableValidated = exports.bigQueryTableUpToDate = exports.bigQueryTableUpdating = exports.bigQueryTableUpdated = exports.bigQueryTableCreating = exports.bigQueryTableCreated = exports.bigQueryTableAlreadyExists = exports.bigQuerySchemaViewCreated = exports.bigQueryLatestSnapshotViewQueryCreated = exports.bigQueryErrorRecordingDocumentChange = exports.bigQueryDatasetExists = exports.bigQueryDatasetCreating = exports.bigQueryDatasetCreated = exports.arrayFieldInvalid = void 0;
|
|
19
19
|
const firebase_functions_1 = require("firebase-functions");
|
|
20
20
|
exports.arrayFieldInvalid = (fieldName) => {
|
|
21
21
|
firebase_functions_1.logger.warn(`Array field '${fieldName}' does not contain an array, skipping`);
|
|
@@ -114,6 +114,62 @@ exports.error = (err) => {
|
|
|
114
114
|
/** Warns that a timestamp field had no value and a default was used instead. */
function timestampMissingValue(fieldName) {
    firebase_functions_1.logger.warn(`Missing value for timestamp field: ${fieldName}, using default timestamp instead.`);
}
exports.timestampMissingValue = timestampMissingValue;
|
|
117
|
-
exports.
|
|
118
|
-
firebase_functions_1.logger.log(`Updated '${table}' table with a '
|
|
117
|
+
/** Logs that `table` was updated with a new `field` column. */
function addNewColumn(table, field) {
    firebase_functions_1.logger.log(`Updated '${table}' table with a '${field}' column`);
}
exports.addNewColumn = addNewColumn;
/** Logs that `table` was updated with the partition `field` column. */
function addPartitionFieldColumn(table, field) {
    firebase_functions_1.logger.log(`Updated '${table}' table with a partition field '${field}' column`);
}
exports.addPartitionFieldColumn = addPartitionFieldColumn;
|
|
123
|
+
/**
 * Warns that the Firestore field chosen for TimePartitioning has an
 * unsupported type, so the schema field value will be written as null.
 */
function firestoreTimePartitionFieldError(documentName, fieldName, firestoreFieldName, firestoreFieldData) {
    firebase_functions_1.logger.warn(`Wrong type of Firestore Field for TimePartitioning. Accepts only strings in BigQuery format (DATE, DATETIME, TIMESTAMP) and Firestore Timestamp. Firestore Document field path: ${documentName}. Field name: ${firestoreFieldName}. Field data: ${firestoreFieldData}. Schema field "${fieldName}" value will be null.`);
}
exports.firestoreTimePartitionFieldError = firestoreTimePartitionFieldError;
|
|
126
|
+
/**
 * Warns which TimePartitioning option parameters are missing; all four
 * are required to create the new custom schema field.
 */
function firestoreTimePartitioningParametersWarning(fieldName, fieldType, firestoreFieldName, dataFirestoreField) {
    firebase_functions_1.logger.warn("All TimePartitioning option parameters need to be available to create new custom schema field");
    if (!fieldName) {
        firebase_functions_1.logger.warn(`Parameter missing: TIME_PARTITIONING_FIELD`);
    }
    if (!fieldType) {
        firebase_functions_1.logger.warn(`Parameter missing: TIME_PARTITIONING_FIELD_TYPE`);
    }
    if (!firestoreFieldName) {
        firebase_functions_1.logger.warn(`Parameter missing: TIME_PARTITIONING_FIRESTORE_FIELD`);
    }
    if (!dataFirestoreField) {
        firebase_functions_1.logger.warn(`No data found in Firestore Document under selected field: "${firestoreFieldName}"`);
    }
}
exports.firestoreTimePartitioningParametersWarning = firestoreTimePartitioningParametersWarning;
|
|
135
|
+
/** Logs each failed row and its per-row error messages from a BigQuery insert. */
function bigQueryTableInsertErrors(insertErrors) {
    firebase_functions_1.logger.warn(`Error when inserting data to table.`);
    // Distinct names for the row-level and message-level errors (the
    // original shadowed `error` in the nested callback).
    for (const insertError of insertErrors) {
        firebase_functions_1.logger.warn("ROW DATA JSON:");
        firebase_functions_1.logger.warn(insertError.row);
        if (insertError && insertError.errors) {
            for (const rowError of insertError.errors) {
                firebase_functions_1.logger.warn(`ROW ERROR MESSAGE: ${rowError.message}`);
            }
        }
    }
}
exports.bigQueryTableInsertErrors = bigQueryTableInsertErrors;
|
|
145
|
+
/** Logs that clustering was updated with the given field list. */
function updatedClustering(fields) {
    firebase_functions_1.logger.info(`Clustering updated with new settings fields: ${fields}`);
}
exports.updatedClustering = updatedClustering;
/** Logs that clustering was removed from the named table. */
function removedClustering(tableName) {
    firebase_functions_1.logger.info(`Clustering removed on ${tableName}`);
}
exports.removedClustering = removedClustering;
|
|
151
|
+
/** Warns that partitioning cannot be applied to an already-existing table. */
function cannotPartitionExistingTable(table) {
    firebase_functions_1.logger.warn(`Cannot partition an existing table ${table.dataset.id}_${table.id}`);
}
exports.cannotPartitionExistingTable = cannotPartitionExistingTable;
|
|
154
|
+
function invalidProjectIdWarning(bqProjectId) {
|
|
155
|
+
firebase_functions_1.logger.warn(`Invalid project Id ${bqProjectId}, data cannot be synchronized`);
|
|
156
|
+
}
|
|
157
|
+
exports.invalidProjectIdWarning = invalidProjectIdWarning;
|
|
158
|
+
function invalidTableReference() {
|
|
159
|
+
firebase_functions_1.logger.warn(`No valid table reference is available. Skipping partitioning`);
|
|
160
|
+
}
|
|
161
|
+
exports.invalidTableReference = invalidTableReference;
|
|
162
|
+
function hourAndDatePartitioningWarning() {
|
|
163
|
+
firebase_functions_1.logger.warn(`Cannot partition table with hour partitioning and Date. For DATE columns, the partitions can have daily, monthly, or yearly granularity. Skipping partitioning`);
|
|
164
|
+
}
|
|
165
|
+
exports.hourAndDatePartitioningWarning = hourAndDatePartitioningWarning;
|
|
166
|
+
function invalidClustering(fields) {
|
|
167
|
+
firebase_functions_1.logger.warn(`Unable to add clustering, field(s) ${fields} do not exist on the expected table`);
|
|
168
|
+
}
|
|
169
|
+
exports.invalidClustering = invalidClustering;
|
|
170
|
+
/** Warns that table creation threw. `table` is accepted but not logged. */
function tableCreationError(table, message) {
    firebase_functions_1.logger.warn(`Error caught creating table`, message);
}
exports.tableCreationError = tableCreationError;
/** Warns that waiting for table/view initialization failed. */
function failedToInitializeWait(message) {
    firebase_functions_1.logger.warn(`Failed while waiting to initialize.`, message);
}
exports.failedToInitializeWait = failedToInitializeWait;
|
package/lib/types.js
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.PartitionFieldType = void 0;
|
|
4
|
+
var PartitionFieldType;
|
|
5
|
+
(function (PartitionFieldType) {
|
|
6
|
+
PartitionFieldType["DATE"] = "DATE";
|
|
7
|
+
PartitionFieldType["DATETIME"] = "DATETIME";
|
|
8
|
+
PartitionFieldType["TIMESTAMP"] = "TIMESTAMP";
|
|
9
|
+
})(PartitionFieldType = exports.PartitionFieldType || (exports.PartitionFieldType = {}));
|
package/package.json
CHANGED
|
@@ -5,14 +5,14 @@
|
|
|
5
5
|
"url": "github.com/firebase/extensions.git",
|
|
6
6
|
"directory": "firestore-bigquery-export/firestore-bigquery-change-tracker"
|
|
7
7
|
},
|
|
8
|
-
"version": "1.1.
|
|
8
|
+
"version": "1.1.16",
|
|
9
9
|
"description": "Core change-tracker library for Cloud Firestore Collection BigQuery Exports",
|
|
10
10
|
"main": "./lib/index.js",
|
|
11
11
|
"scripts": {
|
|
12
12
|
"build": "npm run clean && npm run compile",
|
|
13
13
|
"clean": "rimraf lib",
|
|
14
14
|
"compile": "tsc",
|
|
15
|
-
"test": "jest",
|
|
15
|
+
"test:local": "firebase ext:dev:emulators:exec ./node_modules/.bin/jest --test-params=./src/__tests__/emulator-params.env --project=extensions-testing --config=./src/__tests__/firebase.json",
|
|
16
16
|
"prepare": "npm run build"
|
|
17
17
|
},
|
|
18
18
|
"files": [
|
|
@@ -23,11 +23,13 @@
|
|
|
23
23
|
"license": "Apache-2.0",
|
|
24
24
|
"dependencies": {
|
|
25
25
|
"@google-cloud/bigquery": "^4.7.0",
|
|
26
|
+
"@google-cloud/resource-manager": "^3.0.0",
|
|
26
27
|
"firebase-admin": "^8.0.0",
|
|
27
28
|
"firebase-functions": "^3.13.2",
|
|
28
29
|
"generate-schema": "^2.6.0",
|
|
29
30
|
"inquirer": "^6.4.0",
|
|
30
31
|
"lodash": "^4.17.14",
|
|
32
|
+
"node-fetch": "^2.6.1",
|
|
31
33
|
"sql-formatter": "^2.3.3",
|
|
32
34
|
"traverse": "^0.6.6"
|
|
33
35
|
},
|