google-cloud-bigquery 1.6.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9066e151cc7d6515a247afdcf5b76b70fc089fb5d1b74e12830b49b4ef8ba1ce
+  data.tar.gz: e06851af9995cda27436a7378f82456709ecb6640034e23f3bc47f9fc24811b0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 355f857c958cbb2dca1c78e5a49feb39e17bd1ce254adbede3d0722374e40ff04baf3ac302885afb7de8d0f62afd38e4911a37d1f1efa4fe13f5a78f3cd6e790
+  data.tar.gz: 4f29fdfbeee060ae6bcb560223dba70c6493c50e795fb60c673fb9bc2f5da93a74c5c47590ece5b7c40e8a26754967f16d2cfb9dbfb7d40e08291e47ef5b8b76
|
@@ -270,6 +270,26 @@ module Google
   Schema.from_gapi(@gapi.configuration.load.schema).freeze
 end
 
+##
+# Allows the schema of the destination table to be updated as a side
+# effect of the load job if a schema is autodetected or supplied in the
+# job configuration. Schema update options are supported in two cases:
+# when write disposition is `WRITE_APPEND`; when write disposition is
+# `WRITE_TRUNCATE` and the destination table is a partition of a table,
+# specified by partition decorators. For normal tables, `WRITE_TRUNCATE`
+# will always overwrite the schema. One or more of the following values
+# are specified:
+#
+# * `ALLOW_FIELD_ADDITION`: allow adding a nullable field to the schema.
+# * `ALLOW_FIELD_RELAXATION`: allow relaxing a required field in the
+#   original schema to nullable.
+#
+# @return [Array<String>] An array of strings.
+#
+def schema_update_options
+  Array @gapi.configuration.load.schema_update_options
+end
+
 ##
 # The number of source data files in the load job.
 #
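The reader above returns whatever options were set when the load job was created (see the matching `schema_update_options=` setter added to `LoadJob::Updater` later in this diff). A minimal sketch of reading it back from an existing job — not taken from the gem's docs; the job ID is a placeholder:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new

job = bigquery.job "my_load_job_id" # placeholder job ID
if job.is_a? Google::Cloud::Bigquery::LoadJob
  job.schema_update_options #=> e.g. ["ALLOW_FIELD_ADDITION"]
end
```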
@@ -329,6 +349,81 @@ module Google
   nil
 end
 
+###
+# Checks if the destination table will be time-partitioned. See
+# [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [Boolean, nil] `true` when the table will be time-partitioned,
+#   or `false` otherwise.
+#
+# @!group Attributes
+#
+def time_partitioning?
+  !@gapi.configuration.load.time_partitioning.nil?
+end
+
+###
+# The period for which the destination table will be partitioned, if
+# any. See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [String, nil] The partition type. Currently the only supported
+#   value is "DAY", or `nil` if not present.
+#
+# @!group Attributes
+#
+def time_partitioning_type
+  @gapi.configuration.load.time_partitioning.type if time_partitioning?
+end
+
+###
+# The field on which the destination table will be partitioned, if any.
+# If not set, the destination table will be partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table will be partitioned by this field.
+# See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [String, nil] The partition field, if a field was configured.
+#   `nil` if not partitioned or not set (partitioned by pseudo column
+#   '_PARTITIONTIME').
+#
+# @!group Attributes
+#
+def time_partitioning_field
+  @gapi.configuration.load.time_partitioning.field if time_partitioning?
+end
+
+###
+# The expiration for the destination table partitions, if any, in
+# seconds. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [Integer, nil] The expiration time, in seconds, for data in
+#   partitions, or `nil` if not present.
+#
+# @!group Attributes
+#
+def time_partitioning_expiration
+  @gapi.configuration.load.time_partitioning.expiration_ms / 1_000 if
+    time_partitioning? &&
+    !@gapi.configuration.load.time_partitioning.expiration_ms.nil?
+end
+
+###
+# If set to true, queries over the destination table will require a
+# partition filter that can be used for partition elimination to be
+# specified. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [Boolean] `true` when a partition filter will be required,
+#   or `false` otherwise.
+#
+# @!group Attributes
+#
+def time_partitioning_require_filter?
+  tp = @gapi.configuration.load.time_partitioning
+  return false if tp.nil? || tp.require_partition_filter.nil?
+  tp.require_partition_filter
+end
+
 ##
 # Yielded to a block to accumulate changes for a patch request.
 class Updater < LoadJob
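These readers mirror the setters added to `LoadJob::Updater` further down in this diff. A minimal sketch combining the two — not from the gem's docs; the dataset, table, and bucket names are placeholders:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset" # placeholder dataset

load_job = dataset.load_job "my_new_table", "gs://my-bucket/file-name.csv" do |job|
  job.time_partitioning_type       = "DAY"
  job.time_partitioning_expiration = 86_400
end
load_job.wait_until_done!

load_job.time_partitioning?           #=> true
load_job.time_partitioning_type       #=> "DAY"
load_job.time_partitioning_expiration #=> 86400
load_job.time_partitioning_field      #=> nil, partitioned by _PARTITIONTIME
```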
@@ -1009,6 +1104,35 @@ module Google
   @gapi.configuration.load.update! quote: val
 end
 
+##
+# Sets the schema update options, which allow the schema of the
+# destination table to be updated as a side effect of the load job if
+# a schema is autodetected or supplied in the job configuration.
+# Schema update options are supported in two cases: when write
+# disposition is `WRITE_APPEND`; when write disposition is
+# `WRITE_TRUNCATE` and the destination table is a partition of a
+# table, specified by partition decorators. For normal tables,
+# `WRITE_TRUNCATE` will always overwrite the schema. One or more of
+# the following values are specified:
+#
+# * `ALLOW_FIELD_ADDITION`: allow adding a nullable field to the
+#   schema.
+# * `ALLOW_FIELD_RELAXATION`: allow relaxing a required field in the
+#   original schema to nullable.
+#
+# @param [Array<String>] new_options The new schema update options.
+#
+# @!group Attributes
+#
+def schema_update_options= new_options
+  if new_options.nil?
+    @gapi.configuration.load.update! schema_update_options: nil
+  else
+    @gapi.configuration.load.update! \
+      schema_update_options: Array(new_options)
+  end
+end
+
 ##
 # Sets the number of leading rows to skip in the file.
 #
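A minimal sketch of the setter in use, appending to an existing table while letting the load widen its schema. This is not from the gem's docs; the dataset, table, and bucket names are placeholders, and `write: "append"` is the standard `Dataset#load_job` write-disposition option:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset" # placeholder dataset

# Append new rows and allow the load to add or relax columns in the table schema.
load_job = dataset.load_job "my_table", "gs://my-bucket/new-rows.csv",
                            write: "append" do |job|
  job.schema_update_options = ["ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"]
end

load_job.wait_until_done!
load_job.schema_update_options #=> ["ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"]
```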
@@ -1065,6 +1189,136 @@ module Google
   @gapi.configuration.update! labels: val
 end
 
+##
+# Sets the partitioning for the destination table. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# You can only set the partitioning field while creating a table.
+# BigQuery does not allow you to change partitioning on an existing
+# table.
+#
+# @param [String] type The partition type. Currently the only
+#   supported value is "DAY".
+#
+# @example
+#   require "google/cloud/bigquery"
+#
+#   bigquery = Google::Cloud::Bigquery.new
+#   dataset = bigquery.dataset "my_dataset"
+#
+#   gs_url = "gs://my-bucket/file-name.csv"
+#   load_job = dataset.load_job "my_new_table", gs_url do |job|
+#     job.time_partitioning_type = "DAY"
+#   end
+#
+#   load_job.wait_until_done!
+#   load_job.done? #=> true
+#
+# @!group Attributes
+#
+def time_partitioning_type= type
+  @gapi.configuration.load.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.load.time_partitioning.update! type: type
+end
+
+##
+# Sets the field on which to partition the destination table. If not
+# set, the destination table is partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table is partitioned by this field.
+# See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# The destination table must also be partitioned. See
+# {#time_partitioning_type=}.
+#
+# You can only set the partitioning field while creating a table.
+# BigQuery does not allow you to change partitioning on an existing
+# table.
+#
+# @param [String] field The partition field. The field must be a
+#   top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
+#   REQUIRED.
+#
+# @example
+#   require "google/cloud/bigquery"
+#
+#   bigquery = Google::Cloud::Bigquery.new
+#   dataset = bigquery.dataset "my_dataset"
+#
+#   gs_url = "gs://my-bucket/file-name.csv"
+#   load_job = dataset.load_job "my_new_table", gs_url do |job|
+#     job.time_partitioning_type = "DAY"
+#     job.time_partitioning_field = "dob"
+#     job.schema do |schema|
+#       schema.timestamp "dob", mode: :required
+#     end
+#   end
+#
+#   load_job.wait_until_done!
+#   load_job.done? #=> true
+#
+# @!group Attributes
+#
+def time_partitioning_field= field
+  @gapi.configuration.load.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.load.time_partitioning.update! field: field
+end
+
+##
+# Sets the partition expiration for the destination table. See
+# [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# The destination table must also be partitioned. See
+# {#time_partitioning_type=}.
+#
+# @param [Integer] expiration An expiration time, in seconds,
+#   for data in partitions.
+#
+# @example
+#   require "google/cloud/bigquery"
+#
+#   bigquery = Google::Cloud::Bigquery.new
+#   dataset = bigquery.dataset "my_dataset"
+#
+#   gs_url = "gs://my-bucket/file-name.csv"
+#   load_job = dataset.load_job "my_new_table", gs_url do |job|
+#     job.time_partitioning_type = "DAY"
+#     job.time_partitioning_expiration = 86_400
+#   end
+#
+#   load_job.wait_until_done!
+#   load_job.done? #=> true
+#
+# @!group Attributes
+#
+def time_partitioning_expiration= expiration
+  @gapi.configuration.load.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.load.time_partitioning.update! \
+    expiration_ms: expiration * 1000
+end
+
+##
+# If set to true, queries over the destination table will require a
+# partition filter that can be used for partition elimination to be
+# specified. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @param [Boolean] val Indicates if queries over the destination table
+#   will require a partition filter. The default value is `false`.
+#
+# @!group Attributes
+#
+def time_partitioning_require_filter= val
+  @gapi.configuration.load.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.load.time_partitioning.update! \
+    require_partition_filter: val
+end
+
 ##
 # @private Returns the Google API client library version of this job.
 #
@@ -148,13 +148,14 @@ module Google
 #   `false` otherwise.
 #
 def cache_hit?
+  return false unless @gapi.statistics.query
   @gapi.statistics.query.cache_hit
 end
 
 ##
 # The number of bytes processed by the query.
 #
-# @return [Integer] Total bytes processed for the job.
+# @return [Integer, nil] Total bytes processed for the job.
 #
 def bytes_processed
   Integer @gapi.statistics.query.total_bytes_processed
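With the added guard, `cache_hit?` returns `false` instead of raising when the job carries no query statistics, and `bytes_processed` is now documented as nullable. A small usage sketch, not taken from the gem; the query is illustrative:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new

job = bigquery.query_job "SELECT 1" # illustrative query
job.wait_until_done!

job.cache_hit?      #=> true or false; false when query statistics are missing
job.bytes_processed #=> Integer, or nil per the updated return type
```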
@@ -165,8 +166,8 @@ module Google
 ##
 # Describes the execution plan for the query.
 #
-# @return [Array<Google::Cloud::Bigquery::QueryJob::Stage
-#   containing the stages of the execution plan.
+# @return [Array<Google::Cloud::Bigquery::QueryJob::Stage>, nil] An
+#   array containing the stages of the execution plan.
 #
 # @example
 #   require "google/cloud/bigquery"
@@ -188,12 +189,70 @@ module Google
 #   end
 #
 def query_plan
-  return nil unless @gapi.statistics.query
+  return nil unless @gapi.statistics.query &&
+                    @gapi.statistics.query.query_plan
   Array(@gapi.statistics.query.query_plan).map do |stage|
     Stage.from_gapi stage
   end
 end
 
+##
+# The type of query statement, if valid. Possible values (new values
+# might be added in the future):
+#
+# * "SELECT": `SELECT` query.
+# * "INSERT": `INSERT` query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language
+# * "UPDATE": `UPDATE` query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language
+# * "DELETE": `DELETE` query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language
+# * "CREATE_TABLE": `CREATE [OR REPLACE] TABLE` without `AS SELECT`.
+# * "CREATE_TABLE_AS_SELECT": `CREATE [OR REPLACE] TABLE ... AS SELECT`.
+# * "DROP_TABLE": `DROP TABLE` query.
+# * "CREATE_VIEW": `CREATE [OR REPLACE] VIEW ... AS SELECT ...`.
+# * "DROP_VIEW": `DROP VIEW` query.
+#
+# @return [String, nil] The type of query statement.
+#
+def statement_type
+  return nil unless @gapi.statistics.query
+  @gapi.statistics.query.statement_type
+end
+
+##
+# The DDL operation performed, possibly dependent on the pre-existence
+# of the DDL target. (See {#ddl_target_table}.) Possible values (new
+# values might be added in the future):
+#
+# * "CREATE": The query created the DDL target.
+# * "SKIP": No-op. Example cases: the query is
+#   `CREATE TABLE IF NOT EXISTS` while the table already exists, or the
+#   query is `DROP TABLE IF EXISTS` while the table does not exist.
+# * "REPLACE": The query replaced the DDL target. Example case: the
+#   query is `CREATE OR REPLACE TABLE`, and the table already exists.
+# * "DROP": The query deleted the DDL target.
+#
+# @return [String, nil] The DDL operation performed.
+#
+def ddl_operation_performed
+  return nil unless @gapi.statistics.query
+  @gapi.statistics.query.ddl_operation_performed
+end
+
+##
+# The DDL target table, in reference state. (See {Table#reference?}.)
+# Present only for `CREATE/DROP TABLE/VIEW` queries. (See
+# {#statement_type}.)
+#
+# @return [Google::Cloud::Bigquery::Table, nil] The DDL target table, in
+#   reference state.
+#
+def ddl_target_table
+  return nil unless @gapi.statistics.query
+  ensure_service!
+  table = @gapi.statistics.query.ddl_target_table
+  return nil unless table
+  Google::Cloud::Bigquery::Table.new_reference_from_gapi table, service
+end
+
 ##
 # The table in which the query results are stored.
 #
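The three new accessors are most useful after running a DDL statement. A hedged sketch, not from the gem's docs; the dataset and table names are placeholders:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new

job = bigquery.query_job "CREATE TABLE IF NOT EXISTS my_dataset.my_table (x INT64)"
job.wait_until_done!

job.statement_type          #=> "CREATE_TABLE"
job.ddl_operation_performed #=> "CREATE", or "SKIP" if the table already existed
target = job.ddl_target_table
target.reference? if target #=> true, returned in reference state (no extra API call)
```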
@@ -259,6 +318,81 @@ module Google
   )
 end
 
+###
+# Checks if the destination table will be time-partitioned. See
+# [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [Boolean, nil] `true` when the table will be time-partitioned,
+#   or `false` otherwise.
+#
+# @!group Attributes
+#
+def time_partitioning?
+  !@gapi.configuration.query.time_partitioning.nil?
+end
+
+###
+# The period for which the destination table will be partitioned, if
+# any. See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [String, nil] The partition type. Currently the only supported
+#   value is "DAY", or `nil` if not present.
+#
+# @!group Attributes
+#
+def time_partitioning_type
+  @gapi.configuration.query.time_partitioning.type if time_partitioning?
+end
+
+###
+# The field on which the destination table will be partitioned, if any.
+# If not set, the destination table will be partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table will be partitioned by this field.
+# See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [String, nil] The partition field, if a field was configured.
+#   `nil` if not partitioned or not set (partitioned by pseudo column
+#   '_PARTITIONTIME').
+#
+# @!group Attributes
+#
+def time_partitioning_field
+  return nil unless time_partitioning?
+  @gapi.configuration.query.time_partitioning.field
+end
+
+###
+# The expiration for the destination table partitions, if any, in
+# seconds. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [Integer, nil] The expiration time, in seconds, for data in
+#   partitions, or `nil` if not present.
+#
+# @!group Attributes
+#
+def time_partitioning_expiration
+  tp = @gapi.configuration.query.time_partitioning
+  tp.expiration_ms / 1_000 if tp && !tp.expiration_ms.nil?
+end
+
+###
+# If set to true, queries over the destination table will require a
+# partition filter that can be used for partition elimination to be
+# specified. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @return [Boolean] `true` when a partition filter will be required,
+#   or `false` otherwise.
+#
+# @!group Attributes
+#
+def time_partitioning_require_filter?
+  tp = @gapi.configuration.query.time_partitioning
+  return false if tp.nil? || tp.require_partition_filter.nil?
+  tp.require_partition_filter
+end
+
 ##
 # Refreshes the job until the job is `DONE`.
 # The delay between refreshes will incrementally increase.
@@ -678,6 +812,142 @@ module Google
   )
 end
 
+##
+# Sets the partitioning for the destination table. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# You can only set the partitioning field while creating a table.
+# BigQuery does not allow you to change partitioning on an existing
+# table.
+#
+# @param [String] type The partition type. Currently the only
+#   supported value is "DAY".
+#
+# @example
+#   require "google/cloud/bigquery"
+#
+#   bigquery = Google::Cloud::Bigquery.new
+#   dataset = bigquery.dataset "my_dataset"
+#
+#   job = dataset.query_job "SELECT * FROM UNNEST(" \
+#                           "GENERATE_TIMESTAMP_ARRAY(" \
+#                           "'2018-10-01 00:00:00', " \
+#                           "'2018-10-10 00:00:00', " \
+#                           "INTERVAL 1 DAY)) AS dob" do |job|
+#     job.time_partitioning_type = "DAY"
+#   end
+#
+#   job.wait_until_done!
+#   job.done? #=> true
+#
+# @!group Attributes
+#
+def time_partitioning_type= type
+  @gapi.configuration.query.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.query.time_partitioning.update! type: type
+end
+
+##
+# Sets the field on which to partition the destination table. If not
+# set, the destination table is partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table is partitioned by this field.
+# See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# The destination table must also be partitioned. See
+# {#time_partitioning_type=}.
+#
+# You can only set the partitioning field while creating a table.
+# BigQuery does not allow you to change partitioning on an existing
+# table.
+#
+# @param [String] field The partition field. The field must be a
+#   top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
+#   REQUIRED.
+#
+# @example
+#   require "google/cloud/bigquery"
+#
+#   bigquery = Google::Cloud::Bigquery.new
+#   dataset = bigquery.dataset "my_dataset"
+#
+#   job = dataset.query_job "SELECT * FROM UNNEST(" \
+#                           "GENERATE_TIMESTAMP_ARRAY(" \
+#                           "'2018-10-01 00:00:00', " \
+#                           "'2018-10-10 00:00:00', " \
+#                           "INTERVAL 1 DAY)) AS dob" do |job|
+#     job.time_partitioning_type = "DAY"
+#     job.time_partitioning_field = "dob"
+#   end
+#
+#   job.wait_until_done!
+#   job.done? #=> true
+#
+# @!group Attributes
+#
+def time_partitioning_field= field
+  @gapi.configuration.query.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.query.time_partitioning.update! field: field
+end
+
+##
+# Sets the partition expiration for the destination table. See
+# [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# The destination table must also be partitioned. See
+# {#time_partitioning_type=}.
+#
+# @param [Integer] expiration An expiration time, in seconds,
+#   for data in partitions.
+#
+# @example
+#   require "google/cloud/bigquery"
+#
+#   bigquery = Google::Cloud::Bigquery.new
+#   dataset = bigquery.dataset "my_dataset"
+#
+#   job = dataset.query_job "SELECT * FROM UNNEST(" \
+#                           "GENERATE_TIMESTAMP_ARRAY(" \
+#                           "'2018-10-01 00:00:00', " \
+#                           "'2018-10-10 00:00:00', " \
+#                           "INTERVAL 1 DAY)) AS dob" do |job|
+#     job.time_partitioning_type = "DAY"
+#     job.time_partitioning_expiration = 86_400
+#   end
+#
+#   job.wait_until_done!
+#   job.done? #=> true
+#
+# @!group Attributes
+#
+def time_partitioning_expiration= expiration
+  @gapi.configuration.query.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.query.time_partitioning.update! \
+    expiration_ms: expiration * 1000
+end
+
+##
+# If set to true, queries over the destination table will require a
+# partition filter that can be used for partition elimination to be
+# specified. See [Partitioned
+# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
+#
+# @param [Boolean] val Indicates if queries over the destination table
+#   will require a partition filter. The default value is `false`.
+#
+# @!group Attributes
+#
+def time_partitioning_require_filter= val
+  @gapi.configuration.query.time_partitioning ||= \
+    Google::Apis::BigqueryV2::TimePartitioning.new
+  @gapi.configuration.query.time_partitioning.update! \
+    require_partition_filter: val
+end
+
 ##
 # @private Returns the Google API client library version of this job.
 #
@@ -219,7 +219,9 @@ module Google
 end
 
 ###
-# The field on which the table is partitioned, if any.
+# The field on which the table is partitioned, if any. If not
+# set, the destination table is partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table is partitioned by this field. See
 # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 #
 # @return [String, nil] The partition field, if a field was configured.
@@ -235,8 +237,10 @@ module Google
 end
 
 ##
-# Sets the field on which to partition the table.
-#
+# Sets the field on which to partition the table. If not
+# set, the destination table is partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table is partitioned by this field. See
+# [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 # The table must also be partitioned.
 #
 # See {Table#time_partitioning_type=}.
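For context, the partition field described above is set through the same-named writer on the table updater when the table is created. A minimal sketch under that assumption — not from this diff; the dataset and table names are placeholders:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset" # placeholder dataset

table = dataset.create_table "my_partitioned_table" do |t|
  t.schema do |schema|
    schema.timestamp "dob", mode: :required
  end
  t.time_partitioning_type  = "DAY"
  t.time_partitioning_field = "dob" # omit to partition by _PARTITIONTIME
end

table.time_partitioning_field #=> "dob"
```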
@@ -2168,10 +2172,10 @@ module Google
 
 ##
 # @private New Table from a Google API Client object.
-def self.from_gapi gapi,
+def self.from_gapi gapi, service
   new.tap do |f|
     f.gapi = gapi
-    f.service =
+    f.service = service
   end
 end
 
@@ -2190,6 +2194,15 @@ module Google
   end
 end
 
+##
+# @private New lazy Table object from a Google API Client object.
+def self.new_reference_from_gapi gapi, service
+  new.tap do |b|
+    b.service = service
+    b.instance_variable_set :@reference, gapi
+  end
+end
+
 protected
 
 ##
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-bigquery
 version: !ruby/object:Gem::Version
-  version: 1.6.0
+  version: 1.7.0
 platform: ruby
 authors:
 - Mike Moore
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-06-
+date: 2018-06-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: google-cloud-core