google-cloud-bigquery 1.18.0 → 1.21.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +48 -0
- data/TROUBLESHOOTING.md +2 -8
- data/lib/google/cloud/bigquery/argument.rb +197 -0
- data/lib/google/cloud/bigquery/copy_job.rb +18 -1
- data/lib/google/cloud/bigquery/data.rb +15 -0
- data/lib/google/cloud/bigquery/dataset.rb +379 -49
- data/lib/google/cloud/bigquery/dataset/list.rb +1 -2
- data/lib/google/cloud/bigquery/extract_job.rb +19 -2
- data/lib/google/cloud/bigquery/job.rb +198 -0
- data/lib/google/cloud/bigquery/job/list.rb +5 -5
- data/lib/google/cloud/bigquery/load_job.rb +273 -26
- data/lib/google/cloud/bigquery/model.rb +6 -4
- data/lib/google/cloud/bigquery/project.rb +82 -22
- data/lib/google/cloud/bigquery/project/list.rb +1 -2
- data/lib/google/cloud/bigquery/query_job.rb +292 -0
- data/lib/google/cloud/bigquery/routine.rb +1108 -0
- data/lib/google/cloud/bigquery/routine/list.rb +165 -0
- data/lib/google/cloud/bigquery/schema.rb +2 -2
- data/lib/google/cloud/bigquery/service.rb +96 -39
- data/lib/google/cloud/bigquery/standard_sql.rb +257 -53
- data/lib/google/cloud/bigquery/table.rb +410 -62
- data/lib/google/cloud/bigquery/table/async_inserter.rb +21 -11
- data/lib/google/cloud/bigquery/table/list.rb +1 -2
- data/lib/google/cloud/bigquery/version.rb +1 -1
- metadata +9 -6
@@ -155,10 +155,87 @@ module Google
 end

 ###
-# Checks if the table is
+# Checks if the table is range partitioned. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# @return [Boolean, nil] `true` when the table is range partitioned, or
+# `false` otherwise, if the object is a resource (see {#resource?});
+# `nil` if the object is a reference (see {#reference?}).
+#
+# @!group Attributes
+#
+def range_partitioning?
+return nil if reference?
+!@gapi.range_partitioning.nil?
+end
+
+###
+# The field on which the table is range partitioned, if any. The field must be a top-level `NULLABLE/REQUIRED`
+# field. The only supported type is `INTEGER/INT64`. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# @return [Integer, nil] The range partition field, or `nil` if not range partitioned or the object is a
+# reference (see {#reference?}).
+#
+# @!group Attributes
+#
+def range_partitioning_field
+return nil if reference?
+ensure_full_data!
+@gapi.range_partitioning.field if range_partitioning?
+end
+
+###
+# The start of range partitioning, inclusive. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned or the
+# object is a reference (see {#reference?}).
+#
+# @!group Attributes
+#
+def range_partitioning_start
+return nil if reference?
+ensure_full_data!
+@gapi.range_partitioning.range.start if range_partitioning?
+end
+
+###
+# The width of each interval. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
+# partitioned or the object is a reference (see {#reference?}).
+#
+# @!group Attributes
+#
+def range_partitioning_interval
+return nil if reference?
+ensure_full_data!
+return nil unless range_partitioning?
+@gapi.range_partitioning.range.interval
+end
+
+###
+# The end of range partitioning, exclusive. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned or the
+# object is a reference (see {#reference?}).
+#
+# @!group Attributes
+#
+def range_partitioning_end
+return nil if reference?
+ensure_full_data!
+@gapi.range_partitioning.range.end if range_partitioning?
+end
+
+###
+# Checks if the table is time partitioned. See [Partitioned
 # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 #
-# @return [Boolean, nil] `true` when the table is time
+# @return [Boolean, nil] `true` when the table is time partitioned, or
 # `false` otherwise, if the object is a resource (see {#resource?});
 # `nil` if the object is a reference (see {#reference?}).
 #
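The new `range_partitioning?`, `range_partitioning_field`, `range_partitioning_start`, `range_partitioning_interval` and `range_partitioning_end` readers above all return `nil` for table references. A minimal read-back sketch, assuming an existing integer-range-partitioned table (`my_dataset` / `my_table` are placeholder names, not part of this diff):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
table    = bigquery.dataset("my_dataset").table("my_table")

# Readers return nil for references (skip_lookup tables) and nil/false
# for tables that are not range partitioned.
if table.range_partitioning?
  puts "field:    #{table.range_partitioning_field}"
  puts "start:    #{table.range_partitioning_start}"
  puts "interval: #{table.range_partitioning_interval}"
  puts "end:      #{table.range_partitioning_end}"
end
```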
@@ -170,10 +247,10 @@ module Google
 end

 ###
-# The period for which the table is partitioned, if any. See
+# The period for which the table is time partitioned, if any. See
 # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 #
-# @return [String, nil] The partition type. Currently the only supported
+# @return [String, nil] The time partition type. Currently the only supported
 # value is "DAY", or `nil` if the object is a reference (see
 # {#reference?}).
 #
@@ -186,14 +263,14 @@ module Google
 end

 ##
-# Sets the partitioning for the table. See [Partitioned
+# Sets the time partitioning type for the table. See [Partitioned
 # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 #
-# You can only set partitioning when creating a table as in
-# the example below. BigQuery does not allow you to change partitioning
+# You can only set time partitioning when creating a table as in
+# the example below. BigQuery does not allow you to change time partitioning
 # on an existing table.
 #
-# @param [String] type The partition type. Currently the only
+# @param [String] type The time partition type. Currently the only
 # supported value is "DAY".
 #
 # @example
@@ -201,8 +278,12 @@ module Google
 #
 # bigquery = Google::Cloud::Bigquery.new
 # dataset = bigquery.dataset "my_dataset"
-# table = dataset.create_table "my_table" do |
-#
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
+# schema.timestamp "dob", mode: :required
+# end
+# t.time_partitioning_type = "DAY"
+# t.time_partitioning_field = "dob"
 # end
 #
 # @!group Attributes
@@ -215,13 +296,13 @@ module Google
 end

 ###
-# The field on which the table is partitioned, if any. If not
-# set, the destination table is partitioned by pseudo column
-# `_PARTITIONTIME`; if set, the table is partitioned by this field. See
+# The field on which the table is time partitioned, if any. If not
+# set, the destination table is time partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table is time partitioned by this field. See
 # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 #
-# @return [String, nil] The partition field, if a field was configured.
-# `nil` if not partitioned, not set (partitioned by pseudo column
+# @return [String, nil] The time partition field, if a field was configured.
+# `nil` if not time partitioned, not set (time partitioned by pseudo column
 # '_PARTITIONTIME') or the object is a reference (see {#reference?}).
 #
 # @!group Attributes
@@ -233,19 +314,19 @@ module Google
 end

 ##
-# Sets the field on which to partition the table. If not
-# set, the destination table is partitioned by pseudo column
-# `_PARTITIONTIME`; if set, the table is partitioned by this field. See
+# Sets the field on which to time partition the table. If not
+# set, the destination table is time partitioned by pseudo column
+# `_PARTITIONTIME`; if set, the table is time partitioned by this field. See
 # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
-# The table must also be partitioned.
+# The table must also be time partitioned.
 #
 # See {Table#time_partitioning_type=}.
 #
-# You can only set the partitioning field while creating a table as in
-# the example below. BigQuery does not allow you to change partitioning
+# You can only set the time partitioning field while creating a table as in
+# the example below. BigQuery does not allow you to change time partitioning
 # on an existing table.
 #
-# @param [String] field The partition field. The field must be a
+# @param [String] field The time partition field. The field must be a
 # top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
 # REQUIRED.
 #
@@ -254,12 +335,12 @@ module Google
 #
 # bigquery = Google::Cloud::Bigquery.new
 # dataset = bigquery.dataset "my_dataset"
-# table = dataset.create_table "my_table" do |
-#
-# table.time_partitioning_field = "dob"
-# table.schema do |schema|
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
 # schema.timestamp "dob", mode: :required
 # end
+# t.time_partitioning_type = "DAY"
+# t.time_partitioning_field = "dob"
 # end
 #
 # @!group Attributes
@@ -272,11 +353,11 @@ module Google
 end

 ###
-# The expiration for the
+# The expiration for the time partitions, if any, in seconds. See
 # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
 #
 # @return [Integer, nil] The expiration time, in seconds, for data in
-# partitions, or `nil` if not present or the object is a reference
+# time partitions, or `nil` if not present or the object is a reference
 # (see {#reference?}).
 #
 # @!group Attributes
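The time partitioning readers renamed in the hunks above pair with `time_partitioning?` and the type/field readers. A minimal read-back sketch, assuming an existing day-partitioned table (`my_dataset` / `my_table` are placeholders):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
table    = bigquery.dataset("my_dataset").table("my_table")

if table.time_partitioning?
  puts table.time_partitioning_type        # e.g. "DAY"
  puts table.time_partitioning_field       # nil when partitioned by the _PARTITIONTIME pseudo column
  puts table.time_partitioning_expiration  # seconds, or nil if no expiration is set
end
```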
@@ -290,9 +371,9 @@ module Google
 end

 ##
-# Sets the partition expiration for the table. See [Partitioned
+# Sets the time partition expiration for the table. See [Partitioned
 # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
-# The table must also be partitioned.
+# The table must also be time partitioned.
 #
 # See {Table#time_partitioning_type=}.
 #
@@ -301,16 +382,20 @@ module Google
 # the update to comply with ETag-based optimistic concurrency control.
 #
 # @param [Integer] expiration An expiration time, in seconds,
-# for data in partitions.
+# for data in time partitions.
 #
 # @example
 # require "google/cloud/bigquery"
 #
 # bigquery = Google::Cloud::Bigquery.new
 # dataset = bigquery.dataset "my_dataset"
-# table = dataset.create_table "my_table" do |
-#
-#
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
+# schema.timestamp "dob", mode: :required
+# end
+# t.time_partitioning_type = "DAY"
+# t.time_partitioning_field = "dob"
+# t.time_partitioning_expiration = 86_400
 # end
 #
 # @!group Attributes
@@ -356,8 +441,8 @@ module Google
 #
 # bigquery = Google::Cloud::Bigquery.new
 # dataset = bigquery.dataset "my_dataset"
-# table = dataset.create_table "my_table" do |
-#
+# table = dataset.create_table "my_table" do |t|
+# t.require_partition_filter = true
 # end
 #
 # @!group Attributes
@@ -387,7 +472,7 @@ module Google

 ###
 # One or more fields on which data should be clustered. Must be
-# specified with time
+# specified with time partitioning, data in the table will be
 # first partitioned and subsequently clustered. The order of the
 # returned fields determines the sort order of the data.
 #
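For the clustering reader documented above, a short sketch of reading the clustering configuration back (names are placeholders):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
table    = bigquery.dataset("my_dataset").table("my_table")

# Returns the clustering column names in sort order, or nil if the
# table is not clustered.
puts table.clustering_fields.inspect
```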
@@ -1096,12 +1181,20 @@ module Google
 # SQL](https://cloud.google.com/bigquery/docs/reference/legacy-sql)
 # dialect. Optional. The default value is false.
 # @param [Array<String>, String] udfs User-defined function resources
-# used in
-# Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
+# used in a legacy SQL query. May be either a code resource to load from
+# a Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
 # that contains code for a user-defined function (UDF). Providing an
 # inline code resource is equivalent to providing a URI for a file
-# containing the same code.
-#
+# containing the same code.
+#
+# This parameter is used for defining User Defined Function (UDF)
+# resources only when using legacy SQL. Users of standard SQL should
+# leverage either DDL (e.g. `CREATE [TEMPORARY] FUNCTION ...`) or the
+# Routines API to define UDF resources.
+#
+# For additional information on migrating, see: [Migrating to
+# standard SQL - Differences in user-defined JavaScript
+# functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions)
 #
 # @example
 # require "google/cloud/bigquery"
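The expanded `udfs` documentation above applies only to legacy SQL. A hedged sketch of passing UDF resources to a legacy SQL query job; the method shown (`dataset.query_job`), the `gs://` URI, and the SQL are illustrative assumptions, not taken from this diff:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset"

# A legacy SQL query that calls a JavaScript UDF loaded from Cloud Storage.
job = dataset.query_job "SELECT name FROM myUdf(SELECT * FROM [my_dataset.my_table])",
                        legacy_sql: true,
                        udfs: ["gs://my-bucket/my-udf.js"]
job.wait_until_done!
```

As the updated docs note, standard SQL users should prefer `CREATE [TEMPORARY] FUNCTION` DDL or the Routines API instead.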
@@ -1217,8 +1310,7 @@ module Google
 def data token: nil, max: nil, start: nil
 ensure_service!
 reload! unless resource_full?
-
-data_json = service.list_tabledata dataset_id, table_id, options
+data_json = service.list_tabledata dataset_id, table_id, token: token, max: max, start: start
 Data.from_gapi_json data_json, gapi, nil, service
 end

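`Table#data` now forwards its `token:`, `max:` and `start:` keywords directly to the service call. A small paging sketch (the column name is a placeholder):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
table    = bigquery.dataset("my_dataset").table("my_table")

# Page through the table 100 rows at a time.
data = table.data max: 100
loop do
  data.each { |row| puts row[:name] }
  break unless data.next?
  data = data.next
end
```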
@@ -1978,12 +2070,13 @@ module Google
 #
 # @param [Hash, Array<Hash>] rows A hash object or array of hash objects
 # containing the data. Required.
-# @param [Array<String
-#
-#
-#
-#
-#
+# @param [Array<String|Symbol>, Symbol] insert_ids A unique ID for each row. BigQuery uses this property to
+# detect duplicate insertion requests on a best-effort basis. For more information, see [data
+# consistency](https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataconsistency). Optional. If
+# not provided, the client library will assign a UUID to each row before the request is sent.
+#
+# The value `:skip` can be provided to skip the generation of IDs for all rows, or to skip the generation of an
+# ID for a specific row in the array.
 # @param [Boolean] skip_invalid Insert all valid rows of a request, even
 # if invalid rows exist. The default value is `false`, which causes
 # the entire request to fail if any invalid rows exist.
@@ -2023,12 +2116,14 @@ module Google
 #
 def insert rows, insert_ids: nil, skip_invalid: nil, ignore_unknown: nil
 rows = [rows] if rows.is_a? Hash
+raise ArgumentError, "No rows provided" if rows.empty?
+
+insert_ids = Array.new(rows.count) { :skip } if insert_ids == :skip
 insert_ids = Array insert_ids
 if insert_ids.count.positive? && insert_ids.count != rows.count
 raise ArgumentError, "insert_ids must be the same size as rows"
 end
-
-raise ArgumentError, "No rows provided" if rows.empty?
+
 ensure_service!
 options = { skip_invalid: skip_invalid, ignore_unknown: ignore_unknown, insert_ids: insert_ids }
 gapi = service.insert_tabledata dataset_id, table_id, rows, options
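The reworked `insert` now rejects empty row arrays up front and expands `insert_ids: :skip` to one `:skip` per row before the size check. A usage sketch (table and column names are placeholders):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
table    = bigquery.dataset("my_dataset").table("my_table")

rows = [
  { first_name: "Alice", score: 2 },
  { first_name: "Bob",   score: 3 }
]

table.insert rows                               # client assigns a UUID insert ID per row
table.insert rows, insert_ids: :skip            # no insert IDs, no best-effort dedup
table.insert rows, insert_ids: ["id-1", "id-2"] # one ID per row; sizes must match
table.insert []                                 # => ArgumentError: No rows provided
```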
@@ -2162,7 +2257,7 @@ module Google
 # table = dataset.table "my_table", skip_lookup: true
 # table.exists? # true
 #
-def exists? force:
+def exists? force: false
 return gapi_exists? if force
 # If we have a value, return it
 return @exists unless @exists.nil?
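With the new `force: false` default, `exists?` can be called with no arguments on a reference. A short sketch:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset"

table = dataset.table "my_table", skip_lookup: true
table.exists?              # uses the cached/assumed answer, as in the example above
table.exists? force: true  # always checks against the BigQuery service
```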
@@ -2276,7 +2371,7 @@ module Google
 end

 ##
-# @private New lazy Table object without making an HTTP request.
+# @private New lazy Table object without making an HTTP request, for use with the skip_lookup option.
 def self.new_reference project_id, dataset_id, table_id, service
 raise ArgumentError, "dataset_id is required" unless dataset_id
 raise ArgumentError, "table_id is required" unless table_id
@@ -2505,20 +2600,182 @@ module Google
 end

 ##
-# Yielded to a block to accumulate changes for a
+# Yielded to a block to accumulate changes for a create request. See {Dataset#create_table}.
 class Updater < Table
 ##
-# A list of attributes that were updated.
+# @private A list of attributes that were updated.
 attr_reader :updates

 ##
-# Create an Updater object.
+# @private Create an Updater object.
 def initialize gapi
 @updates = []
 @gapi = gapi
 @schema = nil
 end

+##
+# Sets the field on which to range partition the table. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# See {Table::Updater#range_partitioning_start=}, {Table::Updater#range_partitioning_interval=} and
+# {Table::Updater#range_partitioning_end=}.
+#
+# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
+# you to change partitioning on an existing table.
+#
+# @param [String] field The range partition field. The table is partitioned by this
+# field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
+# type is `INTEGER/INT64`.
+#
+# @example
+# require "google/cloud/bigquery"
+#
+# bigquery = Google::Cloud::Bigquery.new
+# dataset = bigquery.dataset "my_dataset"
+#
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
+# schema.integer "my_table_id", mode: :required
+# schema.string "my_table_data", mode: :required
+# end
+# t.range_partitioning_field = "my_table_id"
+# t.range_partitioning_start = 0
+# t.range_partitioning_interval = 10
+# t.range_partitioning_end = 100
+# end
+#
+# @!group Attributes
+#
+def range_partitioning_field= field
+reload! unless resource_full?
+@gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+)
+@gapi.range_partitioning.field = field
+patch_gapi! :range_partitioning
+end
+
+##
+# Sets the start of range partitioning, inclusive, for the table. See [Creating and using integer range
+# partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
+# you to change partitioning on an existing table.
+#
+# See {Table::Updater#range_partitioning_field=}, {Table::Updater#range_partitioning_interval=} and
+# {Table::Updater#range_partitioning_end=}.
+#
+# @param [Integer] range_start The start of range partitioning, inclusive.
+#
+# @example
+# require "google/cloud/bigquery"
+#
+# bigquery = Google::Cloud::Bigquery.new
+# dataset = bigquery.dataset "my_dataset"
+#
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
+# schema.integer "my_table_id", mode: :required
+# schema.string "my_table_data", mode: :required
+# end
+# t.range_partitioning_field = "my_table_id"
+# t.range_partitioning_start = 0
+# t.range_partitioning_interval = 10
+# t.range_partitioning_end = 100
+# end
+#
+# @!group Attributes
+#
+def range_partitioning_start= range_start
+reload! unless resource_full?
+@gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+)
+@gapi.range_partitioning.range.start = range_start
+patch_gapi! :range_partitioning
+end
+
+##
+# Sets width of each interval for data in range partitions. See [Creating and using integer range partitioned
+# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
+# you to change partitioning on an existing table.
+#
+# See {Table::Updater#range_partitioning_field=}, {Table::Updater#range_partitioning_start=} and
+# {Table::Updater#range_partitioning_end=}.
+#
+# @param [Integer] range_interval The width of each interval, for data in partitions.
+#
+# @example
+# require "google/cloud/bigquery"
+#
+# bigquery = Google::Cloud::Bigquery.new
+# dataset = bigquery.dataset "my_dataset"
+#
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
+# schema.integer "my_table_id", mode: :required
+# schema.string "my_table_data", mode: :required
+# end
+# t.range_partitioning_field = "my_table_id"
+# t.range_partitioning_start = 0
+# t.range_partitioning_interval = 10
+# t.range_partitioning_end = 100
+# end
+#
+# @!group Attributes
+#
+def range_partitioning_interval= range_interval
+reload! unless resource_full?
+@gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+)
+@gapi.range_partitioning.range.interval = range_interval
+patch_gapi! :range_partitioning
+end
+
+##
+# Sets the end of range partitioning, exclusive, for the table. See [Creating and using integer range
+# partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+#
+# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
+# you to change partitioning on an existing table.
+#
+# See {Table::Updater#range_partitioning_start=}, {Table::Updater#range_partitioning_interval=} and
+# {Table::Updater#range_partitioning_field=}.
+#
+# @param [Integer] range_end The end of range partitioning, exclusive.
+#
+# @example
+# require "google/cloud/bigquery"
+#
+# bigquery = Google::Cloud::Bigquery.new
+# dataset = bigquery.dataset "my_dataset"
+#
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
+# schema.integer "my_table_id", mode: :required
+# schema.string "my_table_data", mode: :required
+# end
+# t.range_partitioning_field = "my_table_id"
+# t.range_partitioning_start = 0
+# t.range_partitioning_interval = 10
+# t.range_partitioning_end = 100
+# end
+#
+# @!group Attributes
+#
+def range_partitioning_end= range_end
+reload! unless resource_full?
+@gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+)
+@gapi.range_partitioning.range.end = range_end
+patch_gapi! :range_partitioning
+end
+
 ##
 # Sets one or more fields on which data should be clustered. Must be
 # specified with time-based partitioning, data in the table will be
@@ -2550,15 +2807,15 @@ module Google
 #
 # bigquery = Google::Cloud::Bigquery.new
 # dataset = bigquery.dataset "my_dataset"
-# table = dataset.create_table "my_table" do |
-#
-# table.time_partitioning_field = "dob"
-# table.schema do |schema|
+# table = dataset.create_table "my_table" do |t|
+# t.schema do |schema|
 # schema.timestamp "dob", mode: :required
 # schema.string "first_name", mode: :required
 # schema.string "last_name", mode: :required
 # end
-#
+# t.time_partitioning_type = "DAY"
+# t.time_partitioning_field = "dob"
+# t.clustering_fields = ["last_name", "first_name"]
 # end
 #
 # @!group Attributes
@@ -2955,8 +3212,97 @@ module Google
 schema.record name, description: description, mode: mode, &block
 end

+# rubocop:disable Style/MethodDefParentheses
+
+##
+# @raise [RuntimeError] not implemented
+def data(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def copy_job(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def copy(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def extract_job(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def extract(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def load_job(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def load(*)
+raise "not implemented in #{self.class}"
+end
+
 ##
-#
+# @raise [RuntimeError] not implemented
+def insert(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def insert_async(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def delete
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def query_job(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def query(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def external(*)
+raise "not implemented in #{self.class}"
+end
+
+##
+# @raise [RuntimeError] not implemented
+def reload!
+raise "not implemented in #{self.class}"
+end
+alias refresh! reload!
+
+# rubocop:enable Style/MethodDefParentheses
+
+##
+# @private Make sure any access changes are saved
 def check_for_mutated_schema!
 return if @schema.nil?
 return unless @schema.changed?
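The batch of `Updater` overrides above makes unsupported operations fail fast inside a `create_table`/`update` block. A sketch of the resulting behavior (names and values are placeholders):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset"

dataset.create_table "my_table" do |t|
  t.description = "Created via the Updater"
  # Data and job operations are not available on the yielded Updater:
  # t.data               # => RuntimeError: not implemented in ...Table::Updater
  # t.insert [{ id: 1 }] # => RuntimeError
  # t.reload!            # => RuntimeError
end
```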
@@ -2964,6 +3310,8 @@ module Google
 patch_gapi! :schema
 end

+##
+# @private
 def to_gapi
 check_for_mutated_schema!
 @gapi
|