google-cloud-bigquery 1.19.0 → 1.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -48,6 +48,44 @@ module Google
48
48
  # puts job.data.first
49
49
  # end
50
50
  #
51
+ # @example With multiple statements and child jobs:
52
+ # require "google/cloud/bigquery"
53
+ #
54
+ # bigquery = Google::Cloud::Bigquery.new
55
+ #
56
+ # multi_statement_sql = <<~SQL
57
+ # -- Declare a variable to hold names as an array.
58
+ # DECLARE top_names ARRAY<STRING>;
59
+ # -- Build an array of the top 100 names from the year 2017.
60
+ # SET top_names = (
61
+ # SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
62
+ # FROM `bigquery-public-data.usa_names.usa_1910_current`
63
+ # WHERE year = 2017
64
+ # );
65
+ # -- Which names appear as words in Shakespeare's plays?
66
+ # SELECT
67
+ # name AS shakespeare_name
68
+ # FROM UNNEST(top_names) AS name
69
+ # WHERE name IN (
70
+ # SELECT word
71
+ # FROM `bigquery-public-data.samples.shakespeare`
72
+ # );
73
+ # SQL
74
+ #
75
+ # job = bigquery.query_job multi_statement_sql
76
+ #
77
+ # job.wait_until_done!
78
+ #
79
+ # child_jobs = bigquery.jobs parent_job: job
80
+ #
81
+ # child_jobs.each do |child_job|
82
+ # script_statistics = child_job.script_statistics
83
+ # puts script_statistics.evaluation_kind
84
+ # script_statistics.stack_frames.each do |stack_frame|
85
+ # puts stack_frame.text
86
+ # end
87
+ # end
88
+ #
51
89
  class QueryJob < Job
52
90
  ##
53
91
  # Checks if the priority for the query is `BATCH`.
@@ -410,6 +448,69 @@ module Google
410
448
  EncryptionConfiguration.from_gapi @gapi.configuration.query.destination_encryption_configuration
411
449
  end
412
450
 
451
+ ###
452
+ # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
453
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
454
+ #
455
+ # @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
456
+ #
457
+ # @!group Attributes
458
+ #
459
+ def range_partitioning?
460
+ !@gapi.configuration.query.range_partitioning.nil?
461
+ end
462
+
463
+ ###
464
+ # The field on which the destination table will be range partitioned, if any. The field must be a
465
+ # top-level `NULLABLE/REQUIRED` field. The only supported type is `INTEGER/INT64`. See
466
+ # [Creating and using integer range partitioned
467
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
468
+ #
469
+ # @return [String, nil] The partition field, if a field was configured, or `nil` if not range partitioned.
470
+ #
471
+ # @!group Attributes
472
+ #
473
+ def range_partitioning_field
474
+ @gapi.configuration.query.range_partitioning.field if range_partitioning?
475
+ end
476
+
477
+ ###
478
+ # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
479
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
480
+ #
481
+ # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned.
482
+ #
483
+ # @!group Attributes
484
+ #
485
+ def range_partitioning_start
486
+ @gapi.configuration.query.range_partitioning.range.start if range_partitioning?
487
+ end
488
+
489
+ ###
490
+ # The width of each interval. See [Creating and using integer range partitioned
491
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
492
+ #
493
+ # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
494
+ # partitioned.
495
+ #
496
+ # @!group Attributes
497
+ #
498
+ def range_partitioning_interval
499
+ @gapi.configuration.query.range_partitioning.range.interval if range_partitioning?
500
+ end
501
+
502
+ ###
503
+ # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
504
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
505
+ #
506
+ # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned.
507
+ #
508
+ # @!group Attributes
509
+ #
510
+ def range_partitioning_end
511
+ @gapi.configuration.query.range_partitioning.range.end if range_partitioning?
512
+ end
513
+
413
514
  ###
414
515
  # Checks if the destination table will be time-partitioned. See
415
516
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
@@ -574,9 +675,12 @@ module Google
574
675
  #
575
676
  # job.wait_until_done!
576
677
  # data = job.data
678
+ #
679
+ # # Iterate over the first page of results
577
680
  # data.each do |row|
578
681
  # puts row[:word]
579
682
  # end
683
+ # # Retrieve the next page of results
580
684
  # data = data.next if data.next?
581
685
  #
582
686
  def data token: nil, max: nil, start: nil
@@ -927,12 +1031,21 @@ module Google
927
1031
  # Sets the labels to use for the job.
928
1032
  #
929
1033
  # @param [Hash] value A hash of user-provided labels associated with
930
- # the job. You can use these to organize and group your jobs. Label
931
- # keys and values can be no longer than 63 characters, can only
932
- # contain lowercase letters, numeric characters, underscores and
933
- # dashes. International characters are allowed. Label values are
934
- # optional. Label keys must start with a letter and each label in
935
- # the list must have a different key.
1034
+ # the job. You can use these to organize and group your jobs.
1035
+ #
1036
+ # The labels applied to a resource must meet the following requirements:
1037
+ #
1038
+ # * Each resource can have multiple labels, up to a maximum of 64.
1039
+ # * Each label must be a key-value pair.
1040
+ # * Keys have a minimum length of 1 character and a maximum length of
1041
+ # 63 characters, and cannot be empty. Values can be empty, and have
1042
+ # a maximum length of 63 characters.
1043
+ # * Keys and values can contain only lowercase letters, numeric characters,
1044
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
1045
+ # international characters are allowed.
1046
+ # * The key portion of a label must be unique. However, you can use the
1047
+ # same key with multiple resources.
1048
+ # * Keys must start with a lowercase letter or international character.
936
1049
  #
937
1050
  # @!group Attributes
938
1051
  #
@@ -1028,6 +1141,164 @@ module Google
1028
1141
  @gapi.configuration.query.update! destination_encryption_configuration: val.to_gapi
1029
1142
  end
1030
1143
 
1144
+ ##
1145
+ # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
1146
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1147
+ #
1148
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1149
+ #
1150
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1151
+ # partitioning on an existing table.
1152
+ #
1153
+ # @param [String] field The range partition field. The destination table is partitioned by this
1154
+ # field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
1155
+ # type is `INTEGER/INT64`.
1156
+ #
1157
+ # @example
1158
+ # require "google/cloud/bigquery"
1159
+ #
1160
+ # bigquery = Google::Cloud::Bigquery.new
1161
+ # dataset = bigquery.dataset "my_dataset"
1162
+ # destination_table = dataset.table "my_destination_table",
1163
+ # skip_lookup: true
1164
+ #
1165
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1166
+ # job.table = destination_table
1167
+ # job.range_partitioning_field = "num"
1168
+ # job.range_partitioning_start = 0
1169
+ # job.range_partitioning_interval = 10
1170
+ # job.range_partitioning_end = 100
1171
+ # end
1172
+ #
1173
+ # job.wait_until_done!
1174
+ # job.done? #=> true
1175
+ #
1176
+ # @!group Attributes
1177
+ #
1178
+ def range_partitioning_field= field
1179
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1180
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1181
+ )
1182
+ @gapi.configuration.query.range_partitioning.field = field
1183
+ end
1184
+
1185
+ ##
1186
+ # Sets the start of range partitioning, inclusive, for the destination table. See [Creating and using integer
1187
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1188
+ #
1189
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1190
+ # partitioning on an existing table.
1191
+ #
1192
+ # See {#range_partitioning_field=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1193
+ #
1194
+ # @param [Integer] range_start The start of range partitioning, inclusive.
1195
+ #
1196
+ # @example
1197
+ # require "google/cloud/bigquery"
1198
+ #
1199
+ # bigquery = Google::Cloud::Bigquery.new
1200
+ # dataset = bigquery.dataset "my_dataset"
1201
+ # destination_table = dataset.table "my_destination_table",
1202
+ # skip_lookup: true
1203
+ #
1204
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1205
+ # job.table = destination_table
1206
+ # job.range_partitioning_field = "num"
1207
+ # job.range_partitioning_start = 0
1208
+ # job.range_partitioning_interval = 10
1209
+ # job.range_partitioning_end = 100
1210
+ # end
1211
+ #
1212
+ # job.wait_until_done!
1213
+ # job.done? #=> true
1214
+ #
1215
+ # @!group Attributes
1216
+ #
1217
+ def range_partitioning_start= range_start
1218
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1219
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1220
+ )
1221
+ @gapi.configuration.query.range_partitioning.range.start = range_start
1222
+ end
1223
+
1224
+ ##
1225
+ # Sets the width of each interval for data in range partitions. See [Creating and using integer range partitioned
1226
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1227
+ #
1228
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1229
+ # partitioning on an existing table.
1230
+ #
1231
+ # See {#range_partitioning_field=}, {#range_partitioning_start=} and {#range_partitioning_end=}.
1232
+ #
1233
+ # @param [Integer] range_interval The width of each interval, for data in partitions.
1234
+ #
1235
+ # @example
1236
+ # require "google/cloud/bigquery"
1237
+ #
1238
+ # bigquery = Google::Cloud::Bigquery.new
1239
+ # dataset = bigquery.dataset "my_dataset"
1240
+ # destination_table = dataset.table "my_destination_table",
1241
+ # skip_lookup: true
1242
+ #
1243
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1244
+ # job.table = destination_table
1245
+ # job.range_partitioning_field = "num"
1246
+ # job.range_partitioning_start = 0
1247
+ # job.range_partitioning_interval = 10
1248
+ # job.range_partitioning_end = 100
1249
+ # end
1250
+ #
1251
+ # job.wait_until_done!
1252
+ # job.done? #=> true
1253
+ #
1254
+ # @!group Attributes
1255
+ #
1256
+ def range_partitioning_interval= range_interval
1257
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1258
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1259
+ )
1260
+ @gapi.configuration.query.range_partitioning.range.interval = range_interval
1261
+ end
1262
+
1263
+ ##
1264
+ # Sets the end of range partitioning, exclusive, for the destination table. See [Creating and using integer
1265
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1266
+ #
1267
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1268
+ # partitioning on an existing table.
1269
+ #
1270
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_field=}.
1271
+ #
1272
+ # @param [Integer] range_end The end of range partitioning, exclusive.
1273
+ #
1274
+ # @example
1275
+ # require "google/cloud/bigquery"
1276
+ #
1277
+ # bigquery = Google::Cloud::Bigquery.new
1278
+ # dataset = bigquery.dataset "my_dataset"
1279
+ # destination_table = dataset.table "my_destination_table",
1280
+ # skip_lookup: true
1281
+ #
1282
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1283
+ # job.table = destination_table
1284
+ # job.range_partitioning_field = "num"
1285
+ # job.range_partitioning_start = 0
1286
+ # job.range_partitioning_interval = 10
1287
+ # job.range_partitioning_end = 100
1288
+ # end
1289
+ #
1290
+ # job.wait_until_done!
1291
+ # job.done? #=> true
1292
+ #
1293
+ # @!group Attributes
1294
+ #
1295
+ def range_partitioning_end= range_end
1296
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1297
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1298
+ )
1299
+ @gapi.configuration.query.range_partitioning.range.end = range_end
1300
+ end
1301
+
1031
1302
  ##
1032
1303
  # Sets the partitioning for the destination table. See [Partitioned
1033
1304
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
@@ -232,8 +232,8 @@ module Google
232
232
  #
233
233
  # bigquery = Google::Cloud::Bigquery.new
234
234
  # dataset = bigquery.dataset "my_dataset"
235
- # table = dataset.table "my_table" do |table|
236
- # table.schema.load File.read("path/to/schema.json")
235
+ # table = dataset.table "my_table" do |t|
236
+ # t.schema.load File.read("path/to/schema.json")
237
237
  # end
238
238
  #
239
239
  def load source
@@ -138,6 +138,8 @@ module Google
138
138
  end
139
139
  end
140
140
 
141
+ ##
142
+ # Gets the specified table resource by full table reference.
141
143
  def get_project_table project_id, dataset_id, table_id
142
144
  # The get operation is considered idempotent
143
145
  execute backoff: true do
@@ -151,10 +153,7 @@ module Google
151
153
  # it only returns the table resource,
152
154
  # which describes the structure of this table.
153
155
  def get_table dataset_id, table_id
154
- # The get operation is considered idempotent
155
- execute backoff: true do
156
- get_project_table @project, dataset_id, table_id
157
- end
156
+ get_project_table @project, dataset_id, table_id
158
157
  end
159
158
 
160
159
  ##
@@ -250,18 +249,21 @@ module Google
250
249
  end
251
250
  end
252
251
 
253
- # Gets the specified model resource by model ID.
254
- # This method does not return the data in the model,
255
- # it only returns the model resource,
256
- # which describes the structure of this model.
257
- def get_model dataset_id, model_id
252
+ # Gets the specified model resource by full model reference.
253
+ def get_project_model project_id, dataset_id, model_id
258
254
  # The get operation is considered idempotent
259
255
  execute backoff: true do
260
- json_txt = service.get_model @project, dataset_id, model_id, options: { skip_deserialization: true }
256
+ json_txt = service.get_model project_id, dataset_id, model_id, options: { skip_deserialization: true }
261
257
  JSON.parse json_txt, symbolize_names: true
262
258
  end
263
259
  end
264
260
 
261
+ # Gets the specified model resource by model ID. This method does not return the data in the model, it only
262
+ # returns the model resource, which describes the structure of this model.
263
+ def get_model dataset_id, model_id
264
+ get_project_model @project, dataset_id, model_id
265
+ end
266
+
265
267
  ##
266
268
  # Updates information in an existing model, replacing fields that
267
269
  # are provided in the submitted model resource.
@@ -339,14 +341,16 @@ module Google
339
341
  ##
340
342
  # Lists all jobs in the specified project to which you have
341
343
  # been granted the READER job role.
342
- def list_jobs all: nil, max: nil, token: nil, filter: nil, min_created_at: nil, max_created_at: nil
344
+ def list_jobs all: nil, token: nil, max: nil, filter: nil, min_created_at: nil, max_created_at: nil,
345
+ parent_job_id: nil
343
346
  # The list operation is considered idempotent
344
347
  min_creation_time = Convert.time_to_millis min_created_at
345
348
  max_creation_time = Convert.time_to_millis max_created_at
346
349
  execute backoff: true do
347
350
  service.list_jobs @project, all_users: all, max_results: max,
348
351
  page_token: token, projection: "full", state_filter: filter,
349
- min_creation_time: min_creation_time, max_creation_time: max_creation_time
352
+ min_creation_time: min_creation_time, max_creation_time: max_creation_time,
353
+ parent_job_id: parent_job_id
350
354
  end
351
355
  end
352
356
 
@@ -155,10 +155,87 @@ module Google
155
155
  end
156
156
 
157
157
  ###
158
- # Checks if the table is time-partitioned. See [Partitioned
158
+ # Checks if the table is range partitioned. See [Creating and using integer range partitioned
159
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
160
+ #
161
+ # @return [Boolean, nil] `true` when the table is range partitioned, or
162
+ # `false` otherwise, if the object is a resource (see {#resource?});
163
+ # `nil` if the object is a reference (see {#reference?}).
164
+ #
165
+ # @!group Attributes
166
+ #
167
+ def range_partitioning?
168
+ return nil if reference?
169
+ !@gapi.range_partitioning.nil?
170
+ end
171
+
172
+ ###
173
+ # The field on which the table is range partitioned, if any. The field must be a top-level `NULLABLE/REQUIRED`
174
+ # field. The only supported type is `INTEGER/INT64`. See [Creating and using integer range partitioned
175
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
176
+ #
177
+ # @return [String, nil] The range partition field, or `nil` if not range partitioned or the object is a
178
+ # reference (see {#reference?}).
179
+ #
180
+ # @!group Attributes
181
+ #
182
+ def range_partitioning_field
183
+ return nil if reference?
184
+ ensure_full_data!
185
+ @gapi.range_partitioning.field if range_partitioning?
186
+ end
187
+
188
+ ###
189
+ # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
190
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
191
+ #
192
+ # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned or the
193
+ # object is a reference (see {#reference?}).
194
+ #
195
+ # @!group Attributes
196
+ #
197
+ def range_partitioning_start
198
+ return nil if reference?
199
+ ensure_full_data!
200
+ @gapi.range_partitioning.range.start if range_partitioning?
201
+ end
202
+
203
+ ###
204
+ # The width of each interval. See [Creating and using integer range partitioned
205
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
206
+ #
207
+ # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
208
+ # partitioned or the object is a reference (see {#reference?}).
209
+ #
210
+ # @!group Attributes
211
+ #
212
+ def range_partitioning_interval
213
+ return nil if reference?
214
+ ensure_full_data!
215
+ return nil unless range_partitioning?
216
+ @gapi.range_partitioning.range.interval
217
+ end
218
+
219
+ ###
220
+ # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
221
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
222
+ #
223
+ # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned or the
224
+ # object is a reference (see {#reference?}).
225
+ #
226
+ # @!group Attributes
227
+ #
228
+ def range_partitioning_end
229
+ return nil if reference?
230
+ ensure_full_data!
231
+ @gapi.range_partitioning.range.end if range_partitioning?
232
+ end
233
+
234
+ ###
235
+ # Checks if the table is time partitioned. See [Partitioned
159
236
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
160
237
  #
161
- # @return [Boolean, nil] `true` when the table is time-partitioned, or
238
+ # @return [Boolean, nil] `true` when the table is time partitioned, or
162
239
  # `false` otherwise, if the object is a resource (see {#resource?});
163
240
  # `nil` if the object is a reference (see {#reference?}).
164
241
  #
@@ -170,10 +247,10 @@ module Google
170
247
  end
171
248
 
172
249
  ###
173
- # The period for which the table is partitioned, if any. See
250
+ # The period for which the table is time partitioned, if any. See
174
251
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
175
252
  #
176
- # @return [String, nil] The partition type. Currently the only supported
253
+ # @return [String, nil] The time partition type. Currently the only supported
177
254
  # value is "DAY", or `nil` if the object is a reference (see
178
255
  # {#reference?}).
179
256
  #
@@ -186,14 +263,14 @@ module Google
186
263
  end
187
264
 
188
265
  ##
189
- # Sets the partitioning for the table. See [Partitioned
266
+ # Sets the time partitioning type for the table. See [Partitioned
190
267
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
191
268
  #
192
- # You can only set partitioning when creating a table as in
193
- # the example below. BigQuery does not allow you to change partitioning
269
+ # You can only set time partitioning when creating a table as in
270
+ # the example below. BigQuery does not allow you to change time partitioning
194
271
  # on an existing table.
195
272
  #
196
- # @param [String] type The partition type. Currently the only
273
+ # @param [String] type The time partition type. Currently the only
197
274
  # supported value is "DAY".
198
275
  #
199
276
  # @example
@@ -201,8 +278,12 @@ module Google
201
278
  #
202
279
  # bigquery = Google::Cloud::Bigquery.new
203
280
  # dataset = bigquery.dataset "my_dataset"
204
- # table = dataset.create_table "my_table" do |table|
205
- # table.time_partitioning_type = "DAY"
281
+ # table = dataset.create_table "my_table" do |t|
282
+ # t.schema do |schema|
283
+ # schema.timestamp "dob", mode: :required
284
+ # end
285
+ # t.time_partitioning_type = "DAY"
286
+ # t.time_partitioning_field = "dob"
206
287
  # end
207
288
  #
208
289
  # @!group Attributes
@@ -215,13 +296,13 @@ module Google
215
296
  end
216
297
 
217
298
  ###
218
- # The field on which the table is partitioned, if any. If not
219
- # set, the destination table is partitioned by pseudo column
220
- # `_PARTITIONTIME`; if set, the table is partitioned by this field. See
299
+ # The field on which the table is time partitioned, if any. If not
300
+ # set, the destination table is time partitioned by pseudo column
301
+ # `_PARTITIONTIME`; if set, the table is time partitioned by this field. See
221
302
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
222
303
  #
223
- # @return [String, nil] The partition field, if a field was configured.
224
- # `nil` if not partitioned, not set (partitioned by pseudo column
304
+ # @return [String, nil] The time partition field, if a field was configured.
305
+ # `nil` if not time partitioned, not set (time partitioned by pseudo column
225
306
  # '_PARTITIONTIME') or the object is a reference (see {#reference?}).
226
307
  #
227
308
  # @!group Attributes
@@ -233,19 +314,19 @@ module Google
233
314
  end
234
315
 
235
316
  ##
236
- # Sets the field on which to partition the table. If not
237
- # set, the destination table is partitioned by pseudo column
238
- # `_PARTITIONTIME`; if set, the table is partitioned by this field. See
317
+ # Sets the field on which to time partition the table. If not
318
+ # set, the destination table is time partitioned by pseudo column
319
+ # `_PARTITIONTIME`; if set, the table is time partitioned by this field. See
239
320
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
240
- # The table must also be partitioned.
321
+ # The table must also be time partitioned.
241
322
  #
242
323
  # See {Table#time_partitioning_type=}.
243
324
  #
244
- # You can only set the partitioning field while creating a table as in
245
- # the example below. BigQuery does not allow you to change partitioning
325
+ # You can only set the time partitioning field while creating a table as in
326
+ # the example below. BigQuery does not allow you to change time partitioning
246
327
  # on an existing table.
247
328
  #
248
- # @param [String] field The partition field. The field must be a
329
+ # @param [String] field The time partition field. The field must be a
249
330
  # top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
250
331
  # REQUIRED.
251
332
  #
@@ -254,12 +335,12 @@ module Google
254
335
  #
255
336
  # bigquery = Google::Cloud::Bigquery.new
256
337
  # dataset = bigquery.dataset "my_dataset"
257
- # table = dataset.create_table "my_table" do |table|
258
- # table.time_partitioning_type = "DAY"
259
- # table.time_partitioning_field = "dob"
260
- # table.schema do |schema|
338
+ # table = dataset.create_table "my_table" do |t|
339
+ # t.schema do |schema|
261
340
  # schema.timestamp "dob", mode: :required
262
341
  # end
342
+ # t.time_partitioning_type = "DAY"
343
+ # t.time_partitioning_field = "dob"
263
344
  # end
264
345
  #
265
346
  # @!group Attributes
@@ -272,11 +353,11 @@ module Google
272
353
  end
273
354
 
274
355
  ###
275
- # The expiration for the table partitions, if any, in seconds. See
356
+ # The expiration for the time partitions, if any, in seconds. See
276
357
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
277
358
  #
278
359
  # @return [Integer, nil] The expiration time, in seconds, for data in
279
- # partitions, or `nil` if not present or the object is a reference
360
+ # time partitions, or `nil` if not present or the object is a reference
280
361
  # (see {#reference?}).
281
362
  #
282
363
  # @!group Attributes
@@ -290,9 +371,9 @@ module Google
290
371
  end
291
372
 
292
373
  ##
293
- # Sets the partition expiration for the table. See [Partitioned
374
+ # Sets the time partition expiration for the table. See [Partitioned
294
375
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
295
- # The table must also be partitioned.
376
+ # The table must also be time partitioned.
296
377
  #
297
378
  # See {Table#time_partitioning_type=}.
298
379
  #
@@ -301,16 +382,20 @@ module Google
301
382
  # the update to comply with ETag-based optimistic concurrency control.
302
383
  #
303
384
  # @param [Integer] expiration An expiration time, in seconds,
304
- # for data in partitions.
385
+ # for data in time partitions.
305
386
  #
306
387
  # @example
307
388
  # require "google/cloud/bigquery"
308
389
  #
309
390
  # bigquery = Google::Cloud::Bigquery.new
310
391
  # dataset = bigquery.dataset "my_dataset"
311
- # table = dataset.create_table "my_table" do |table|
312
- # table.time_partitioning_type = "DAY"
313
- # table.time_partitioning_expiration = 86_400
392
+ # table = dataset.create_table "my_table" do |t|
393
+ # t.schema do |schema|
394
+ # schema.timestamp "dob", mode: :required
395
+ # end
396
+ # t.time_partitioning_type = "DAY"
397
+ # t.time_partitioning_field = "dob"
398
+ # t.time_partitioning_expiration = 86_400
314
399
  # end
315
400
  #
316
401
  # @!group Attributes
@@ -356,8 +441,8 @@ module Google
356
441
  #
357
442
  # bigquery = Google::Cloud::Bigquery.new
358
443
  # dataset = bigquery.dataset "my_dataset"
359
- # table = dataset.create_table "my_table" do |table|
360
- # table.require_partition_filter = true
444
+ # table = dataset.create_table "my_table" do |t|
445
+ # t.require_partition_filter = true
361
446
  # end
362
447
  #
363
448
  # @!group Attributes
@@ -387,7 +472,7 @@ module Google
387
472
 
388
473
  ###
389
474
  # One or more fields on which data should be clustered. Must be
390
- # specified with time-based partitioning, data in the table will be
475
+ # specified with time partitioning, data in the table will be
391
476
  # first partitioned and subsequently clustered. The order of the
392
477
  # returned fields determines the sort order of the data.
393
478
  #
@@ -735,12 +820,19 @@ module Google
735
820
  # @param [Hash<String, String>] labels A hash containing key/value
736
821
  # pairs.
737
822
  #
738
- # * Label keys and values can be no longer than 63 characters.
739
- # * Label keys and values can contain only lowercase letters, numbers,
740
- # underscores, hyphens, and international characters.
741
- # * Label keys and values cannot exceed 128 bytes in size.
742
- # * Label keys must begin with a letter.
743
- # * Label keys must be unique within a table.
823
+ # The labels applied to a resource must meet the following requirements:
824
+ #
825
+ # * Each resource can have multiple labels, up to a maximum of 64.
826
+ # * Each label must be a key-value pair.
827
+ # * Keys have a minimum length of 1 character and a maximum length of
828
+ # 63 characters, and cannot be empty. Values can be empty, and have
829
+ # a maximum length of 63 characters.
830
+ # * Keys and values can contain only lowercase letters, numeric characters,
831
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
832
+ # international characters are allowed.
833
+ # * The key portion of a label must be unique. However, you can use the
834
+ # same key with multiple resources.
835
+ # * Keys must start with a lowercase letter or international character.
744
836
  #
745
837
  # @example
746
838
  # require "google/cloud/bigquery"
@@ -1096,12 +1188,20 @@ module Google
1096
1188
  # SQL](https://cloud.google.com/bigquery/docs/reference/legacy-sql)
1097
1189
  # dialect. Optional. The default value is false.
1098
1190
  # @param [Array<String>, String] udfs User-defined function resources
1099
- # used in the query. May be either a code resource to load from a
1100
- # Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
1191
+ # used in a legacy SQL query. May be either a code resource to load from
1192
+ # a Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
1101
1193
  # that contains code for a user-defined function (UDF). Providing an
1102
1194
  # inline code resource is equivalent to providing a URI for a file
1103
- # containing the same code. See [User-Defined
1104
- # Functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions).
1195
+ # containing the same code.
1196
+ #
1197
+ # This parameter is used for defining User Defined Function (UDF)
1198
+ # resources only when using legacy SQL. Users of standard SQL should
1199
+ # leverage either DDL (e.g. `CREATE [TEMPORARY] FUNCTION ...`) or the
1200
+ # Routines API to define UDF resources.
1201
+ #
1202
+ # For additional information on migrating, see: [Migrating to
1203
+ # standard SQL - Differences in user-defined JavaScript
1204
+ # functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions)
1105
1205
  #
1106
1206
  # @example
1107
1207
  # require "google/cloud/bigquery"
@@ -1193,12 +1293,13 @@ module Google
1193
1293
  # table = dataset.table "my_table"
1194
1294
  #
1195
1295
  # data = table.data
1296
+ #
1297
+ # # Iterate over the first page of results
1196
1298
  # data.each do |row|
1197
- # puts row[:first_name]
1198
- # end
1199
- # if data.next?
1200
- # more_data = data.next if data.next?
1299
+ # puts row[:name]
1201
1300
  # end
1301
+ # # Retrieve the next page of results
1302
+ # data = data.next if data.next?
1202
1303
  #
1203
1304
  # @example Retrieve all rows of data: (See {Data#all})
1204
1305
  # require "google/cloud/bigquery"
@@ -1208,8 +1309,9 @@ module Google
1208
1309
  # table = dataset.table "my_table"
1209
1310
  #
1210
1311
  # data = table.data
1312
+ #
1211
1313
  # data.all do |row|
1212
- # puts row[:first_name]
1314
+ # puts row[:name]
1213
1315
  # end
1214
1316
  #
1215
1317
  # @!group Data
@@ -1275,13 +1377,21 @@ module Google
1275
1377
  # is 1,024 characters. If `job_id` is provided, then `prefix` will not
1276
1378
  # be used.
1277
1379
  # @param [Hash] labels A hash of user-provided labels associated with
1278
- # the job. You can use these to organize and group your jobs. Label
1279
- # keys and values can be no longer than 63 characters, can only
1280
- # contain lowercase letters, numeric characters, underscores and
1281
- # dashes. International characters are allowed. Label values are
1282
- # optional. Label keys must start with a letter and each label in the
1283
- # list must have a different key. See [Requirements for
1284
- # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
1380
+ # the job. You can use these to organize and group your jobs.
1381
+ #
1382
+ # The labels applied to a resource must meet the following requirements:
1383
+ #
1384
+ # * Each resource can have multiple labels, up to a maximum of 64.
1385
+ # * Each label must be a key-value pair.
1386
+ # * Keys have a minimum length of 1 character and a maximum length of
1387
+ # 63 characters, and cannot be empty. Values can be empty, and have
1388
+ # a maximum length of 63 characters.
1389
+ # * Keys and values can contain only lowercase letters, numeric characters,
1390
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
1391
+ # international characters are allowed.
1392
+ # * The key portion of a label must be unique. However, you can use the
1393
+ # same key with multiple resources.
1394
+ # * Keys must start with a lowercase letter or international character.
1285
1395
  # @param [Boolean] dryrun If set, don't actually run this job. Behavior
1286
1396
  # is undefined however for non-query jobs and may result in an error.
1287
1397
  # Deprecated.
@@ -1416,11 +1526,11 @@ module Google
1416
1526
  # The geographic location for the job ("US", "EU", etc.) can be set via
1417
1527
  # {ExtractJob::Updater#location=} in a block passed to this method. If
1418
1528
  # the table is a full resource representation (see {#resource_full?}),
1419
- # the location of the job will be automatically set to the location of
1529
+ # the location of the job will automatically be set to the location of
1420
1530
  # the table.
1421
1531
  #
1422
- # @see https://cloud.google.com/bigquery/exporting-data-from-bigquery
1423
- # Exporting Data From BigQuery
1532
+ # @see https://cloud.google.com/bigquery/docs/exporting-data
1533
+ # Exporting table data
1424
1534
  #
1425
1535
  # @param [Google::Cloud::Storage::File, String, Array<String>]
1426
1536
  # extract_url The Google Storage file or file URI pattern(s) to which
@@ -1456,13 +1566,21 @@ module Google
1456
1566
  # is 1,024 characters. If `job_id` is provided, then `prefix` will not
1457
1567
  # be used.
1458
1568
  # @param [Hash] labels A hash of user-provided labels associated with
1459
- # the job. You can use these to organize and group your jobs. Label
1460
- # keys and values can be no longer than 63 characters, can only
1461
- # contain lowercase letters, numeric characters, underscores and
1462
- # dashes. International characters are allowed. Label values are
1463
- # optional. Label keys must start with a letter and each label in the
1464
- # list must have a different key. See [Requirements for
1465
- # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
1569
+ # the job. You can use these to organize and group your jobs.
1570
+ #
1571
+ # The labels applied to a resource must meet the following requirements:
1572
+ #
1573
+ # * Each resource can have multiple labels, up to a maximum of 64.
1574
+ # * Each label must be a key-value pair.
1575
+ # * Keys have a minimum length of 1 character and a maximum length of
1576
+ # 63 characters, and cannot be empty. Values can be empty, and have
1577
+ # a maximum length of 63 characters.
1578
+ # * Keys and values can contain only lowercase letters, numeric characters,
1579
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
1580
+ # international characters are allowed.
1581
+ # * The key portion of a label must be unique. However, you can use the
1582
+ # same key with multiple resources.
1583
+ # * Keys must start with a lowercase letter or international character.
1466
1584
  # @param [Boolean] dryrun If set, don't actually run this job. Behavior
1467
1585
  # is undefined however for non-query jobs and may result in an error.
1468
1586
  # Deprecated.
@@ -1514,8 +1632,8 @@ module Google
1514
1632
  # the location of the job will be automatically set to the location of
1515
1633
  # the table.
1516
1634
  #
1517
- # @see https://cloud.google.com/bigquery/exporting-data-from-bigquery
1518
- # Exporting Data From BigQuery
1635
+ # @see https://cloud.google.com/bigquery/docs/exporting-data
1636
+ # Exporting table data
1519
1637
  #
1520
1638
  # @param [Google::Cloud::Storage::File, String, Array<String>]
1521
1639
  # extract_url The Google Storage file or file URI pattern(s) to which
@@ -1696,13 +1814,21 @@ module Google
1696
1814
  # is 1,024 characters. If `job_id` is provided, then `prefix` will not
1697
1815
  # be used.
1698
1816
  # @param [Hash] labels A hash of user-provided labels associated with
1699
- # the job. You can use these to organize and group your jobs. Label
1700
- # keys and values can be no longer than 63 characters, can only
1701
- # contain lowercase letters, numeric characters, underscores and
1702
- # dashes. International characters are allowed. Label values are
1703
- # optional. Label keys must start with a letter and each label in the
1704
- # list must have a different key. See [Requirements for
1705
- # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
1817
+ # the job. You can use these to organize and group your jobs.
1818
+ #
1819
+ # The labels applied to a resource must meet the following requirements:
1820
+ #
1821
+ # * Each resource can have multiple labels, up to a maximum of 64.
1822
+ # * Each label must be a key-value pair.
1823
+ # * Keys have a minimum length of 1 character and a maximum length of
1824
+ # 63 characters, and cannot be empty. Values can be empty, and have
1825
+ # a maximum length of 63 characters.
1826
+ # * Keys and values can contain only lowercase letters, numeric characters,
1827
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
1828
+ # international characters are allowed.
1829
+ # * The key portion of a label must be unique. However, you can use the
1830
+ # same key with multiple resources.
1831
+ # * Keys must start with a lowercase letter or international character.
1706
1832
  # @param [Boolean] dryrun If set, don't actually run this job. Behavior
1707
1833
  # is undefined however for non-query jobs and may result in an error.
1708
1834
  # Deprecated.
@@ -2521,6 +2647,168 @@ module Google
2521
2647
  @schema = nil
2522
2648
  end
2523
2649
 
2650
+ ##
2651
+ # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
2652
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
2653
+ #
2654
+ # See {Table::Updater#range_partitioning_start=}, {Table::Updater#range_partitioning_interval=} and
2655
+ # {Table::Updater#range_partitioning_end=}.
2656
+ #
2657
+ # You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
2658
+ # you to change partitioning on an existing table.
2659
+ #
2660
+ # @param [String] field The range partition field. The table is partitioned by this
2661
+ # field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
2662
+ # type is `INTEGER/INT64`.
2663
+ #
2664
+ # @example
2665
+ # require "google/cloud/bigquery"
2666
+ #
2667
+ # bigquery = Google::Cloud::Bigquery.new
2668
+ # dataset = bigquery.dataset "my_dataset"
2669
+ #
2670
+ # table = dataset.create_table "my_table" do |t|
2671
+ # t.schema do |schema|
2672
+ # schema.integer "my_table_id", mode: :required
2673
+ # schema.string "my_table_data", mode: :required
2674
+ # end
2675
+ # t.range_partitioning_field = "my_table_id"
2676
+ # t.range_partitioning_start = 0
2677
+ # t.range_partitioning_interval = 10
2678
+ # t.range_partitioning_end = 100
2679
+ # end
2680
+ #
2681
+ # @!group Attributes
2682
+ #
2683
+ def range_partitioning_field= field
2684
+ reload! unless resource_full?
2685
+ @gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
2686
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
2687
+ )
2688
+ @gapi.range_partitioning.field = field
2689
+ patch_gapi! :range_partitioning
2690
+ end
2691
+
##
# Sets the start of range partitioning, inclusive, for the table. See [Creating and using integer range
# partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
# you to change partitioning on an existing table.
#
# See {Table::Updater#range_partitioning_field=}, {Table::Updater#range_partitioning_interval=} and
# {Table::Updater#range_partitioning_end=}.
#
# @param [Integer] range_start The start of range partitioning, inclusive.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#   dataset = bigquery.dataset "my_dataset"
#
#   table = dataset.create_table "my_table" do |t|
#     t.schema do |schema|
#       schema.integer "my_table_id", mode: :required
#       schema.string "my_table_data", mode: :required
#     end
#     t.range_partitioning_field = "my_table_id"
#     t.range_partitioning_start = 0
#     t.range_partitioning_interval = 10
#     t.range_partitioning_end = 100
#   end
#
# @!group Attributes
#
def range_partitioning_start= range_start
  # A partial resource may not carry partitioning state; fetch the full one first.
  reload! unless resource_full?
  unless @gapi.range_partitioning
    @gapi.range_partitioning = Google::Apis::BigqueryV2::RangePartitioning.new(
      range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
    )
  end
  @gapi.range_partitioning.range.start = range_start
  patch_gapi! :range_partitioning
end
+
##
# Sets width of each interval for data in range partitions. See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
# you to change partitioning on an existing table.
#
# See {Table::Updater#range_partitioning_field=}, {Table::Updater#range_partitioning_start=} and
# {Table::Updater#range_partitioning_end=}.
#
# @param [Integer] range_interval The width of each interval, for data in partitions.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#   dataset = bigquery.dataset "my_dataset"
#
#   table = dataset.create_table "my_table" do |t|
#     t.schema do |schema|
#       schema.integer "my_table_id", mode: :required
#       schema.string "my_table_data", mode: :required
#     end
#     t.range_partitioning_field = "my_table_id"
#     t.range_partitioning_start = 0
#     t.range_partitioning_interval = 10
#     t.range_partitioning_end = 100
#   end
#
# @!group Attributes
#
def range_partitioning_interval= range_interval
  # A partial resource may not carry partitioning state; fetch the full one first.
  reload! unless resource_full?
  partitioning = @gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  partitioning.range.interval = range_interval
  patch_gapi! :range_partitioning
end
+
##
# Sets the end of range partitioning, exclusive, for the table. See [Creating and using integer range
# partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
# you to change partitioning on an existing table.
#
# See {Table::Updater#range_partitioning_start=}, {Table::Updater#range_partitioning_interval=} and
# {Table::Updater#range_partitioning_field=}.
#
# @param [Integer] range_end The end of range partitioning, exclusive.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#   dataset = bigquery.dataset "my_dataset"
#
#   table = dataset.create_table "my_table" do |t|
#     t.schema do |schema|
#       schema.integer "my_table_id", mode: :required
#       schema.string "my_table_data", mode: :required
#     end
#     t.range_partitioning_field = "my_table_id"
#     t.range_partitioning_start = 0
#     t.range_partitioning_interval = 10
#     t.range_partitioning_end = 100
#   end
#
# @!group Attributes
#
def range_partitioning_end= range_end
  # A partial resource may not carry partitioning state; fetch the full one first.
  reload! unless resource_full?
  if @gapi.range_partitioning.nil?
    @gapi.range_partitioning = Google::Apis::BigqueryV2::RangePartitioning.new(
      range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
    )
  end
  @gapi.range_partitioning.range.end = range_end
  patch_gapi! :range_partitioning
end
+
2524
2812
  ##
2525
2813
  # Sets one or more fields on which data should be clustered. Must be
2526
2814
  # specified with time-based partitioning, data in the table will be
@@ -2552,15 +2840,15 @@ module Google
2552
2840
  #
2553
2841
  # bigquery = Google::Cloud::Bigquery.new
2554
2842
  # dataset = bigquery.dataset "my_dataset"
2555
- # table = dataset.create_table "my_table" do |table|
2556
- # table.time_partitioning_type = "DAY"
2557
- # table.time_partitioning_field = "dob"
2558
- # table.schema do |schema|
2843
+ # table = dataset.create_table "my_table" do |t|
2844
+ # t.schema do |schema|
2559
2845
  # schema.timestamp "dob", mode: :required
2560
2846
  # schema.string "first_name", mode: :required
2561
2847
  # schema.string "last_name", mode: :required
2562
2848
  # end
2563
- # table.clustering_fields = ["last_name", "first_name"]
2849
+ # t.time_partitioning_type = "DAY"
2850
+ # t.time_partitioning_field = "dob"
2851
+ # t.clustering_fields = ["last_name", "first_name"]
2564
2852
  # end
2565
2853
  #
2566
2854
  # @!group Attributes