google-cloud-bigquery 1.21.2
- checksums.yaml +7 -0
- data/.yardopts +16 -0
- data/AUTHENTICATION.md +158 -0
- data/CHANGELOG.md +397 -0
- data/CODE_OF_CONDUCT.md +40 -0
- data/CONTRIBUTING.md +188 -0
- data/LICENSE +201 -0
- data/LOGGING.md +27 -0
- data/OVERVIEW.md +463 -0
- data/TROUBLESHOOTING.md +31 -0
- data/lib/google-cloud-bigquery.rb +139 -0
- data/lib/google/cloud/bigquery.rb +145 -0
- data/lib/google/cloud/bigquery/argument.rb +197 -0
- data/lib/google/cloud/bigquery/convert.rb +383 -0
- data/lib/google/cloud/bigquery/copy_job.rb +316 -0
- data/lib/google/cloud/bigquery/credentials.rb +50 -0
- data/lib/google/cloud/bigquery/data.rb +526 -0
- data/lib/google/cloud/bigquery/dataset.rb +2845 -0
- data/lib/google/cloud/bigquery/dataset/access.rb +1021 -0
- data/lib/google/cloud/bigquery/dataset/list.rb +162 -0
- data/lib/google/cloud/bigquery/encryption_configuration.rb +123 -0
- data/lib/google/cloud/bigquery/external.rb +2432 -0
- data/lib/google/cloud/bigquery/extract_job.rb +368 -0
- data/lib/google/cloud/bigquery/insert_response.rb +180 -0
- data/lib/google/cloud/bigquery/job.rb +657 -0
- data/lib/google/cloud/bigquery/job/list.rb +162 -0
- data/lib/google/cloud/bigquery/load_job.rb +1704 -0
- data/lib/google/cloud/bigquery/model.rb +740 -0
- data/lib/google/cloud/bigquery/model/list.rb +164 -0
- data/lib/google/cloud/bigquery/project.rb +1655 -0
- data/lib/google/cloud/bigquery/project/list.rb +161 -0
- data/lib/google/cloud/bigquery/query_job.rb +1695 -0
- data/lib/google/cloud/bigquery/routine.rb +1108 -0
- data/lib/google/cloud/bigquery/routine/list.rb +165 -0
- data/lib/google/cloud/bigquery/schema.rb +564 -0
- data/lib/google/cloud/bigquery/schema/field.rb +668 -0
- data/lib/google/cloud/bigquery/service.rb +589 -0
- data/lib/google/cloud/bigquery/standard_sql.rb +495 -0
- data/lib/google/cloud/bigquery/table.rb +3340 -0
- data/lib/google/cloud/bigquery/table/async_inserter.rb +520 -0
- data/lib/google/cloud/bigquery/table/list.rb +172 -0
- data/lib/google/cloud/bigquery/time.rb +65 -0
- data/lib/google/cloud/bigquery/version.rb +22 -0
- metadata +297 -0
@@ -0,0 +1,3340 @@
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


require "google/cloud/errors"
require "google/cloud/bigquery/service"
require "google/cloud/bigquery/data"
require "google/cloud/bigquery/table/list"
require "google/cloud/bigquery/schema"
require "google/cloud/bigquery/encryption_configuration"
require "google/cloud/bigquery/external"
require "google/cloud/bigquery/insert_response"
require "google/cloud/bigquery/table/async_inserter"
require "google/cloud/bigquery/convert"
require "google/apis/bigquery_v2"

module Google
  module Cloud
    module Bigquery
      ##
      # # Table
      #
      # A named resource representing a BigQuery table that holds zero or more
      # records. Every table is defined by a schema that may contain nested and
      # repeated fields.
      #
      # The Table class can also represent a
      # [view](https://cloud.google.com/bigquery/docs/views), which is a virtual
      # table defined by a SQL query. BigQuery's views are logical views, not
      # materialized views, which means that the query that defines the view is
      # re-executed every time the view is queried. Queries are billed according
      # to the total amount of data in all table fields referenced directly or
      # indirectly by the top-level query. (See {#view?}, {#query}, {#query=},
      # and {Dataset#create_view}.)
      #
      # @see https://cloud.google.com/bigquery/docs/loading-data#loading_denormalized_nested_and_repeated_data
      #   Loading denormalized, nested, and repeated data
      #
      # @example
      #   require "google/cloud/bigquery"
      #
      #   bigquery = Google::Cloud::Bigquery.new
      #   dataset = bigquery.dataset "my_dataset"
      #
      #   table = dataset.create_table "my_table" do |schema|
      #     schema.string "first_name", mode: :required
      #     schema.record "cities_lived", mode: :repeated do |nested_schema|
      #       nested_schema.string "place", mode: :required
      #       nested_schema.integer "number_of_years", mode: :required
      #     end
      #   end
      #
      #   row = {
      #     "first_name" => "Alice",
      #     "cities_lived" => [
      #       {
      #         "place" => "Seattle",
      #         "number_of_years" => 5
      #       },
      #       {
      #         "place" => "Stockholm",
      #         "number_of_years" => 6
      #       }
      #     ]
      #   }
      #   table.insert row
      #
      # @example Creating a BigQuery view:
      #   require "google/cloud/bigquery"
      #
      #   bigquery = Google::Cloud::Bigquery.new
      #   dataset = bigquery.dataset "my_dataset"
      #   view = dataset.create_view "my_view",
      #            "SELECT name, age FROM `my_project.my_dataset.my_table`"
      #   view.view? # true
      #
      class Table
        ##
        # @private The Service object.
        attr_accessor :service

        ##
        # @private The Google API Client object.
        attr_accessor :gapi

        ##
        # @private A Google API Client Table Reference object.
        attr_reader :reference

        ##
        # @private Create an empty Table object.
        def initialize
          @service = nil
          @gapi = nil
          @reference = nil
        end

        ##
        # A unique ID for this table.
        #
        # @return [String] The ID must contain only letters (a-z, A-Z), numbers
        #   (0-9), or underscores (_). The maximum length is 1,024 characters.
        #
        # @!group Attributes
        #
        def table_id
          return reference.table_id if reference?
          @gapi.table_reference.table_id
        end

        ##
        # The ID of the `Dataset` containing this table.
        #
        # @return [String] The ID must contain only letters (a-z, A-Z), numbers
        #   (0-9), or underscores (_). The maximum length is 1,024 characters.
        #
        # @!group Attributes
        #
        def dataset_id
          return reference.dataset_id if reference?
          @gapi.table_reference.dataset_id
        end

        ##
        # The ID of the `Project` containing this table.
        #
        # @return [String] The project ID.
        #
        # @!group Attributes
        #
        def project_id
          return reference.project_id if reference?
          @gapi.table_reference.project_id
        end

        ##
        # @private The gapi fragment containing the Project ID, Dataset ID, and
        # Table ID.
        #
        # @return [Google::Apis::BigqueryV2::TableReference]
        #
        def table_ref
          reference? ? reference : @gapi.table_reference
        end

        ###
        # Checks if the table is range partitioned. See [Creating and using integer range partitioned
        # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
        #
        # @return [Boolean, nil] `true` when the table is range partitioned, or
        #   `false` otherwise, if the object is a resource (see {#resource?});
        #   `nil` if the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def range_partitioning?
          return nil if reference?
          !@gapi.range_partitioning.nil?
        end

        ###
        # The field on which the table is range partitioned, if any. The field must be a top-level `NULLABLE/REQUIRED`
        # field. The only supported type is `INTEGER/INT64`. See [Creating and using integer range partitioned
        # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
        #
        # @return [String, nil] The name of the range partition field, or `nil` if not range partitioned or the
        #   object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def range_partitioning_field
          return nil if reference?
          ensure_full_data!
          @gapi.range_partitioning.field if range_partitioning?
        end

        ###
        # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
        # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
        #
        # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned or the
        #   object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def range_partitioning_start
          return nil if reference?
          ensure_full_data!
          @gapi.range_partitioning.range.start if range_partitioning?
        end

        ###
        # The width of each interval. See [Creating and using integer range partitioned
        # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
        #
        # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
        #   partitioned or the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def range_partitioning_interval
          return nil if reference?
          ensure_full_data!
          return nil unless range_partitioning?
          @gapi.range_partitioning.range.interval
        end

        ###
        # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
        # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
        #
        # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned or the
        #   object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def range_partitioning_end
          return nil if reference?
          ensure_full_data!
          @gapi.range_partitioning.range.end if range_partitioning?
        end
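
        # Illustrative usage (an editor's sketch, not from the original file):
        # reading the range partition configuration back from an existing
        # table. The dataset and table names below are assumed.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_range_table")
        #
        #   if table.range_partitioning?
        #     puts table.range_partitioning_field
        #     puts "#{table.range_partitioning_start}...#{table.range_partitioning_end} " \
        #          "step #{table.range_partitioning_interval}"
        #   end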

        ###
        # Checks if the table is time partitioned. See [Partitioned
        # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # @return [Boolean, nil] `true` when the table is time partitioned, or
        #   `false` otherwise, if the object is a resource (see {#resource?});
        #   `nil` if the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def time_partitioning?
          return nil if reference?
          !@gapi.time_partitioning.nil?
        end

        ###
        # The period for which the table is time partitioned, if any. See
        # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # @return [String, nil] The time partition type. Currently the only supported
        #   value is "DAY", or `nil` if the object is a reference (see
        #   {#reference?}).
        #
        # @!group Attributes
        #
        def time_partitioning_type
          return nil if reference?
          ensure_full_data!
          @gapi.time_partitioning.type if time_partitioning?
        end

        ##
        # Sets the time partitioning type for the table. See [Partitioned
        # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # You can only set time partitioning when creating a table as in
        # the example below. BigQuery does not allow you to change time partitioning
        # on an existing table.
        #
        # @param [String] type The time partition type. Currently the only
        #   supported value is "DAY".
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.create_table "my_table" do |t|
        #     t.schema do |schema|
        #       schema.timestamp "dob", mode: :required
        #     end
        #     t.time_partitioning_type = "DAY"
        #     t.time_partitioning_field = "dob"
        #   end
        #
        # @!group Attributes
        #
        def time_partitioning_type= type
          reload! unless resource_full?
          @gapi.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
          @gapi.time_partitioning.type = type
          patch_gapi! :time_partitioning
        end

        ###
        # The field on which the table is time partitioned, if any. If not
        # set, the destination table is time partitioned by pseudo column
        # `_PARTITIONTIME`; if set, the table is time partitioned by this field. See
        # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # @return [String, nil] The time partition field, if a field was configured.
        #   `nil` if not time partitioned, not set (time partitioned by pseudo column
        #   '_PARTITIONTIME') or the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def time_partitioning_field
          return nil if reference?
          ensure_full_data!
          @gapi.time_partitioning.field if time_partitioning?
        end

        ##
        # Sets the field on which to time partition the table. If not
        # set, the destination table is time partitioned by pseudo column
        # `_PARTITIONTIME`; if set, the table is time partitioned by this field. See
        # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        # The table must also be time partitioned.
        #
        # See {Table#time_partitioning_type=}.
        #
        # You can only set the time partitioning field while creating a table as in
        # the example below. BigQuery does not allow you to change time partitioning
        # on an existing table.
        #
        # @param [String] field The time partition field. The field must be a
        #   top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
        #   REQUIRED.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.create_table "my_table" do |t|
        #     t.schema do |schema|
        #       schema.timestamp "dob", mode: :required
        #     end
        #     t.time_partitioning_type = "DAY"
        #     t.time_partitioning_field = "dob"
        #   end
        #
        # @!group Attributes
        #
        def time_partitioning_field= field
          reload! unless resource_full?
          @gapi.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
          @gapi.time_partitioning.field = field
          patch_gapi! :time_partitioning
        end

        ###
        # The expiration for the time partitions, if any, in seconds. See
        # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # @return [Integer, nil] The expiration time, in seconds, for data in
        #   time partitions, or `nil` if not present or the object is a reference
        #   (see {#reference?}).
        #
        # @!group Attributes
        #
        def time_partitioning_expiration
          return nil if reference?
          ensure_full_data!
          return nil unless time_partitioning?
          return nil if @gapi.time_partitioning.expiration_ms.nil?
          @gapi.time_partitioning.expiration_ms / 1_000
        end
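
        # Illustrative usage (a sketch; the dataset and table names are
        # assumed): reading the time partitioning configuration back from a
        # table that was created with partitioning enabled.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_partitioned_table")
        #
        #   if table.time_partitioning?
        #     puts table.time_partitioning_type       # "DAY"
        #     puts table.time_partitioning_field      # e.g. "dob", or nil for _PARTITIONTIME
        #     puts table.time_partitioning_expiration # seconds, or nil
        #   end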

        ##
        # Sets the time partition expiration for the table. See [Partitioned
        # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        # The table must also be time partitioned.
        #
        # See {Table#time_partitioning_type=}.
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @param [Integer] expiration An expiration time, in seconds,
        #   for data in time partitions.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.create_table "my_table" do |t|
        #     t.schema do |schema|
        #       schema.timestamp "dob", mode: :required
        #     end
        #     t.time_partitioning_type = "DAY"
        #     t.time_partitioning_field = "dob"
        #     t.time_partitioning_expiration = 86_400
        #   end
        #
        # @!group Attributes
        #
        def time_partitioning_expiration= expiration
          reload! unless resource_full?
          @gapi.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
          @gapi.time_partitioning.expiration_ms = expiration * 1000
          patch_gapi! :time_partitioning
        end

        ###
        # Whether queries over this table require a partition filter that can be
        # used for partition elimination to be specified. See [Partitioned
        # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # @return [Boolean, nil] `true` when a partition filter will be
        #   required, `false` otherwise, or `nil` if the object is a reference
        #   (see {#reference?}).
        #
        # @!group Attributes
        #
        def require_partition_filter
          return nil if reference?
          ensure_full_data!
          @gapi.require_partition_filter
        end
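
        # Illustrative usage (a sketch with assumed names): checking whether
        # queries against a partitioned table must supply a partition filter.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_partitioned_table")
        #
        #   if table.require_partition_filter
        #     puts "Queries must filter on the partitioning column"
        #   end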

        ##
        # Sets whether queries over this table require a partition filter. See
        # [Partitioned
        # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @param [Boolean] new_require Whether queries over this table require a
        #   partition filter.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.create_table "my_table" do |t|
        #     t.require_partition_filter = true
        #   end
        #
        # @!group Attributes
        #
        def require_partition_filter= new_require
          reload! unless resource_full?
          @gapi.require_partition_filter = new_require
          patch_gapi! :require_partition_filter
        end

        ###
        # Checks if the table is clustered.
        #
        # @see https://cloud.google.com/bigquery/docs/clustered-tables
        #   Introduction to Clustered Tables
        #
        # @return [Boolean, nil] `true` when the table is clustered, or
        #   `false` otherwise, if the object is a resource (see {#resource?});
        #   `nil` if the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def clustering?
          return nil if reference?
          !@gapi.clustering.nil?
        end

        ###
        # One or more fields on which data should be clustered. Must be
        # specified with time partitioning; data in the table will be
        # first partitioned and subsequently clustered. The order of the
        # returned fields determines the sort order of the data.
        #
        # See {Table::Updater#clustering_fields=}.
        #
        # @see https://cloud.google.com/bigquery/docs/partitioned-tables
        #   Partitioned Tables
        # @see https://cloud.google.com/bigquery/docs/clustered-tables
        #   Introduction to Clustered Tables
        # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
        #   Creating and Using Clustered Tables
        #
        # @return [Array<String>, nil] The clustering fields, or `nil` if the
        #   table is not clustered or if the table is a reference (see
        #   {#reference?}).
        #
        # @!group Attributes
        #
        def clustering_fields
          return nil if reference?
          ensure_full_data!
          @gapi.clustering.fields if clustering?
        end
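
        # Illustrative usage (a sketch; names are assumed): inspecting the
        # clustering configuration of a table that was created with
        # clustering fields.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_clustered_table")
        #
        #   if table.clustering?
        #     table.clustering_fields.each { |field| puts field }
        #   end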

        ##
        # The combined Project ID, Dataset ID, and Table ID for this table, in
        # the format specified by the [Legacy SQL Query
        # Reference](https://cloud.google.com/bigquery/query-reference#from)
        # (`project-name:dataset_id.table_id`). This is useful for referencing
        # tables in other projects and datasets. To use this value in queries
        # see {#query_id}.
        #
        # @return [String, nil] The combined ID, or `nil` if the object is a
        #   reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def id
          return nil if reference?
          @gapi.id
        end

        ##
        # The value returned by {#id}, wrapped in backticks (Standard SQL) or
        # square brackets (Legacy SQL) to accommodate project IDs
        # containing dashes. Useful in queries.
        #
        # @param [Boolean] standard_sql Specifies whether to use BigQuery's
        #   [standard
        #   SQL](https://cloud.google.com/bigquery/docs/reference/standard-sql/)
        #   dialect. Optional. The default value is true.
        # @param [Boolean] legacy_sql Specifies whether to use BigQuery's
        #   [legacy
        #   SQL](https://cloud.google.com/bigquery/docs/reference/legacy-sql)
        #   dialect. Optional. The default value is false.
        #
        # @return [String] The appropriate table ID for use in queries,
        #   depending on SQL type.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.table "my_table"
        #
        #   data = bigquery.query "SELECT first_name FROM #{table.query_id}"
        #
        # @!group Attributes
        #
        def query_id standard_sql: nil, legacy_sql: nil
          if Convert.resolve_legacy_sql standard_sql, legacy_sql
            "[#{project_id}:#{dataset_id}.#{table_id}]"
          else
            "`#{project_id}.#{dataset_id}.#{table_id}`"
          end
        end

        ##
        # The name of the table.
        #
        # @return [String, nil] The friendly name, or `nil` if the object is a
        #   reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def name
          return nil if reference?
          @gapi.friendly_name
        end

        ##
        # Updates the name of the table.
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @param [String] new_name The new friendly name.
        #
        # @!group Attributes
        #
        def name= new_name
          reload! unless resource_full?
          @gapi.update! friendly_name: new_name
          patch_gapi! :friendly_name
        end

        ##
        # The ETag hash of the table.
        #
        # @return [String, nil] The ETag hash, or `nil` if the object is a
        #   reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def etag
          return nil if reference?
          ensure_full_data!
          @gapi.etag
        end

        ##
        # A URL that can be used to access the table using the REST API.
        #
        # @return [String, nil] A REST URL for the resource, or `nil` if the
        #   object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def api_url
          return nil if reference?
          ensure_full_data!
          @gapi.self_link
        end

        ##
        # A user-friendly description of the table.
        #
        # @return [String, nil] The description, or `nil` if the object is a
        #   reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def description
          return nil if reference?
          ensure_full_data!
          @gapi.description
        end

        ##
        # Updates the user-friendly description of the table.
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @param [String] new_description The new user-friendly description.
        #
        # @!group Attributes
        #
        def description= new_description
          reload! unless resource_full?
          @gapi.update! description: new_description
          patch_gapi! :description
        end

        ##
        # The number of bytes in the table.
        #
        # @return [Integer, nil] The count of bytes in the table, or `nil` if
        #   the object is a reference (see {#reference?}).
        #
        # @!group Data
        #
        def bytes_count
          return nil if reference?
          ensure_full_data!
          begin
            Integer @gapi.num_bytes
          rescue StandardError
            nil
          end
        end

        ##
        # The number of rows in the table.
        #
        # @return [Integer, nil] The count of rows in the table, or `nil` if the
        #   object is a reference (see {#reference?}).
        #
        # @!group Data
        #
        def rows_count
          return nil if reference?
          ensure_full_data!
          begin
            Integer @gapi.num_rows
          rescue StandardError
            nil
          end
        end

        ##
        # The time when this table was created.
        #
        # @return [Time, nil] The creation time, or `nil` if the object is a
        #   reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def created_at
          return nil if reference?
          ensure_full_data!
          Convert.millis_to_time @gapi.creation_time
        end

        ##
        # The time when this table expires.
        # If not present, the table will persist indefinitely.
        # Expired tables will be deleted and their storage reclaimed.
        #
        # @return [Time, nil] The expiration time, or `nil` if not present or
        #   the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def expires_at
          return nil if reference?
          ensure_full_data!
          Convert.millis_to_time @gapi.expiration_time
        end

        ##
        # The date when this table was last modified.
        #
        # @return [Time, nil] The last modified time, or `nil` if not present or
        #   the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def modified_at
          return nil if reference?
          ensure_full_data!
          Convert.millis_to_time @gapi.last_modified_time
        end

        ##
        # Checks if the table's type is "TABLE".
        #
        # @return [Boolean, nil] `true` when the type is `TABLE`, `false`
        #   otherwise, if the object is a resource (see {#resource?}); `nil` if
        #   the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def table?
          return nil if reference?
          @gapi.type == "TABLE"
        end

        ##
        # Checks if the table's type is "VIEW", indicating that the table
        # represents a BigQuery view. See {Dataset#create_view}.
        #
        # @return [Boolean, nil] `true` when the type is `VIEW`, `false`
        #   otherwise, if the object is a resource (see {#resource?}); `nil` if
        #   the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def view?
          return nil if reference?
          @gapi.type == "VIEW"
        end

        ##
        # Checks if the table's type is "EXTERNAL", indicating that the table
        # represents an External Data Source. See {#external?} and
        # {External::DataSource}.
        #
        # @return [Boolean, nil] `true` when the type is `EXTERNAL`, `false`
        #   otherwise, if the object is a resource (see {#resource?}); `nil` if
        #   the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def external?
          return nil if reference?
          @gapi.type == "EXTERNAL"
        end

        ##
        # The geographic location where the table should reside. Possible
        # values include `EU` and `US`. The default value is `US`.
        #
        # @return [String, nil] The location code.
        #
        # @!group Attributes
        #
        def location
          return nil if reference?
          ensure_full_data!
          @gapi.location
        end

        ##
        # A hash of user-provided labels associated with this table. Labels
        # are used to organize and group tables. See [Using
        # Labels](https://cloud.google.com/bigquery/docs/labels).
        #
        # The returned hash is frozen and changes are not allowed. Use
        # {#labels=} to replace the entire hash.
        #
        # @return [Hash<String, String>, nil] A hash containing key/value pairs.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.table "my_table"
        #
        #   labels = table.labels
        #   labels["department"] #=> "shipping"
        #
        # @!group Attributes
        #
        def labels
          return nil if reference?
          m = @gapi.labels
          m = m.to_h if m.respond_to? :to_h
          m.dup.freeze
        end

        ##
        # Updates the hash of user-provided labels associated with this table.
        # Labels are used to organize and group tables. See [Using
        # Labels](https://cloud.google.com/bigquery/docs/labels).
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @param [Hash<String, String>] labels A hash containing key/value
        #   pairs.
        #
        #   * Label keys and values can be no longer than 63 characters.
        #   * Label keys and values can contain only lowercase letters, numbers,
        #     underscores, hyphens, and international characters.
        #   * Label keys and values cannot exceed 128 bytes in size.
        #   * Label keys must begin with a letter.
        #   * Label keys must be unique within a table.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.table "my_table"
        #
        #   table.labels = { "department" => "shipping" }
        #
        # @!group Attributes
        #
        def labels= labels
          reload! unless resource_full?
          @gapi.labels = labels
          patch_gapi! :labels
        end

        ##
        # Returns the table's schema. If the table is not a view (see {#view?}),
        # this method can also be used to set, replace, or add to the schema by
        # passing a block. See {Schema} for available methods.
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved.
        #
        # @param [Boolean] replace Whether to replace the existing schema with
        #   the new schema. If `true`, the fields will replace the existing
        #   schema. If `false`, the fields will be added to the existing schema.
        #   When a table already contains data, schema changes must be additive.
        #   Thus, the default value is `false`.
        #   When loading from a file this will always replace the schema, no
        #   matter what `replace` is set to. You can update the schema (for
        #   example, for a table that already contains data) by providing a
        #   schema file that includes the existing schema plus any new
        #   fields.
        # @yield [schema] a block for setting the schema
        # @yieldparam [Schema] schema the object accepting the schema
        #
        # @return [Google::Cloud::Bigquery::Schema, nil] A frozen schema object.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.create_table "my_table"
        #
        #   table.schema do |schema|
        #     schema.string "first_name", mode: :required
        #     schema.record "cities_lived", mode: :repeated do |nested_schema|
        #       nested_schema.string "place", mode: :required
        #       nested_schema.integer "number_of_years", mode: :required
        #     end
        #   end
        #
        # @example Load the schema from a file
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.create_table "my_table"
        #   table.schema do |schema|
        #     schema.load File.open("schema.json")
        #   end
        #
        # @!group Attributes
        #
        def schema replace: false
          return nil if reference? && !block_given?
          reload! unless resource_full?
          schema_builder = Schema.from_gapi @gapi.schema
          if block_given?
            schema_builder = Schema.from_gapi if replace
            yield schema_builder
            if schema_builder.changed?
              @gapi.schema = schema_builder.to_gapi
              patch_gapi! :schema
            end
          end
          schema_builder.freeze
        end

        ##
        # The fields of the table, obtained from its schema.
        #
        # @return [Array<Schema::Field>, nil] An array of field objects.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.table "my_table"
        #
        #   table.fields.each do |field|
        #     puts field.name
        #   end
        #
        # @!group Attributes
        #
        def fields
          return nil if reference?
          schema.fields
        end

        ##
        # The names of the columns in the table, obtained from its schema.
        #
        # @return [Array<Symbol>, nil] An array of column names.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.table "my_table"
        #
        #   table.headers.each do |header|
        #     puts header
        #   end
        #
        # @!group Attributes
        #
        def headers
          return nil if reference?
          schema.headers
        end

        ##
        # The types of the fields in the table, obtained from its schema.
        # Types use the same format as the optional query parameter types.
        #
        # @return [Hash] A hash with field names as keys, and types as values.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   table = dataset.table "my_table"
        #
        #   table.param_types
        #
        def param_types
          return nil if reference?
          schema.param_types
        end
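
        # Illustrative usage (a sketch; names are assumed): using the schema
        # readers together. +headers+ lists the column names, +fields+ exposes
        # the full field objects, and +param_types+ maps each column to its
        # query parameter type.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_table")
        #
        #   table.headers.each do |column|
        #     puts "#{column}: #{table.param_types[column]}"
        #   end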

        ##
        # The {EncryptionConfiguration} object that represents the custom
        # encryption method used to protect the table. If not set,
        # {Dataset#default_encryption} is used.
        #
        # Present only if the table is using custom encryption.
        #
        # @see https://cloud.google.com/bigquery/docs/customer-managed-encryption
        #   Protecting Data with Cloud KMS Keys
        #
        # @return [EncryptionConfiguration, nil] The encryption configuration.
        #
        # @!group Attributes
        #
        def encryption
          return nil if reference?
          ensure_full_data!
          return nil if @gapi.encryption_configuration.nil?
          EncryptionConfiguration.from_gapi(@gapi.encryption_configuration).freeze
        end
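
        # Illustrative usage (a sketch; the dataset and table names are
        # assumed): checking whether a table is protected with a
        # customer-managed key.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_table")
        #
        #   if table.encryption
        #     puts table.encryption.kms_key
        #   end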

        ##
        # Set the {EncryptionConfiguration} object that represents the custom
        # encryption method used to protect the table. If not set,
        # {Dataset#default_encryption} is used.
        #
        # Present only if the table is using custom encryption.
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @see https://cloud.google.com/bigquery/docs/customer-managed-encryption
        #   Protecting Data with Cloud KMS Keys
        #
        # @param [EncryptionConfiguration] value The new encryption config.
        #
        # @!group Attributes
        #
        def encryption= value
          reload! unless resource_full?
          @gapi.encryption_configuration = value.to_gapi
          patch_gapi! :encryption_configuration
        end

        ##
        # The {External::DataSource} (or subclass) object that represents the
        # external data source that the table represents. Data can be queried
        # from the table, even though the data is not stored in BigQuery. Instead
        # of loading or streaming the data, this object references the external
        # data source.
        #
        # Present only if the table represents an External Data Source. See
        # {#external?} and {External::DataSource}.
        #
        # @see https://cloud.google.com/bigquery/external-data-sources
        #   Querying External Data Sources
        #
        # @return [External::DataSource, nil] The external data source.
        #
        # @!group Attributes
        #
        def external
          return nil if reference?
          ensure_full_data!
          return nil if @gapi.external_data_configuration.nil?
          External.from_gapi(@gapi.external_data_configuration).freeze
        end

        ##
        # Set the {External::DataSource} (or subclass) object that represents
        # the external data source that the table represents. Data can be
        # queried from the table, even though the data is not stored in BigQuery.
        # Instead of loading or streaming the data, this object references the
        # external data source.
        #
        # Use only if the table represents an External Data Source. See
        # {#external?} and {External::DataSource}.
        #
        # If the table is not a full resource representation (see
        # {#resource_full?}), the full representation will be retrieved before
        # the update to comply with ETag-based optimistic concurrency control.
        #
        # @see https://cloud.google.com/bigquery/external-data-sources
        #   Querying External Data Sources
        #
        # @param [External::DataSource] external An external data source.
        #
        # @!group Attributes
        #
        def external= external
          reload! unless resource_full?
          @gapi.external_data_configuration = external.to_gapi
          patch_gapi! :external_data_configuration
        end

        ##
        # A lower-bound estimate of the number of bytes currently in this
        # table's streaming buffer, if one is present. This field will be absent
        # if the table is not being streamed to or if there is no data in the
        # streaming buffer.
        #
        # @return [Integer, nil] The estimated number of bytes in the buffer, or
        #   `nil` if not present or the object is a reference (see
        #   {#reference?}).
        #
        # @!group Attributes
        #
        def buffer_bytes
          return nil if reference?
          ensure_full_data!
          @gapi.streaming_buffer&.estimated_bytes
        end

        ##
        # A lower-bound estimate of the number of rows currently in this
        # table's streaming buffer, if one is present. This field will be absent
        # if the table is not being streamed to or if there is no data in the
        # streaming buffer.
        #
        # @return [Integer, nil] The estimated number of rows in the buffer, or
        #   `nil` if not present or the object is a reference (see
        #   {#reference?}).
        #
        # @!group Attributes
        #
        def buffer_rows
          return nil if reference?
          ensure_full_data!
          @gapi.streaming_buffer&.estimated_rows
        end

        ##
        # The time of the oldest entry currently in this table's streaming
        # buffer, if one is present. This field will be absent if the table is
        # not being streamed to or if there is no data in the streaming buffer.
        #
        # @return [Time, nil] The oldest entry time, or `nil` if not present or
        #   the object is a reference (see {#reference?}).
        #
        # @!group Attributes
        #
        def buffer_oldest_at
          return nil if reference?
          ensure_full_data!
          return nil unless @gapi.streaming_buffer
          oldest_entry_time = @gapi.streaming_buffer.oldest_entry_time
          Convert.millis_to_time oldest_entry_time
        end
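
        # Illustrative usage (a sketch; names are assumed): checking the
        # streaming buffer after rows have been streamed in with {#insert}.
        # All three readers return nil when no streaming buffer is present.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   table = bigquery.dataset("my_dataset").table("my_table")
        #
        #   row = { "first_name" => "Alice" }
        #   table.insert row
        #
        #   puts table.buffer_rows      # estimated rows in the buffer
        #   puts table.buffer_bytes     # estimated bytes in the buffer
        #   puts table.buffer_oldest_at # time of the oldest buffered entry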

        ##
        # The query that executes each time the view is loaded.
        #
        # @return [String] The query that defines the view.
        #
        # @!group Attributes
        #
        def query
          @gapi.view&.query
        end

        ##
        # Updates the query that executes each time the view is loaded.
        #
        # This sets the query using standard SQL. To specify legacy SQL or to
        # use user-defined function resources, use {#set_query} instead.
        #
        # @see https://cloud.google.com/bigquery/query-reference BigQuery Query
        #   Reference
        #
        # @param [String] new_query The query that defines the view.
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   view = dataset.table "my_view"
        #
        #   view.query = "SELECT first_name FROM " \
        #                "`my_project.my_dataset.my_table`"
        #
        # @!group Lifecycle
        #
        def query= new_query
          set_query new_query
        end

        ##
        # Updates the query that executes each time the view is loaded. Allows
        # setting of standard vs. legacy SQL and user-defined function
        # resources.
        #
        # @see https://cloud.google.com/bigquery/query-reference BigQuery Query
        #   Reference
        #
        # @param [String] query The query that defines the view.
        # @param [Boolean] standard_sql Specifies whether to use BigQuery's
        #   [standard
        #   SQL](https://cloud.google.com/bigquery/docs/reference/standard-sql/)
        #   dialect. Optional. The default value is true.
        # @param [Boolean] legacy_sql Specifies whether to use BigQuery's
        #   [legacy
        #   SQL](https://cloud.google.com/bigquery/docs/reference/legacy-sql)
        #   dialect. Optional. The default value is false.
        # @param [Array<String>, String] udfs User-defined function resources
        #   used in a legacy SQL query. May be either a code resource to load from
        #   a Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
        #   that contains code for a user-defined function (UDF). Providing an
        #   inline code resource is equivalent to providing a URI for a file
        #   containing the same code.
        #
        #   This parameter is used for defining User Defined Function (UDF)
        #   resources only when using legacy SQL. Users of standard SQL should
        #   leverage either DDL (e.g. `CREATE [TEMPORARY] FUNCTION ...`) or the
        #   Routines API to define UDF resources.
        #
        #   For additional information on migrating, see: [Migrating to
        #   standard SQL - Differences in user-defined JavaScript
        #   functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions)
        #
        # @example
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   dataset = bigquery.dataset "my_dataset"
        #   view = dataset.table "my_view"
        #
        #   view.set_query "SELECT first_name FROM " \
        #                  "`my_project.my_dataset.my_table`",
        #                  standard_sql: true
        #
        # @!group Lifecycle
        #
        def set_query query, standard_sql: nil, legacy_sql: nil, udfs: nil
          use_legacy_sql = Convert.resolve_legacy_sql standard_sql, legacy_sql
          @gapi.view = Google::Apis::BigqueryV2::ViewDefinition.new(
            query: query,
            use_legacy_sql: use_legacy_sql,
            user_defined_function_resources: udfs_gapi(udfs)
          )
          patch_gapi! :view
        end

        ##
        # Checks if the view's query is using legacy sql.
        #
        # @return [Boolean] `true` when legacy sql is used, `false` otherwise.
        #
        # @!group Attributes
        #
        def query_legacy_sql?
          val = @gapi.view.use_legacy_sql
          return true if val.nil?
          val
        end

        ##
        # Checks if the view's query is using standard sql.
        #
        # @return [Boolean] `true` when standard sql is used, `false` otherwise.
        #
        # @!group Attributes
        #
        def query_standard_sql?
          !query_legacy_sql?
        end

        ##
        # The user-defined function resources used in the view's query. May be
        # either a code resource to load from a Google Cloud Storage URI
        # (`gs://bucket/path`), or an inline resource that contains code for a
        # user-defined function (UDF). Providing an inline code resource is
        # equivalent to providing a URI for a file containing the same code. See
        # [User-Defined
        # Functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions).
        #
        # @return [Array<String>] An array containing Google Cloud Storage URIs
        #   and/or inline source code.
        #
        # @!group Attributes
        #
        def query_udfs
          udfs_gapi = @gapi.view.user_defined_function_resources
          return [] if udfs_gapi.nil?
          Array(udfs_gapi).map { |udf| udf.inline_code || udf.resource_uri }
        end
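
        # Illustrative usage (a sketch; names are assumed): inspecting a view's
        # definition. {#query_legacy_sql?} and {#query_standard_sql?} report the
        # dialect, and {#query_udfs} lists any legacy-SQL UDF resources.
        #
        #   require "google/cloud/bigquery"
        #
        #   bigquery = Google::Cloud::Bigquery.new
        #   view = bigquery.dataset("my_dataset").table("my_view")
        #
        #   if view.view?
        #     puts view.query
        #     puts view.query_standard_sql? # true unless legacy SQL was requested
        #     puts view.query_udfs.inspect
        #   end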
|
1265
|
+
|
1266
|
+
##
|
1267
|
+
# Retrieves data from the table.
|
1268
|
+
#
|
1269
|
+
# If the table is not a full resource representation (see
|
1270
|
+
# {#resource_full?}), the full representation will be retrieved before
|
1271
|
+
# the data retrieval.
|
1272
|
+
#
|
1273
|
+
# @param [String] token Page token, returned by a previous call,
|
1274
|
+
# identifying the result set.
|
1275
|
+
#
|
1276
|
+
# @param [Integer] max Maximum number of results to return.
|
1277
|
+
# @param [Integer] start Zero-based index of the starting row to read.
|
1278
|
+
#
|
1279
|
+
# @return [Google::Cloud::Bigquery::Data]
|
1280
|
+
#
|
1281
|
+
# @example Paginate rows of data: (See {Data#next})
|
1282
|
+
# require "google/cloud/bigquery"
|
1283
|
+
#
|
1284
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1285
|
+
# dataset = bigquery.dataset "my_dataset"
|
1286
|
+
# table = dataset.table "my_table"
|
1287
|
+
#
|
1288
|
+
# data = table.data
|
1289
|
+
#
|
1290
|
+
# # Iterate over the first page of results
|
1291
|
+
# data.each do |row|
|
1292
|
+
# puts row[:name]
|
1293
|
+
# end
|
1294
|
+
# # Retrieve the next page of results
|
1295
|
+
# data = data.next if data.next?
|
1296
|
+
#
|
1297
|
+
# @example Retrieve all rows of data: (See {Data#all})
|
1298
|
+
# require "google/cloud/bigquery"
|
1299
|
+
#
|
1300
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1301
|
+
# dataset = bigquery.dataset "my_dataset"
|
1302
|
+
# table = dataset.table "my_table"
|
1303
|
+
#
|
1304
|
+
# data = table.data
|
1305
|
+
#
|
1306
|
+
# data.all do |row|
|
1307
|
+
# puts row[:name]
|
1308
|
+
# end
|
1309
|
+
#
|
1310
|
+
# @!group Data
|
1311
|
+
#
|
1312
|
+
def data token: nil, max: nil, start: nil
|
1313
|
+
ensure_service!
|
1314
|
+
reload! unless resource_full?
|
1315
|
+
data_json = service.list_tabledata dataset_id, table_id, token: token, max: max, start: start
|
1316
|
+
Data.from_gapi_json data_json, gapi, nil, service
|
1317
|
+
end
|
1318
|
+
|
1319
|
+
##
|
1320
|
+
# Copies the data from the table to another table using an asynchronous
|
1321
|
+
# method. In this method, a {CopyJob} is immediately returned. The
|
1322
|
+
# caller may poll the service by repeatedly calling {Job#reload!} and
|
1323
|
+
# {Job#done?} to detect when the job is done, or simply block until the
|
1324
|
+
# job is done by calling #{Job#wait_until_done!}. See also {#copy}.
|
1325
|
+
#
|
1326
|
+
# The geographic location for the job ("US", "EU", etc.) can be set via
|
1327
|
+
# {CopyJob::Updater#location=} in a block passed to this method. If the
|
1328
|
+
# table is a full resource representation (see {#resource_full?}), the
|
1329
|
+
# location of the job will be automatically set to the location of the
|
1330
|
+
# table.
|
1331
|
+
#
|
1332
|
+
# @param [Table, String] destination_table The destination for the
|
1333
|
+
# copied data. This can also be a string identifier as specified by
|
1334
|
+
# the [Standard SQL Query
|
1335
|
+
# Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
|
1336
|
+
# (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
|
1337
|
+
# Reference](https://cloud.google.com/bigquery/query-reference#from)
|
1338
|
+
# (`project-name:dataset_id.table_id`). This is useful for referencing
|
1339
|
+
# tables in other projects and datasets.
|
1340
|
+
# @param [String] create Specifies whether the job is allowed to create
|
1341
|
+
# new tables. The default value is `needed`.
|
1342
|
+
#
|
1343
|
+
# The following values are supported:
|
1344
|
+
#
|
1345
|
+
# * `needed` - Create the table if it does not exist.
|
1346
|
+
# * `never` - The table must already exist. A 'notFound' error is
|
1347
|
+
# raised if the table does not exist.
|
1348
|
+
# @param [String] write Specifies how to handle data already present in
|
1349
|
+
# the destination table. The default value is `empty`.
|
1350
|
+
#
|
1351
|
+
# The following values are supported:
|
1352
|
+
#
|
1353
|
+
# * `truncate` - BigQuery overwrites the table data.
|
1354
|
+
# * `append` - BigQuery appends the data to the table.
|
1355
|
+
# * `empty` - An error will be returned if the destination table
|
1356
|
+
# already contains data.
|
1357
|
+
# @param [String] job_id A user-defined ID for the copy job. The ID
|
1358
|
+
# must contain only letters (a-z, A-Z), numbers (0-9), underscores
|
1359
|
+
# (_), or dashes (-). The maximum length is 1,024 characters. If
|
1360
|
+
# `job_id` is provided, then `prefix` will not be used.
|
1361
|
+
#
|
1362
|
+
# See [Generating a job
|
1363
|
+
# ID](https://cloud.google.com/bigquery/docs/managing-jobs#generate-jobid).
|
1364
|
+
# @param [String] prefix A string, usually human-readable, that will be
|
1365
|
+
# prepended to a generated value to produce a unique job ID. For
|
1366
|
+
# example, the prefix `daily_import_job_` can be given to generate a
|
1367
|
+
# job ID such as `daily_import_job_12vEDtMQ0mbp1Mo5Z7mzAFQJZazh`. The
|
1368
|
+
# prefix must contain only letters (a-z, A-Z), numbers (0-9),
|
1369
|
+
# underscores (_), or dashes (-). The maximum length of the entire ID
|
1370
|
+
# is 1,024 characters. If `job_id` is provided, then `prefix` will not
|
1371
|
+
# be used.
|
1372
|
+
# @param [Hash] labels A hash of user-provided labels associated with
|
1373
|
+
# the job. You can use these to organize and group your jobs. Label
|
1374
|
+
# keys and values can be no longer than 63 characters, can only
|
1375
|
+
# contain lowercase letters, numeric characters, underscores and
|
1376
|
+
# dashes. International characters are allowed. Label values are
|
1377
|
+
# optional. Label keys must start with a letter and each label in the
|
1378
|
+
# list must have a different key. See [Requirements for
|
1379
|
+
# labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
|
1380
|
+
# @param [Boolean] dryrun If set, don't actually run this job. Behavior
|
1381
|
+
# behavior is undefined for non-query jobs and may result in an error.
|
1382
|
+
# Deprecated.
|
1383
|
+
#
|
1384
|
+
# @yield [job] a job configuration object
|
1385
|
+
# @yieldparam [Google::Cloud::Bigquery::CopyJob::Updater] job a job
|
1386
|
+
# configuration object for setting additional options.
|
1387
|
+
#
|
1388
|
+
# @return [Google::Cloud::Bigquery::CopyJob]
|
1389
|
+
#
|
1390
|
+
# @example
|
1391
|
+
# require "google/cloud/bigquery"
|
1392
|
+
#
|
1393
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1394
|
+
# dataset = bigquery.dataset "my_dataset"
|
1395
|
+
# table = dataset.table "my_table"
|
1396
|
+
# destination_table = dataset.table "my_destination_table"
|
1397
|
+
#
|
1398
|
+
# copy_job = table.copy_job destination_table
|
1399
|
+
#
|
1400
|
+
# @example Passing a string identifier for the destination table:
|
1401
|
+
# require "google/cloud/bigquery"
|
1402
|
+
#
|
1403
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1404
|
+
# dataset = bigquery.dataset "my_dataset"
|
1405
|
+
# table = dataset.table "my_table"
|
1406
|
+
#
|
1407
|
+
# copy_job = table.copy_job "other-project:other_dataset.other_table"
|
1408
|
+
#
|
1409
|
+
# copy_job.wait_until_done!
|
1410
|
+
# copy_job.done? #=> true
|
1411
|
+
#
|
1412
|
+
# @!group Data
|
1413
|
+
#
|
1414
|
+
def copy_job destination_table, create: nil, write: nil, job_id: nil, prefix: nil, labels: nil, dryrun: nil
|
1415
|
+
ensure_service!
|
1416
|
+
options = { create: create, write: write, dryrun: dryrun, labels: labels, job_id: job_id, prefix: prefix }
|
1417
|
+
updater = CopyJob::Updater.from_options(
|
1418
|
+
service,
|
1419
|
+
table_ref,
|
1420
|
+
Service.get_table_ref(destination_table, default_ref: table_ref),
|
1421
|
+
options
|
1422
|
+
)
|
1423
|
+
updater.location = location if location # may be table reference
|
1424
|
+
|
1425
|
+
yield updater if block_given?
|
1426
|
+
|
1427
|
+
job_gapi = updater.to_gapi
|
1428
|
+
gapi = service.copy_table job_gapi
|
1429
|
+
Job.from_gapi gapi, service
|
1430
|
+
end
|
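For example, the location and labels described above can be set in the block form, roughly as in this minimal sketch (the project, dataset, and table names are placeholders, and the "EU" location and label values are illustrative assumptions):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"
    table    = dataset.table "my_table"

    # Configure the copy job in the yielded updater; location=, create=,
    # write=, and labels= are all available there.
    copy_job = table.copy_job "other-project.other_dataset.other_table" do |job|
      job.location = "EU"
      job.labels   = { "env" => "test" }
    end

    copy_job.wait_until_done!
    copy_job.done? #=> true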
1431
|
+
|
1432
|
+
##
|
1433
|
+
# Copies the data from the table to another table using a synchronous
|
1434
|
+
# method that blocks for a response. Timeouts and transient errors are
|
1435
|
+
# generally handled as needed to complete the job. See also
|
1436
|
+
# {#copy_job}.
|
1437
|
+
#
|
1438
|
+
# The geographic location for the job ("US", "EU", etc.) can be set via
|
1439
|
+
# {CopyJob::Updater#location=} in a block passed to this method. If the
|
1440
|
+
# table is a full resource representation (see {#resource_full?}), the
|
1441
|
+
# location of the job will be automatically set to the location of the
|
1442
|
+
# table.
|
1443
|
+
#
|
1444
|
+
# @param [Table, String] destination_table The destination for the
|
1445
|
+
# copied data. This can also be a string identifier as specified by
|
1446
|
+
# the [Standard SQL Query
|
1447
|
+
# Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
|
1448
|
+
# (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
|
1449
|
+
# Reference](https://cloud.google.com/bigquery/query-reference#from)
|
1450
|
+
# (`project-name:dataset_id.table_id`). This is useful for referencing
|
1451
|
+
# tables in other projects and datasets.
|
1452
|
+
# @param [String] create Specifies whether the job is allowed to create
|
1453
|
+
# new tables. The default value is `needed`.
|
1454
|
+
#
|
1455
|
+
# The following values are supported:
|
1456
|
+
#
|
1457
|
+
# * `needed` - Create the table if it does not exist.
|
1458
|
+
# * `never` - The table must already exist. A 'notFound' error is
|
1459
|
+
# raised if the table does not exist.
|
1460
|
+
# @param [String] write Specifies how to handle data already present in
|
1461
|
+
# the destination table. The default value is `empty`.
|
1462
|
+
#
|
1463
|
+
# The following values are supported:
|
1464
|
+
#
|
1465
|
+
# * `truncate` - BigQuery overwrites the table data.
|
1466
|
+
# * `append` - BigQuery appends the data to the table.
|
1467
|
+
# * `empty` - An error will be returned if the destination table
|
1468
|
+
# already contains data.
|
1469
|
+
# @yield [job] a job configuration object
|
1470
|
+
# @yieldparam [Google::Cloud::Bigquery::CopyJob::Updater] job a job
|
1471
|
+
# configuration object for setting additional options.
|
1472
|
+
#
|
1473
|
+
# @return [Boolean] Returns `true` if the copy operation succeeded.
|
1474
|
+
#
|
1475
|
+
# @example
|
1476
|
+
# require "google/cloud/bigquery"
|
1477
|
+
#
|
1478
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1479
|
+
# dataset = bigquery.dataset "my_dataset"
|
1480
|
+
# table = dataset.table "my_table"
|
1481
|
+
# destination_table = dataset.table "my_destination_table"
|
1482
|
+
#
|
1483
|
+
# table.copy destination_table
|
1484
|
+
#
|
1485
|
+
# @example Passing a string identifier for the destination table:
|
1486
|
+
# require "google/cloud/bigquery"
|
1487
|
+
#
|
1488
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1489
|
+
# dataset = bigquery.dataset "my_dataset"
|
1490
|
+
# table = dataset.table "my_table"
|
1491
|
+
#
|
1492
|
+
# table.copy "other-project:other_dataset.other_table"
|
1493
|
+
#
|
1494
|
+
# @!group Data
|
1495
|
+
#
|
1496
|
+
def copy destination_table, create: nil, write: nil, &block
|
1497
|
+
job = copy_job destination_table, create: create, write: write, &block
|
1498
|
+
job.wait_until_done!
|
1499
|
+
ensure_job_succeeded! job
|
1500
|
+
true
|
1501
|
+
end
|
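A short sketch of the synchronous form with explicit dispositions (placeholder names; `write: "truncate"` is an illustrative choice, not a default):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"
    table    = dataset.table "my_table"
    destination_table = dataset.table "my_destination_table"

    # Create the destination if needed and overwrite any existing data.
    table.copy destination_table, create: "needed", write: "truncate"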
1502
|
+
|
1503
|
+
##
|
1504
|
+
# Extracts the data from the table to a Google Cloud Storage file using
|
1505
|
+
# an asynchronous method. In this method, an {ExtractJob} is immediately
|
1506
|
+
# returned. The caller may poll the service by repeatedly calling
|
1507
|
+
# {Job#reload!} and {Job#done?} to detect when the job is done, or
|
1508
|
+
# simply block until the job is done by calling {Job#wait_until_done!}.
|
1509
|
+
# See also {#extract}.
|
1510
|
+
#
|
1511
|
+
# The geographic location for the job ("US", "EU", etc.) can be set via
|
1512
|
+
# {ExtractJob::Updater#location=} in a block passed to this method. If
|
1513
|
+
# the table is a full resource representation (see {#resource_full?}),
|
1514
|
+
# the location of the job will be automatically set to the location of
|
1515
|
+
# the table.
|
1516
|
+
#
|
1517
|
+
# @see https://cloud.google.com/bigquery/exporting-data-from-bigquery
|
1518
|
+
# Exporting Data From BigQuery
|
1519
|
+
#
|
1520
|
+
# @param [Google::Cloud::Storage::File, String, Array<String>]
|
1521
|
+
# extract_url The Google Storage file or file URI pattern(s) to which
|
1522
|
+
# BigQuery should extract the table data.
|
1523
|
+
# @param [String] format The exported file format. The default value is
|
1524
|
+
# `csv`.
|
1525
|
+
#
|
1526
|
+
# The following values are supported:
|
1527
|
+
#
|
1528
|
+
# * `csv` - CSV
|
1529
|
+
# * `json` - [Newline-delimited JSON](http://jsonlines.org/)
|
1530
|
+
# * `avro` - [Avro](http://avro.apache.org/)
|
1531
|
+
# @param [String] compression The compression type to use for exported
|
1532
|
+
# files. Possible values include `GZIP` and `NONE`. The default value
|
1533
|
+
# is `NONE`.
|
1534
|
+
# @param [String] delimiter Delimiter to use between fields in the
|
1535
|
+
# exported data. Default is <code>,</code>.
|
1536
|
+
# @param [Boolean] header Whether to print out a header row in the
|
1537
|
+
# results. Default is `true`.
|
1538
|
+
# @param [String] job_id A user-defined ID for the extract job. The ID
|
1539
|
+
# must contain only letters (a-z, A-Z), numbers (0-9), underscores
|
1540
|
+
# (_), or dashes (-). The maximum length is 1,024 characters. If
|
1541
|
+
# `job_id` is provided, then `prefix` will not be used.
|
1542
|
+
#
|
1543
|
+
# See [Generating a job
|
1544
|
+
# ID](https://cloud.google.com/bigquery/docs/managing-jobs#generate-jobid).
|
1545
|
+
# @param [String] prefix A string, usually human-readable, that will be
|
1546
|
+
# prepended to a generated value to produce a unique job ID. For
|
1547
|
+
# example, the prefix `daily_import_job_` can be given to generate a
|
1548
|
+
# job ID such as `daily_import_job_12vEDtMQ0mbp1Mo5Z7mzAFQJZazh`. The
|
1549
|
+
# prefix must contain only letters (a-z, A-Z), numbers (0-9),
|
1550
|
+
# underscores (_), or dashes (-). The maximum length of the entire ID
|
1551
|
+
# is 1,024 characters. If `job_id` is provided, then `prefix` will not
|
1552
|
+
# be used.
|
1553
|
+
# @param [Hash] labels A hash of user-provided labels associated with
|
1554
|
+
# the job. You can use these to organize and group your jobs. Label
|
1555
|
+
# keys and values can be no longer than 63 characters, can only
|
1556
|
+
# contain lowercase letters, numeric characters, underscores and
|
1557
|
+
# dashes. International characters are allowed. Label values are
|
1558
|
+
# optional. Label keys must start with a letter and each label in the
|
1559
|
+
# list must have a different key. See [Requirements for
|
1560
|
+
# labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
|
1561
|
+
# @param [Boolean] dryrun If set, don't actually run this job. However,
|
1562
|
+
# behavior is undefined for non-query jobs and may result in an error.
|
1563
|
+
# Deprecated.
|
1564
|
+
#
|
1565
|
+
# @yield [job] a job configuration object
|
1566
|
+
# @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
|
1567
|
+
# configuration object for setting additional options.
|
1568
|
+
#
|
1569
|
+
# @return [Google::Cloud::Bigquery::ExtractJob]
|
1570
|
+
#
|
1571
|
+
# @example
|
1572
|
+
# require "google/cloud/bigquery"
|
1573
|
+
#
|
1574
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1575
|
+
# dataset = bigquery.dataset "my_dataset"
|
1576
|
+
# table = dataset.table "my_table"
|
1577
|
+
#
|
1578
|
+
# extract_job = table.extract_job "gs://my-bucket/file-name.json",
|
1579
|
+
# format: "json"
|
1580
|
+
# extract_job.wait_until_done!
|
1581
|
+
# extract_job.done? #=> true
|
1582
|
+
#
|
1583
|
+
# @!group Data
|
1584
|
+
#
|
1585
|
+
def extract_job extract_url, format: nil, compression: nil, delimiter: nil, header: nil, job_id: nil,
|
1586
|
+
prefix: nil, labels: nil, dryrun: nil
|
1587
|
+
ensure_service!
|
1588
|
+
options = { format: format, compression: compression, delimiter: delimiter, header: header, dryrun: dryrun,
|
1589
|
+
job_id: job_id, prefix: prefix, labels: labels }
|
1590
|
+
updater = ExtractJob::Updater.from_options service, table_ref, extract_url, options
|
1591
|
+
updater.location = location if location # may be table reference
|
1592
|
+
|
1593
|
+
yield updater if block_given?
|
1594
|
+
|
1595
|
+
job_gapi = updater.to_gapi
|
1596
|
+
gapi = service.extract_table job_gapi
|
1597
|
+
Job.from_gapi gapi, service
|
1598
|
+
end
|
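A minimal sketch of the asynchronous export with compression and labels set in the block (bucket and table names are placeholders; the wildcard URI assumes a sharded export is acceptable):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"
    table    = dataset.table "my_table"

    # Export sharded, gzip-compressed CSV files.
    extract_job = table.extract_job "gs://my-bucket/my_table-*.csv.gz" do |job|
      job.compression = "GZIP"
      job.labels      = { "env" => "test" }
    end

    extract_job.wait_until_done!
    extract_job.done? #=> true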
1599
|
+
|
1600
|
+
##
|
1601
|
+
# Extracts the data from the table to a Google Cloud Storage file using
|
1602
|
+
# a synchronous method that blocks for a response. Timeouts and
|
1603
|
+
# transient errors are generally handled as needed to complete the job.
|
1604
|
+
# See also {#extract_job}.
|
1605
|
+
#
|
1606
|
+
# The geographic location for the job ("US", "EU", etc.) can be set via
|
1607
|
+
# {ExtractJob::Updater#location=} in a block passed to this method. If
|
1608
|
+
# the table is a full resource representation (see {#resource_full?}),
|
1609
|
+
# the location of the job will be automatically set to the location of
|
1610
|
+
# the table.
|
1611
|
+
#
|
1612
|
+
# @see https://cloud.google.com/bigquery/exporting-data-from-bigquery
|
1613
|
+
# Exporting Data From BigQuery
|
1614
|
+
#
|
1615
|
+
# @param [Google::Cloud::Storage::File, String, Array<String>]
|
1616
|
+
# extract_url The Google Storage file or file URI pattern(s) to which
|
1617
|
+
# BigQuery should extract the table data.
|
1618
|
+
# @param [String] format The exported file format. The default value is
|
1619
|
+
# `csv`.
|
1620
|
+
#
|
1621
|
+
# The following values are supported:
|
1622
|
+
#
|
1623
|
+
# * `csv` - CSV
|
1624
|
+
# * `json` - [Newline-delimited JSON](http://jsonlines.org/)
|
1625
|
+
# * `avro` - [Avro](http://avro.apache.org/)
|
1626
|
+
# @param [String] compression The compression type to use for exported
|
1627
|
+
# files. Possible values include `GZIP` and `NONE`. The default value
|
1628
|
+
# is `NONE`.
|
1629
|
+
# @param [String] delimiter Delimiter to use between fields in the
|
1630
|
+
# exported data. Default is <code>,</code>.
|
1631
|
+
# @param [Boolean] header Whether to print out a header row in the
|
1632
|
+
# results. Default is `true`.
|
1633
|
+
# @yield [job] a job configuration object
|
1634
|
+
# @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
|
1635
|
+
# configuration object for setting additional options.
|
1636
|
+
#
|
1637
|
+
# @return [Boolean] Returns `true` if the extract operation succeeded.
|
1638
|
+
#
|
1639
|
+
# @example Extract to a JSON file:
|
1640
|
+
# require "google/cloud/bigquery"
|
1641
|
+
#
|
1642
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1643
|
+
# dataset = bigquery.dataset "my_dataset"
|
1644
|
+
# table = dataset.table "my_table"
|
1645
|
+
#
|
1646
|
+
# table.extract "gs://my-bucket/file-name.json", format: "json"
|
1647
|
+
#
|
1648
|
+
# @example Extract to a CSV file, attaching labels to the job:
|
1649
|
+
# require "google/cloud/bigquery"
|
1650
|
+
#
|
1651
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1652
|
+
# dataset = bigquery.dataset "my_dataset"
|
1653
|
+
# table = dataset.table "my_table"
|
1654
|
+
#
|
1655
|
+
# table.extract "gs://my-bucket/file-name.csv" do |extract|
|
1656
|
+
# extract.labels = { "custom-label" => "custom-value" }
|
1657
|
+
# end
|
1658
|
+
#
|
1659
|
+
# @!group Data
|
1660
|
+
#
|
1661
|
+
def extract extract_url, format: nil, compression: nil, delimiter: nil, header: nil, &block
|
1662
|
+
job = extract_job extract_url,
|
1663
|
+
format: format,
|
1664
|
+
compression: compression,
|
1665
|
+
delimiter: delimiter,
|
1666
|
+
header: header,
|
1667
|
+
&block
|
1668
|
+
job.wait_until_done!
|
1669
|
+
ensure_job_succeeded! job
|
1670
|
+
true
|
1671
|
+
end
|
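A short sketch of the synchronous form with CSV options (placeholder names; the pipe delimiter and suppressed header are illustrative):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    table    = bigquery.dataset("my_dataset").table "my_table"

    # Pipe-delimited CSV without a header row.
    table.extract "gs://my-bucket/file-name.csv", delimiter: "|", header: false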
1672
|
+
|
1673
|
+
##
|
1674
|
+
# Loads data into the table. You can pass a google-cloud storage file
|
1675
|
+
# path or a google-cloud storage file instance. Or, you can upload a
|
1676
|
+
# file directly. See [Loading Data with a POST Request](
|
1677
|
+
# https://cloud.google.com/bigquery/loading-data-post-request#multipart).
|
1678
|
+
#
|
1679
|
+
# The geographic location for the job ("US", "EU", etc.) can be set via
|
1680
|
+
# {LoadJob::Updater#location=} in a block passed to this method. If the
|
1681
|
+
# table is a full resource representation (see {#resource_full?}), the
|
1682
|
+
# location of the job will be automatically set to the location of the
|
1683
|
+
# table.
|
1684
|
+
#
|
1685
|
+
# @param [File, Google::Cloud::Storage::File, String, URI,
|
1686
|
+
# Array<Google::Cloud::Storage::File, String, URI>] files
|
1687
|
+
# A file or the URI of a Google Cloud Storage file, or an Array of
|
1688
|
+
# those, containing data to load into the table.
|
1689
|
+
# @param [String] format The format of the data to load. The default value is
|
1690
|
+
# `csv`.
|
1691
|
+
#
|
1692
|
+
# The following values are supported:
|
1693
|
+
#
|
1694
|
+
# * `csv` - CSV
|
1695
|
+
# * `json` - [Newline-delimited JSON](http://jsonlines.org/)
|
1696
|
+
# * `avro` - [Avro](http://avro.apache.org/)
|
1697
|
+
# * `orc` - [ORC](https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-orc)
|
1698
|
+
# * `parquet` - [Parquet](https://parquet.apache.org/)
|
1699
|
+
# * `datastore_backup` - Cloud Datastore backup
|
1700
|
+
# @param [String] create Specifies whether the job is allowed to create
|
1701
|
+
# new tables. The default value is `needed`.
|
1702
|
+
#
|
1703
|
+
# The following values are supported:
|
1704
|
+
#
|
1705
|
+
# * `needed` - Create the table if it does not exist.
|
1706
|
+
# * `never` - The table must already exist. A 'notFound' error is
|
1707
|
+
# raised if the table does not exist.
|
1708
|
+
# @param [String] write Specifies how to handle data already present in
|
1709
|
+
# the table. The default value is `append`.
|
1710
|
+
#
|
1711
|
+
# The following values are supported:
|
1712
|
+
#
|
1713
|
+
# * `truncate` - BigQuery overwrites the table data.
|
1714
|
+
# * `append` - BigQuery appends the data to the table.
|
1715
|
+
# * `empty` - An error will be returned if the table already contains
|
1716
|
+
# data.
|
1717
|
+
# @param [Array<String>] projection_fields If the `format` option is set
|
1718
|
+
# to `datastore_backup`, indicates which entity properties to load
|
1719
|
+
# from a Cloud Datastore backup. Property names are case sensitive and
|
1720
|
+
# must be top-level properties. If not set, BigQuery loads all
|
1721
|
+
# properties. If any named property isn't found in the Cloud Datastore
|
1722
|
+
# backup, an invalid error is returned.
|
1723
|
+
# @param [Boolean] jagged_rows Accept rows that are missing trailing
|
1724
|
+
# optional columns. The missing values are treated as nulls. If
|
1725
|
+
# `false`, records with missing trailing columns are treated as bad
|
1726
|
+
# records, and if there are too many bad records, an invalid error is
|
1727
|
+
# returned in the job result. The default value is `false`. Only
|
1728
|
+
# applicable to CSV, ignored for other formats.
|
1729
|
+
# @param [Boolean] quoted_newlines Indicates if BigQuery should allow
|
1730
|
+
# quoted data sections that contain newline characters in a CSV file.
|
1731
|
+
# The default value is `false`.
|
1732
|
+
# @param [Boolean] autodetect Indicates if BigQuery should
|
1733
|
+
# automatically infer the options and schema for CSV and JSON sources.
|
1734
|
+
# The default value is `false`.
|
1735
|
+
# @param [String] encoding The character encoding of the data. The
|
1736
|
+
# supported values are `UTF-8` or `ISO-8859-1`. The default value is
|
1737
|
+
# `UTF-8`.
|
1738
|
+
# @param [String] delimiter Specifies the separator for fields in a CSV
|
1739
|
+
# file. BigQuery converts the string to `ISO-8859-1` encoding, and
|
1740
|
+
# then uses the first byte of the encoded string to split the data in
|
1741
|
+
# its raw, binary state. Default is <code>,</code>.
|
1742
|
+
# @param [Boolean] ignore_unknown Indicates if BigQuery should allow
|
1743
|
+
# extra values that are not represented in the table schema. If true,
|
1744
|
+
# the extra values are ignored. If false, records with extra columns
|
1745
|
+
# are treated as bad records, and if there are too many bad records,
|
1746
|
+
# an invalid error is returned in the job result. The default value is
|
1747
|
+
# `false`.
|
1748
|
+
#
|
1749
|
+
# The `format` property determines what BigQuery treats as an extra
|
1750
|
+
# value:
|
1751
|
+
#
|
1752
|
+
# * `CSV`: Trailing columns
|
1753
|
+
# * `JSON`: Named values that don't match any column names
|
1754
|
+
# @param [Integer] max_bad_records The maximum number of bad records
|
1755
|
+
# that BigQuery can ignore when running the job. If the number of bad
|
1756
|
+
# records exceeds this value, an invalid error is returned in the job
|
1757
|
+
# result. The default value is `0`, which requires that all records
|
1758
|
+
# are valid.
|
1759
|
+
# @param [String] null_marker Specifies a string that represents a null
|
1760
|
+
# value in a CSV file. For example, if you specify `\N`, BigQuery
|
1761
|
+
# interprets `\N` as a null value when loading a CSV file. The default
|
1762
|
+
# value is the empty string. If you set this property to a custom
|
1763
|
+
# value, BigQuery throws an error if an empty string is present for
|
1764
|
+
# all data types except for STRING and BYTE. For STRING and BYTE
|
1765
|
+
# columns, BigQuery interprets the empty string as an empty value.
|
1766
|
+
# @param [String] quote The value that is used to quote data sections in
|
1767
|
+
# a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
|
1768
|
+
# then uses the first byte of the encoded string to split the data in
|
1769
|
+
# its raw, binary state. The default value is a double-quote
|
1770
|
+
# <code>"</code>. If your data does not contain quoted sections, set
|
1771
|
+
# the property value to an empty string. If your data contains quoted
|
1772
|
+
# newline characters, you must also set the allowQuotedNewlines
|
1773
|
+
# property to true.
|
1774
|
+
# @param [Integer] skip_leading The number of rows at the top of a CSV
|
1775
|
+
# file that BigQuery will skip when loading the data. The default
|
1776
|
+
# value is `0`. This property is useful if you have header rows in the
|
1777
|
+
# file that should be skipped.
|
1778
|
+
# @param [String] job_id A user-defined ID for the load job. The ID
|
1779
|
+
# must contain only letters (a-z, A-Z), numbers (0-9), underscores
|
1780
|
+
# (_), or dashes (-). The maximum length is 1,024 characters. If
|
1781
|
+
# `job_id` is provided, then `prefix` will not be used.
|
1782
|
+
#
|
1783
|
+
# See [Generating a job
|
1784
|
+
# ID](https://cloud.google.com/bigquery/docs/managing-jobs#generate-jobid).
|
1785
|
+
# @param [String] prefix A string, usually human-readable, that will be
|
1786
|
+
# prepended to a generated value to produce a unique job ID. For
|
1787
|
+
# example, the prefix `daily_import_job_` can be given to generate a
|
1788
|
+
# job ID such as `daily_import_job_12vEDtMQ0mbp1Mo5Z7mzAFQJZazh`. The
|
1789
|
+
# prefix must contain only letters (a-z, A-Z), numbers (0-9),
|
1790
|
+
# underscores (_), or dashes (-). The maximum length of the entire ID
|
1791
|
+
# is 1,024 characters. If `job_id` is provided, then `prefix` will not
|
1792
|
+
# be used.
|
1793
|
+
# @param [Hash] labels A hash of user-provided labels associated with
|
1794
|
+
# the job. You can use these to organize and group your jobs. Label
|
1795
|
+
# keys and values can be no longer than 63 characters, can only
|
1796
|
+
# contain lowercase letters, numeric characters, underscores and
|
1797
|
+
# dashes. International characters are allowed. Label values are
|
1798
|
+
# optional. Label keys must start with a letter and each label in the
|
1799
|
+
# list must have a different key. See [Requirements for
|
1800
|
+
# labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
|
1801
|
+
# @param [Boolean] dryrun If set, don't actually run this job. However,
|
1802
|
+
# behavior is undefined for non-query jobs and may result in an error.
|
1803
|
+
# Deprecated.
|
1804
|
+
#
|
1805
|
+
# @yield [load_job] a block for setting the load job
|
1806
|
+
# @yieldparam [LoadJob] load_job the load job object to be updated
|
1807
|
+
#
|
1808
|
+
# @return [Google::Cloud::Bigquery::LoadJob]
|
1809
|
+
#
|
1810
|
+
# @example
|
1811
|
+
# require "google/cloud/bigquery"
|
1812
|
+
#
|
1813
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1814
|
+
# dataset = bigquery.dataset "my_dataset"
|
1815
|
+
# table = dataset.table "my_table"
|
1816
|
+
#
|
1817
|
+
# load_job = table.load_job "gs://my-bucket/file-name.csv"
|
1818
|
+
#
|
1819
|
+
# @example Pass a google-cloud-storage `File` instance:
|
1820
|
+
# require "google/cloud/bigquery"
|
1821
|
+
# require "google/cloud/storage"
|
1822
|
+
#
|
1823
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1824
|
+
# dataset = bigquery.dataset "my_dataset"
|
1825
|
+
# table = dataset.table "my_table"
|
1826
|
+
#
|
1827
|
+
# storage = Google::Cloud::Storage.new
|
1828
|
+
# bucket = storage.bucket "my-bucket"
|
1829
|
+
# file = bucket.file "file-name.csv"
|
1830
|
+
# load_job = table.load_job file
|
1831
|
+
#
|
1832
|
+
# @example Pass a list of google-cloud-storage files:
|
1833
|
+
# require "google/cloud/bigquery"
|
1834
|
+
# require "google/cloud/storage"
|
1835
|
+
#
|
1836
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1837
|
+
# dataset = bigquery.dataset "my_dataset"
|
1838
|
+
# table = dataset.table "my_table"
|
1839
|
+
#
|
1840
|
+
# storage = Google::Cloud::Storage.new
|
1841
|
+
# bucket = storage.bucket "my-bucket"
|
1842
|
+
# file = bucket.file "file-name.csv"
|
1843
|
+
# load_job = table.load_job [file, "gs://my-bucket/file-name2.csv"]
|
1844
|
+
#
|
1845
|
+
# @example Upload a file directly:
|
1846
|
+
# require "google/cloud/bigquery"
|
1847
|
+
#
|
1848
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1849
|
+
# dataset = bigquery.dataset "my_dataset"
|
1850
|
+
# table = dataset.table "my_table"
|
1851
|
+
#
|
1852
|
+
# file = File.open "my_data.csv"
|
1853
|
+
# load_job = table.load_job file
|
1854
|
+
#
|
1855
|
+
# @!group Data
|
1856
|
+
#
|
1857
|
+
def load_job files, format: nil, create: nil, write: nil, projection_fields: nil, jagged_rows: nil,
|
1858
|
+
quoted_newlines: nil, encoding: nil, delimiter: nil, ignore_unknown: nil, max_bad_records: nil,
|
1859
|
+
quote: nil, skip_leading: nil, job_id: nil, prefix: nil, labels: nil, autodetect: nil,
|
1860
|
+
null_marker: nil, dryrun: nil
|
1861
|
+
ensure_service!
|
1862
|
+
|
1863
|
+
updater = load_job_updater format: format, create: create, write: write, projection_fields: projection_fields,
|
1864
|
+
jagged_rows: jagged_rows, quoted_newlines: quoted_newlines, encoding: encoding,
|
1865
|
+
delimiter: delimiter, ignore_unknown: ignore_unknown,
|
1866
|
+
max_bad_records: max_bad_records, quote: quote, skip_leading: skip_leading,
|
1867
|
+
dryrun: dryrun, job_id: job_id, prefix: prefix, schema: schema, labels: labels,
|
1868
|
+
autodetect: autodetect, null_marker: null_marker
|
1869
|
+
|
1870
|
+
yield updater if block_given?
|
1871
|
+
|
1872
|
+
job_gapi = updater.to_gapi
|
1873
|
+
|
1874
|
+
return load_local files, job_gapi if local_file? files
|
1875
|
+
load_storage files, job_gapi
|
1876
|
+
end
|
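A minimal sketch of an asynchronous load that skips a header row and declares the schema in the block (names and fields are placeholders):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"
    table    = dataset.table "my_table"

    # Skip the header row and declare the expected columns.
    load_job = table.load_job "gs://my-bucket/file-name.csv", skip_leading: 1 do |job|
      job.schema do |s|
        s.string  "first_name", mode: :required
        s.integer "age",        mode: :required
      end
    end

    load_job.wait_until_done!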
1877
|
+
|
1878
|
+
##
|
1879
|
+
# Loads data into the table. You can pass a google-cloud storage file
|
1880
|
+
# path or a google-cloud storage file instance. Or, you can upload a
|
1881
|
+
# file directly. See [Loading Data with a POST Request](
|
1882
|
+
# https://cloud.google.com/bigquery/loading-data-post-request#multipart).
|
1883
|
+
#
|
1884
|
+
# The geographic location for the job ("US", "EU", etc.) can be set via
|
1885
|
+
# {LoadJob::Updater#location=} in a block passed to this method. If the
|
1886
|
+
# table is a full resource representation (see {#resource_full?}), the
|
1887
|
+
# location of the job will be automatically set to the location of the
|
1888
|
+
# table.
|
1889
|
+
#
|
1890
|
+
# @param [File, Google::Cloud::Storage::File, String, URI,
|
1891
|
+
# Array<Google::Cloud::Storage::File, String, URI>] files
|
1892
|
+
# A file or the URI of a Google Cloud Storage file, or an Array of
|
1893
|
+
# those, containing data to load into the table.
|
1894
|
+
# @param [String] format The format of the data to load. The default value is
|
1895
|
+
# `csv`.
|
1896
|
+
#
|
1897
|
+
# The following values are supported:
|
1898
|
+
#
|
1899
|
+
# * `csv` - CSV
|
1900
|
+
# * `json` - [Newline-delimited JSON](http://jsonlines.org/)
|
1901
|
+
# * `avro` - [Avro](http://avro.apache.org/)
|
1902
|
+
# * `orc` - [ORC](https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-orc)
|
1903
|
+
# * `parquet` - [Parquet](https://parquet.apache.org/)
|
1904
|
+
# * `datastore_backup` - Cloud Datastore backup
|
1905
|
+
# @param [String] create Specifies whether the job is allowed to create
|
1906
|
+
# new tables. The default value is `needed`.
|
1907
|
+
#
|
1908
|
+
# The following values are supported:
|
1909
|
+
#
|
1910
|
+
# * `needed` - Create the table if it does not exist.
|
1911
|
+
# * `never` - The table must already exist. A 'notFound' error is
|
1912
|
+
# raised if the table does not exist.
|
1913
|
+
# @param [String] write Specifies how to handle data already present in
|
1914
|
+
# the table. The default value is `append`.
|
1915
|
+
#
|
1916
|
+
# The following values are supported:
|
1917
|
+
#
|
1918
|
+
# * `truncate` - BigQuery overwrites the table data.
|
1919
|
+
# * `append` - BigQuery appends the data to the table.
|
1920
|
+
# * `empty` - An error will be returned if the table already contains
|
1921
|
+
# data.
|
1922
|
+
# @param [Array<String>] projection_fields If the `format` option is set
|
1923
|
+
# to `datastore_backup`, indicates which entity properties to load
|
1924
|
+
# from a Cloud Datastore backup. Property names are case sensitive and
|
1925
|
+
# must be top-level properties. If not set, BigQuery loads all
|
1926
|
+
# properties. If any named property isn't found in the Cloud Datastore
|
1927
|
+
# backup, an invalid error is returned.
|
1928
|
+
# @param [Boolean] jagged_rows Accept rows that are missing trailing
|
1929
|
+
# optional columns. The missing values are treated as nulls. If
|
1930
|
+
# `false`, records with missing trailing columns are treated as bad
|
1931
|
+
# records, and if there are too many bad records, an invalid error is
|
1932
|
+
# returned in the job result. The default value is `false`. Only
|
1933
|
+
# applicable to CSV, ignored for other formats.
|
1934
|
+
# @param [Boolean] quoted_newlines Indicates if BigQuery should allow
|
1935
|
+
# quoted data sections that contain newline characters in a CSV file.
|
1936
|
+
# The default value is `false`.
|
1937
|
+
# @param [Boolean] autodetect Indicates if BigQuery should
|
1938
|
+
# automatically infer the options and schema for CSV and JSON sources.
|
1939
|
+
# The default value is `false`.
|
1940
|
+
# @param [String] encoding The character encoding of the data. The
|
1941
|
+
# supported values are `UTF-8` or `ISO-8859-1`. The default value is
|
1942
|
+
# `UTF-8`.
|
1943
|
+
# @param [String] delimiter Specifies the separator for fields in a CSV
|
1944
|
+
# file. BigQuery converts the string to `ISO-8859-1` encoding, and
|
1945
|
+
# then uses the first byte of the encoded string to split the data in
|
1946
|
+
# its raw, binary state. Default is <code>,</code>.
|
1947
|
+
# @param [Boolean] ignore_unknown Indicates if BigQuery should allow
|
1948
|
+
# extra values that are not represented in the table schema. If true,
|
1949
|
+
# the extra values are ignored. If false, records with extra columns
|
1950
|
+
# are treated as bad records, and if there are too many bad records,
|
1951
|
+
# an invalid error is returned in the job result. The default value is
|
1952
|
+
# `false`.
|
1953
|
+
#
|
1954
|
+
# The `format` property determines what BigQuery treats as an extra
|
1955
|
+
# value:
|
1956
|
+
#
|
1957
|
+
# * `CSV`: Trailing columns
|
1958
|
+
# * `JSON`: Named values that don't match any column names
|
1959
|
+
# @param [Integer] max_bad_records The maximum number of bad records
|
1960
|
+
# that BigQuery can ignore when running the job. If the number of bad
|
1961
|
+
# records exceeds this value, an invalid error is returned in the job
|
1962
|
+
# result. The default value is `0`, which requires that all records
|
1963
|
+
# are valid.
|
1964
|
+
# @param [String] null_marker Specifies a string that represents a null
|
1965
|
+
# value in a CSV file. For example, if you specify `\N`, BigQuery
|
1966
|
+
# interprets `\N` as a null value when loading a CSV file. The default
|
1967
|
+
# value is the empty string. If you set this property to a custom
|
1968
|
+
# value, BigQuery throws an error if an empty string is present for
|
1969
|
+
# all data types except for STRING and BYTE. For STRING and BYTE
|
1970
|
+
# columns, BigQuery interprets the empty string as an empty value.
|
1971
|
+
# @param [String] quote The value that is used to quote data sections in
|
1972
|
+
# a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
|
1973
|
+
# then uses the first byte of the encoded string to split the data in
|
1974
|
+
# its raw, binary state. The default value is a double-quote
|
1975
|
+
# <code>"</code>. If your data does not contain quoted sections, set
|
1976
|
+
# the property value to an empty string. If your data contains quoted
|
1977
|
+
# newline characters, you must also set the allowQuotedNewlines
|
1978
|
+
# property to true.
|
1979
|
+
# @param [Integer] skip_leading The number of rows at the top of a CSV
|
1980
|
+
# file that BigQuery will skip when loading the data. The default
|
1981
|
+
# value is `0`. This property is useful if you have header rows in the
|
1982
|
+
# file that should be skipped.
|
1983
|
+
#
|
1984
|
+
# @yield [updater] A block for setting the schema of the destination
|
1985
|
+
# table and other options for the load job. The schema can be omitted
|
1986
|
+
# if the destination table already exists, or if you're loading data
|
1987
|
+
# from a Google Cloud Datastore backup.
|
1988
|
+
# @yieldparam [Google::Cloud::Bigquery::LoadJob::Updater] updater An
|
1989
|
+
# updater to modify the load job and its schema.
|
1990
|
+
#
|
1991
|
+
# @return [Boolean] Returns `true` if the load job was successful.
|
1992
|
+
#
|
1993
|
+
# @example
|
1994
|
+
# require "google/cloud/bigquery"
|
1995
|
+
#
|
1996
|
+
# bigquery = Google::Cloud::Bigquery.new
|
1997
|
+
# dataset = bigquery.dataset "my_dataset"
|
1998
|
+
# table = dataset.table "my_table"
|
1999
|
+
#
|
2000
|
+
# success = table.load "gs://my-bucket/file-name.csv"
|
2001
|
+
#
|
2002
|
+
# @example Pass a google-cloud-storage `File` instance:
|
2003
|
+
# require "google/cloud/bigquery"
|
2004
|
+
# require "google/cloud/storage"
|
2005
|
+
#
|
2006
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2007
|
+
# dataset = bigquery.dataset "my_dataset"
|
2008
|
+
# table = dataset.table "my_table"
|
2009
|
+
#
|
2010
|
+
# storage = Google::Cloud::Storage.new
|
2011
|
+
# bucket = storage.bucket "my-bucket"
|
2012
|
+
# file = bucket.file "file-name.csv"
|
2013
|
+
# success = table.load file
|
2014
|
+
#
|
2015
|
+
# @example Pass a list of google-cloud-storage files:
|
2016
|
+
# require "google/cloud/bigquery"
|
2017
|
+
# require "google/cloud/storage"
|
2018
|
+
#
|
2019
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2020
|
+
# dataset = bigquery.dataset "my_dataset"
|
2021
|
+
# table = dataset.table "my_table"
|
2022
|
+
#
|
2023
|
+
# storage = Google::Cloud::Storage.new
|
2024
|
+
# bucket = storage.bucket "my-bucket"
|
2025
|
+
# file = bucket.file "file-name.csv"
|
2026
|
+
# table.load [file, "gs://my-bucket/file-name2.csv"]
|
2027
|
+
#
|
2028
|
+
# @example Upload a file directly:
|
2029
|
+
# require "google/cloud/bigquery"
|
2030
|
+
#
|
2031
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2032
|
+
# dataset = bigquery.dataset "my_dataset"
|
2033
|
+
# table = dataset.table "my_table"
|
2034
|
+
#
|
2035
|
+
# file = File.open "my_data.json"
|
2036
|
+
# success = table.load file do |j|
|
2037
|
+
# j.format = "newline_delimited_json"
|
2038
|
+
# end
|
2039
|
+
#
|
2040
|
+
# @!group Data
|
2041
|
+
#
|
2042
|
+
def load files, format: nil, create: nil, write: nil, projection_fields: nil, jagged_rows: nil,
|
2043
|
+
quoted_newlines: nil, encoding: nil, delimiter: nil, ignore_unknown: nil, max_bad_records: nil,
|
2044
|
+
quote: nil, skip_leading: nil, autodetect: nil, null_marker: nil, &block
|
2045
|
+
job = load_job files, format: format, create: create, write: write, projection_fields: projection_fields,
|
2046
|
+
jagged_rows: jagged_rows, quoted_newlines: quoted_newlines, encoding: encoding,
|
2047
|
+
delimiter: delimiter, ignore_unknown: ignore_unknown, max_bad_records: max_bad_records,
|
2048
|
+
quote: quote, skip_leading: skip_leading, autodetect: autodetect,
|
2049
|
+
null_marker: null_marker, &block
|
2050
|
+
|
2051
|
+
job.wait_until_done!
|
2052
|
+
ensure_job_succeeded! job
|
2053
|
+
true
|
2054
|
+
end
|
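A short sketch of the synchronous form that replaces the table contents and lets BigQuery infer the schema (placeholder names; `write: "truncate"` and `autodetect: true` are illustrative choices):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    table    = bigquery.dataset("my_dataset").table "my_table"

    # Overwrite existing data and infer CSV options and schema.
    table.load "gs://my-bucket/file-name.csv", write: "truncate", autodetect: true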
2055
|
+
|
2056
|
+
##
|
2057
|
+
# Inserts data into the table for near-immediate querying, without the
|
2058
|
+
# need to complete a load operation before the data can appear in query
|
2059
|
+
# results.
|
2060
|
+
#
|
2061
|
+
# Because BigQuery's streaming API is designed for high insertion rates,
|
2062
|
+
# modifications to the underlying table metadata are eventually
|
2063
|
+
# consistent when interacting with the streaming system. In most cases
|
2064
|
+
# metadata changes are propagated within minutes, but during this period
|
2065
|
+
# API responses may reflect the inconsistent state of the table.
|
2066
|
+
#
|
2067
|
+
# @see https://cloud.google.com/bigquery/streaming-data-into-bigquery
|
2068
|
+
# Streaming Data Into BigQuery
|
2069
|
+
#
|
2070
|
+
# @see https://cloud.google.com/bigquery/troubleshooting-errors#metadata-errors-for-streaming-inserts
|
2071
|
+
# BigQuery Troubleshooting: Metadata errors for streaming inserts
|
2072
|
+
#
|
2073
|
+
# @param [Hash, Array<Hash>] rows A hash object or array of hash objects
|
2074
|
+
# containing the data. Required.
|
2075
|
+
# @param [Array<String|Symbol>, Symbol] insert_ids A unique ID for each row. BigQuery uses this property to
|
2076
|
+
# detect duplicate insertion requests on a best-effort basis. For more information, see [data
|
2077
|
+
# consistency](https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataconsistency). Optional. If
|
2078
|
+
# not provided, the client library will assign a UUID to each row before the request is sent.
|
2079
|
+
#
|
2080
|
+
# The value `:skip` can be provided to skip the generation of IDs for all rows, or to skip the generation of an
|
2081
|
+
# ID for a specific row in the array.
|
2082
|
+
# @param [Boolean] skip_invalid Insert all valid rows of a request, even
|
2083
|
+
# if invalid rows exist. The default value is `false`, which causes
|
2084
|
+
# the entire request to fail if any invalid rows exist.
|
2085
|
+
# @param [Boolean] ignore_unknown Accept rows that contain values that
|
2086
|
+
# do not match the schema. The unknown values are ignored. Default is
|
2087
|
+
# false, which treats unknown values as errors.
|
2088
|
+
#
|
2089
|
+
# @return [Google::Cloud::Bigquery::InsertResponse]
|
2090
|
+
#
|
2091
|
+
# @example
|
2092
|
+
# require "google/cloud/bigquery"
|
2093
|
+
#
|
2094
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2095
|
+
# dataset = bigquery.dataset "my_dataset"
|
2096
|
+
# table = dataset.table "my_table"
|
2097
|
+
#
|
2098
|
+
# rows = [
|
2099
|
+
# { "first_name" => "Alice", "age" => 21 },
|
2100
|
+
# { "first_name" => "Bob", "age" => 22 }
|
2101
|
+
# ]
|
2102
|
+
# table.insert rows
|
2103
|
+
#
|
2104
|
+
# @example Avoid retrieving the dataset and table with `skip_lookup`:
|
2105
|
+
# require "google/cloud/bigquery"
|
2106
|
+
#
|
2107
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2108
|
+
# dataset = bigquery.dataset "my_dataset", skip_lookup: true
|
2109
|
+
# table = dataset.table "my_table", skip_lookup: true
|
2110
|
+
#
|
2111
|
+
# rows = [
|
2112
|
+
# { "first_name" => "Alice", "age" => 21 },
|
2113
|
+
# { "first_name" => "Bob", "age" => 22 }
|
2114
|
+
# ]
|
2115
|
+
# table.insert rows
|
2116
|
+
#
|
2117
|
+
# @!group Data
|
2118
|
+
#
|
2119
|
+
def insert rows, insert_ids: nil, skip_invalid: nil, ignore_unknown: nil
|
2120
|
+
rows = [rows] if rows.is_a? Hash
|
2121
|
+
raise ArgumentError, "No rows provided" if rows.empty?
|
2122
|
+
|
2123
|
+
insert_ids = Array.new(rows.count) { :skip } if insert_ids == :skip
|
2124
|
+
insert_ids = Array insert_ids
|
2125
|
+
if insert_ids.count.positive? && insert_ids.count != rows.count
|
2126
|
+
raise ArgumentError, "insert_ids must be the same size as rows"
|
2127
|
+
end
|
2128
|
+
|
2129
|
+
ensure_service!
|
2130
|
+
options = { skip_invalid: skip_invalid, ignore_unknown: ignore_unknown, insert_ids: insert_ids }
|
2131
|
+
gapi = service.insert_tabledata dataset_id, table_id, rows, options
|
2132
|
+
InsertResponse.from_gapi rows, gapi
|
2133
|
+
end
|
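A minimal sketch of the `insert_ids` behavior described above (placeholder names and IDs; any stable per-row string works):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    table    = bigquery.dataset("my_dataset").table "my_table"

    rows = [
      { "first_name" => "Alice", "age" => 21 },
      { "first_name" => "Bob",   "age" => 22 }
    ]

    # One ID per row enables best-effort deduplication; pass
    # insert_ids: :skip to disable ID generation entirely.
    response = table.insert rows, insert_ids: ["alice-1", "bob-1"]
    response.success? #=> true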
2134
|
+
|
2135
|
+
##
|
2136
|
+
# Create an asynchronous inserter object used to insert rows in batches.
|
2137
|
+
#
|
2138
|
+
# @param [Boolean] skip_invalid Insert all valid rows of a request, even
|
2139
|
+
# if invalid rows exist. The default value is `false`, which causes
|
2140
|
+
# the entire request to fail if any invalid rows exist.
|
2141
|
+
# @param [Boolean] ignore_unknown Accept rows that contain values that
|
2142
|
+
# do not match the schema. The unknown values are ignored. Default is
|
2143
|
+
# false, which treats unknown values as errors.
|
2144
|
+
# @param [Integer] max_bytes The maximum size of rows to be
|
2145
|
+
# collected before the batch is published. Default is 10,000,000
|
2146
|
+
# (10MB).
|
2147
|
+
# @param [Integer] max_rows The maximum number of rows to be collected
|
2148
|
+
# before the batch is published. Default is 500.
|
2149
|
+
# @param [Numeric] interval The number of seconds to collect
|
2150
|
+
# messages before the batch is published. Default is 10.
|
2151
|
+
# @param [Numeric] threads The number of threads used to insert
|
2152
|
+
# batches of rows. Default is 4.
|
2153
|
+
# @yield [result] the callback for when a batch of rows is inserted
|
2154
|
+
# @yieldparam [Table::AsyncInserter::Result] result the result of the
|
2155
|
+
# asynchronous insert
|
2156
|
+
#
|
2157
|
+
# @return [Table::AsyncInserter] Returns an inserter object.
|
2158
|
+
#
|
2159
|
+
# @example
|
2160
|
+
# require "google/cloud/bigquery"
|
2161
|
+
#
|
2162
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2163
|
+
# dataset = bigquery.dataset "my_dataset"
|
2164
|
+
# table = dataset.table "my_table"
|
2165
|
+
# inserter = table.insert_async do |result|
|
2166
|
+
# if result.error?
|
2167
|
+
# log_error result.error
|
2168
|
+
# else
|
2169
|
+
# log_insert "inserted #{result.insert_count} rows " \
|
2170
|
+
# "with #{result.error_count} errors"
|
2171
|
+
# end
|
2172
|
+
# end
|
2173
|
+
#
|
2174
|
+
# rows = [
|
2175
|
+
# { "first_name" => "Alice", "age" => 21 },
|
2176
|
+
# { "first_name" => "Bob", "age" => 22 }
|
2177
|
+
# ]
|
2178
|
+
# inserter.insert rows
|
2179
|
+
#
|
2180
|
+
# inserter.stop.wait!
|
2181
|
+
#
|
2182
|
+
def insert_async skip_invalid: nil, ignore_unknown: nil, max_bytes: 10_000_000, max_rows: 500, interval: 10,
|
2183
|
+
threads: 4, &block
|
2184
|
+
ensure_service!
|
2185
|
+
|
2186
|
+
AsyncInserter.new self, skip_invalid: skip_invalid, ignore_unknown: ignore_unknown, max_bytes: max_bytes,
|
2187
|
+
max_rows: max_rows, interval: interval, threads: threads, &block
|
2188
|
+
end
|
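A short sketch of tuning the inserter's batching (placeholder names; the max_rows and interval values are illustrative assumptions, not recommendations):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    table    = bigquery.dataset("my_dataset").table "my_table"

    # Flush smaller batches more often than the defaults.
    inserter = table.insert_async max_rows: 100, interval: 5 do |result|
      raise result.error if result.error?
    end

    inserter.insert [{ "first_name" => "Alice", "age" => 21 }]
    inserter.stop.wait!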
2189
|
+
|
2190
|
+
##
|
2191
|
+
# Permanently deletes the table.
|
2192
|
+
#
|
2193
|
+
# @return [Boolean] Returns `true` if the table was deleted.
|
2194
|
+
#
|
2195
|
+
# @example
|
2196
|
+
# require "google/cloud/bigquery"
|
2197
|
+
#
|
2198
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2199
|
+
# dataset = bigquery.dataset "my_dataset"
|
2200
|
+
# table = dataset.table "my_table"
|
2201
|
+
#
|
2202
|
+
# table.delete
|
2203
|
+
#
|
2204
|
+
# @!group Lifecycle
|
2205
|
+
#
|
2206
|
+
def delete
|
2207
|
+
ensure_service!
|
2208
|
+
service.delete_table dataset_id, table_id
|
2209
|
+
# Set flag for #exists?
|
2210
|
+
@exists = false
|
2211
|
+
true
|
2212
|
+
end
|
2213
|
+
|
2214
|
+
##
|
2215
|
+
# Reloads the table with current data from the BigQuery service.
|
2216
|
+
#
|
2217
|
+
# @return [Google::Cloud::Bigquery::Table] Returns the reloaded
|
2218
|
+
# table.
|
2219
|
+
#
|
2220
|
+
# @example Skip retrieving the table from the service, then load it:
|
2221
|
+
# require "google/cloud/bigquery"
|
2222
|
+
#
|
2223
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2224
|
+
#
|
2225
|
+
# dataset = bigquery.dataset "my_dataset"
|
2226
|
+
# table = dataset.table "my_table", skip_lookup: true
|
2227
|
+
#
|
2228
|
+
# table.reload!
|
2229
|
+
#
|
2230
|
+
# @!group Lifecycle
|
2231
|
+
#
|
2232
|
+
def reload!
|
2233
|
+
ensure_service!
|
2234
|
+
@gapi = service.get_table dataset_id, table_id
|
2235
|
+
@reference = nil
|
2236
|
+
@exists = nil
|
2237
|
+
self
|
2238
|
+
end
|
2239
|
+
alias refresh! reload!
|
2240
|
+
|
2241
|
+
##
|
2242
|
+
# Determines whether the table exists in the BigQuery service. The
|
2243
|
+
# result is cached locally. To refresh state, set `force` to `true`.
|
2244
|
+
#
|
2245
|
+
# @param [Boolean] force Force the latest resource representation to be
|
2246
|
+
# retrieved from the BigQuery service when `true`. Otherwise the
|
2247
|
+
# return value of this method will be memoized to reduce the number of
|
2248
|
+
# API calls made to the BigQuery service. The default is `false`.
|
2249
|
+
#
|
2250
|
+
# @return [Boolean] `true` when the table exists in the BigQuery
|
2251
|
+
# service, `false` otherwise.
|
2252
|
+
#
|
2253
|
+
# @example
|
2254
|
+
# require "google/cloud/bigquery"
|
2255
|
+
#
|
2256
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2257
|
+
#
|
2258
|
+
# dataset = bigquery.dataset "my_dataset"
|
2259
|
+
# table = dataset.table "my_table", skip_lookup: true
|
2260
|
+
# table.exists? # true
|
2261
|
+
#
|
2262
|
+
def exists? force: false
|
2263
|
+
return gapi_exists? if force
|
2264
|
+
# If we have a value, return it
|
2265
|
+
return @exists unless @exists.nil?
|
2266
|
+
# Always true if we have a gapi object
|
2267
|
+
return true if resource?
|
2268
|
+
gapi_exists?
|
2269
|
+
end
|
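A minimal sketch of the `force` option described above (placeholder names):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    table    = bigquery.dataset("my_dataset").table "my_table", skip_lookup: true

    table.exists?             # memoized after the first check
    table.exists? force: true # always asks the service again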
2270
|
+
|
2271
|
+
##
|
2272
|
+
# Whether the table was created without retrieving the resource
|
2273
|
+
# representation from the BigQuery service.
|
2274
|
+
#
|
2275
|
+
# @return [Boolean] `true` when the table is just a local reference
|
2276
|
+
# object, `false` otherwise.
|
2277
|
+
#
|
2278
|
+
# @example
|
2279
|
+
# require "google/cloud/bigquery"
|
2280
|
+
#
|
2281
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2282
|
+
#
|
2283
|
+
# dataset = bigquery.dataset "my_dataset"
|
2284
|
+
# table = dataset.table "my_table", skip_lookup: true
|
2285
|
+
#
|
2286
|
+
# table.reference? # true
|
2287
|
+
# table.reload!
|
2288
|
+
# table.reference? # false
|
2289
|
+
#
|
2290
|
+
def reference?
|
2291
|
+
@gapi.nil?
|
2292
|
+
end
|
2293
|
+
|
2294
|
+
##
|
2295
|
+
# Whether the table was created with a resource representation from
|
2296
|
+
# the BigQuery service.
|
2297
|
+
#
|
2298
|
+
# @return [Boolean] `true` when the table was created with a resource
|
2299
|
+
# representation, `false` otherwise.
|
2300
|
+
#
|
2301
|
+
# @example
|
2302
|
+
# require "google/cloud/bigquery"
|
2303
|
+
#
|
2304
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2305
|
+
#
|
2306
|
+
# dataset = bigquery.dataset "my_dataset"
|
2307
|
+
# table = dataset.table "my_table", skip_lookup: true
|
2308
|
+
#
|
2309
|
+
# table.resource? # false
|
2310
|
+
# table.reload!
|
2311
|
+
# table.resource? # true
|
2312
|
+
#
|
2313
|
+
def resource?
|
2314
|
+
!@gapi.nil?
|
2315
|
+
end
|
2316
|
+
|
2317
|
+
##
|
2318
|
+
# Whether the table was created with a partial resource representation
|
2319
|
+
# from the BigQuery service by retrieval through {Dataset#tables}.
|
2320
|
+
# See [Tables: list
|
2321
|
+
# response](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list#response)
|
2322
|
+
# for the contents of the partial representation. Accessing any
|
2323
|
+
# attribute outside of the partial representation will result in loading
|
2324
|
+
# the full representation.
|
2325
|
+
#
|
2326
|
+
# @return [Boolean] `true` when the table was created with a partial
|
2327
|
+
# resource representation, `false` otherwise.
|
2328
|
+
#
|
2329
|
+
# @example
|
2330
|
+
# require "google/cloud/bigquery"
|
2331
|
+
#
|
2332
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2333
|
+
#
|
2334
|
+
# dataset = bigquery.dataset "my_dataset"
|
2335
|
+
# table = dataset.tables.first
|
2336
|
+
#
|
2337
|
+
# table.resource_partial? # true
|
2338
|
+
# table.description # Loads the full resource.
|
2339
|
+
# table.resource_partial? # false
|
2340
|
+
#
|
2341
|
+
def resource_partial?
|
2342
|
+
@gapi.is_a? Google::Apis::BigqueryV2::TableList::Table
|
2343
|
+
end
|
2344
|
+
|
2345
|
+
##
|
2346
|
+
# Whether the table was created with a full resource representation
|
2347
|
+
# from the BigQuery service.
|
2348
|
+
#
|
2349
|
+
# @return [Boolean] `true` when the table was created with a full
|
2350
|
+
# resource representation, `false` otherwise.
|
2351
|
+
#
|
2352
|
+
# @example
|
2353
|
+
# require "google/cloud/bigquery"
|
2354
|
+
#
|
2355
|
+
# bigquery = Google::Cloud::Bigquery.new
|
2356
|
+
#
|
2357
|
+
# dataset = bigquery.dataset "my_dataset"
|
2358
|
+
# table = dataset.table "my_table"
|
2359
|
+
#
|
2360
|
+
# table.resource_full? # true
|
2361
|
+
#
|
2362
|
+
def resource_full?
|
2363
|
+
@gapi.is_a? Google::Apis::BigqueryV2::Table
|
2364
|
+
end
|
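A short sketch tying the reference/resource predicates together (placeholder names):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    table = dataset.table "my_table", skip_lookup: true
    table.reference? #=> true  (no API call made yet)
    table.resource?  #=> false

    table.reload!
    table.resource_full? #=> true

    partial = dataset.tables.first
    partial.resource_partial? #=> true, until an attribute outside the
                              #   partial representation is accessed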
2365
|
+
|
2366
|
+
##
|
2367
|
+
# @private New Table from a Google API Client object.
|
2368
|
+
def self.from_gapi gapi, service
|
2369
|
+
new.tap do |f|
|
2370
|
+
f.gapi = gapi
|
2371
|
+
f.service = service
|
2372
|
+
end
|
2373
|
+
end
|
2374
|
+
|
2375
|
+
##
|
2376
|
+
# @private New lazy Table object without making an HTTP request, for use with the skip_lookup option.
|
2377
|
+
def self.new_reference project_id, dataset_id, table_id, service
|
2378
|
+
raise ArgumentError, "dataset_id is required" unless dataset_id
|
2379
|
+
raise ArgumentError, "table_id is required" unless table_id
|
2380
|
+
new.tap do |b|
|
2381
|
+
reference_gapi = Google::Apis::BigqueryV2::TableReference.new(
|
2382
|
+
project_id: project_id,
|
2383
|
+
dataset_id: dataset_id,
|
2384
|
+
table_id: table_id
|
2385
|
+
)
|
2386
|
+
b.service = service
|
2387
|
+
b.instance_variable_set :@reference, reference_gapi
|
2388
|
+
end
|
2389
|
+
end
|
2390
|
+
|
2391
|
+
##
|
2392
|
+
# @private New lazy Table object from a Google API Client object.
|
2393
|
+
def self.new_reference_from_gapi gapi, service
|
2394
|
+
new.tap do |b|
|
2395
|
+
b.service = service
|
2396
|
+
b.instance_variable_set :@reference, gapi
|
2397
|
+
end
|
2398
|
+
end
|
2399
|
+
|
2400
|
+
protected
|
2401
|
+
|
2402
|
+
##
|
2403
|
+
# Raise an error unless an active service is available.
|
2404
|
+
def ensure_service!
|
2405
|
+
raise "Must have active connection" unless service
|
2406
|
+
end
|
2407
|
+
|
2408
|
+
##
|
2409
|
+
# Ensures the Google::Apis::BigqueryV2::Table object has been loaded
|
2410
|
+
# from the service.
|
2411
|
+
def ensure_gapi!
|
2412
|
+
ensure_service!
|
2413
|
+
return unless reference?
|
2414
|
+
reload!
|
2415
|
+
end
|
2416
|
+
|
2417
|
+
##
|
2418
|
+
# Fetch gapi and memoize whether resource exists.
|
2419
|
+
def gapi_exists?
|
2420
|
+
reload!
|
2421
|
+
@exists = true
|
2422
|
+
rescue Google::Cloud::NotFoundError
|
2423
|
+
@exists = false
|
2424
|
+
end
|
2425
|
+
|
2426
|
+
def patch_gapi! *attributes
|
2427
|
+
return if attributes.empty?
|
2428
|
+
ensure_service!
|
2429
|
+
patch_args = Hash[attributes.map { |attr| [attr, @gapi.send(attr)] }]
|
2430
|
+
patch_gapi = Google::Apis::BigqueryV2::Table.new patch_args
|
2431
|
+
patch_gapi.etag = etag if etag
|
2432
|
+
@gapi = service.patch_table dataset_id, table_id, patch_gapi
|
2433
|
+
|
2434
|
+
# TODO: restore original impl after acceptance test indicates that
|
2435
|
+
# service etag bug is fixed
|
2436
|
+
reload!
|
2437
|
+
end
|
2438
|
+
|
2439
|
+
def ensure_job_succeeded! job
|
2440
|
+
return unless job.failed?
|
2441
|
+
begin
|
2442
|
+
# raise to activate ruby exception cause handling
|
2443
|
+
raise job.gapi_error
|
2444
|
+
rescue StandardError => e
|
2445
|
+
# wrap Google::Apis::Error with Google::Cloud::Error
|
2446
|
+
raise Google::Cloud::Error.from_error(e)
|
2447
|
+
end
|
2448
|
+
end
|
2449
|
+
|
2450
|
+
def load_job_gapi table_id, dryrun, job_id: nil, prefix: nil
|
2451
|
+
job_ref = service.job_ref_from job_id, prefix
|
2452
|
+
Google::Apis::BigqueryV2::Job.new(
|
2453
|
+
job_reference: job_ref,
|
2454
|
+
configuration: Google::Apis::BigqueryV2::JobConfiguration.new(
|
2455
|
+
load: Google::Apis::BigqueryV2::JobConfigurationLoad.new(
|
2456
|
+
destination_table: Google::Apis::BigqueryV2::TableReference.new(
|
2457
|
+
project_id: @service.project,
|
2458
|
+
dataset_id: dataset_id,
|
2459
|
+
table_id: table_id
|
2460
|
+
)
|
2461
|
+
),
|
2462
|
+
dry_run: dryrun
|
2463
|
+
)
|
2464
|
+
)
|
2465
|
+
end
|
2466
|
+
|
2467
|
+
def load_job_csv_options! job, jagged_rows: nil, quoted_newlines: nil, delimiter: nil, quote: nil,
|
2468
|
+
skip_leading: nil, null_marker: nil
|
2469
|
+
job.jagged_rows = jagged_rows unless jagged_rows.nil?
|
2470
|
+
job.quoted_newlines = quoted_newlines unless quoted_newlines.nil?
|
2471
|
+
job.delimiter = delimiter unless delimiter.nil?
|
2472
|
+
job.null_marker = null_marker unless null_marker.nil?
|
2473
|
+
job.quote = quote unless quote.nil?
|
2474
|
+
job.skip_leading = skip_leading unless skip_leading.nil?
|
2475
|
+
end
|
2476
|
+
|
2477
|
+
def load_job_file_options! job, format: nil, projection_fields: nil, jagged_rows: nil, quoted_newlines: nil,
|
2478
|
+
encoding: nil, delimiter: nil, ignore_unknown: nil, max_bad_records: nil, quote: nil,
|
2479
|
+
skip_leading: nil, null_marker: nil
|
2480
|
+
job.format = format unless format.nil?
|
2481
|
+
job.projection_fields = projection_fields unless projection_fields.nil?
|
2482
|
+
job.encoding = encoding unless encoding.nil?
|
2483
|
+
job.ignore_unknown = ignore_unknown unless ignore_unknown.nil?
|
2484
|
+
job.max_bad_records = max_bad_records unless max_bad_records.nil?
|
2485
|
+
load_job_csv_options! job, jagged_rows: jagged_rows,
|
2486
|
+
quoted_newlines: quoted_newlines,
|
2487
|
+
delimiter: delimiter,
|
2488
|
+
quote: quote,
|
2489
|
+
skip_leading: skip_leading,
|
2490
|
+
null_marker: null_marker
|
2491
|
+
end
|
2492
|
+
|
2493
|
+
def load_job_updater format: nil, create: nil, write: nil, projection_fields: nil, jagged_rows: nil,
|
2494
|
+
quoted_newlines: nil, encoding: nil, delimiter: nil, ignore_unknown: nil,
|
2495
|
+
max_bad_records: nil, quote: nil, skip_leading: nil, dryrun: nil, schema: nil, job_id: nil,
|
2496
|
+
prefix: nil, labels: nil, autodetect: nil, null_marker: nil
|
2497
|
+
new_job = load_job_gapi table_id, dryrun, job_id: job_id, prefix: prefix
|
2498
|
+
LoadJob::Updater.new(new_job).tap do |job|
|
2499
|
+
job.location = location if location # may be table reference
|
2500
|
+
job.create = create unless create.nil?
|
2501
|
+
job.write = write unless write.nil?
|
2502
|
+
job.schema = schema unless schema.nil?
|
2503
|
+
job.autodetect = autodetect unless autodetect.nil?
|
2504
|
+
job.labels = labels unless labels.nil?
|
2505
|
+
load_job_file_options! job, format: format,
|
2506
|
+
projection_fields: projection_fields,
|
2507
|
+
jagged_rows: jagged_rows,
|
2508
|
+
quoted_newlines: quoted_newlines,
|
2509
|
+
encoding: encoding,
|
2510
|
+
delimiter: delimiter,
|
2511
|
+
ignore_unknown: ignore_unknown,
|
2512
|
+
max_bad_records: max_bad_records,
|
2513
|
+
quote: quote,
|
2514
|
+
skip_leading: skip_leading,
|
2515
|
+
null_marker: null_marker
|
2516
|
+
end
|
2517
|
+
end
|
2518
|
+
|
2519
|
+
def load_storage urls, job_gapi
|
2520
|
+
# Convert to storage URL
|
2521
|
+
urls = [urls].flatten.map do |url|
|
2522
|
+
if url.respond_to? :to_gs_url
|
2523
|
+
url.to_gs_url
|
2524
|
+
elsif url.is_a? URI
|
2525
|
+
url.to_s
|
2526
|
+
else
|
2527
|
+
url
|
2528
|
+
end
|
2529
|
+
end
|
2530
|
+
|
2531
|
+
unless urls.nil?
|
2532
|
+
job_gapi.configuration.load.update! source_uris: urls
|
2533
|
+
if job_gapi.configuration.load.source_format.nil?
|
2534
|
+
source_format = Convert.derive_source_format_from_list urls
|
2535
|
+
job_gapi.configuration.load.source_format = source_format unless source_format.nil?
|
2536
|
+
end
|
2537
|
+
end
|
2538
|
+
|
2539
|
+
gapi = service.load_table_gs_url job_gapi
|
2540
|
+
Job.from_gapi gapi, service
|
2541
|
+
end
|
2542
|
+
|
2543
|
+
def load_local file, job_gapi
|
2544
|
+
path = Pathname(file).to_path
|
2545
|
+
if job_gapi.configuration.load.source_format.nil?
|
2546
|
+
source_format = Convert.derive_source_format path
|
2547
|
+
job_gapi.configuration.load.source_format = source_format unless source_format.nil?
|
2548
|
+
end
|
2549
|
+
|
2550
|
+
gapi = service.load_table_file file, job_gapi
|
2551
|
+
Job.from_gapi gapi, service
|
2552
|
+
end
|
2553
|
+
|
2554
|
+
def load_local_or_uri file, updater
|
2555
|
+
job_gapi = updater.to_gapi
|
2556
|
+
job = if local_file? file
|
2557
|
+
load_local file, job_gapi
|
2558
|
+
else
|
2559
|
+
load_storage file, job_gapi
|
2560
|
+
end
|
2561
|
+
job
|
2562
|
+
end
|
2563
|
+
        def storage_url? files
          [files].flatten.all? do |file|
            file.respond_to?(:to_gs_url) ||
              (file.respond_to?(:to_str) && file.to_str.downcase.start_with?("gs://")) ||
              (file.is_a?(URI) && file.to_s.downcase.start_with?("gs://"))
          end
        end

        def local_file? file
          ::File.file? file
        rescue StandardError
          false
        end

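        # For reference, `storage_url?` accepts any mix of Cloud Storage
        # representations, including objects that respond to `to_gs_url`
        # (the values below are hypothetical):
        #
        #   storage_url? "gs://my-bucket/data.csv"                         # => true
        #   storage_url? URI("gs://my-bucket/data.csv")                    # => true
        #   storage_url? ["gs://my-bucket/a.csv", "gs://my-bucket/b.csv"]  # => true
        #   storage_url? "/tmp/data.csv"                                   # => false
        #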
        ##
        # Load the complete representation of the table if it has been
        # only partially loaded by a request to the API list method.
        def ensure_full_data!
          reload! unless data_complete?
        end

        def data_complete?
          @gapi.is_a? Google::Apis::BigqueryV2::Table
        end

        ##
        # Supports views.
        def udfs_gapi array_or_str
          return [] if array_or_str.nil?
          Array(array_or_str).map do |uri_or_code|
            resource = Google::Apis::BigqueryV2::UserDefinedFunctionResource.new
            if uri_or_code.start_with? "gs://"
              resource.resource_uri = uri_or_code
            else
              resource.inline_code = uri_or_code
            end
            resource
          end
        end

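        # A sketch of what `udfs_gapi` produces (values are hypothetical): a
        # "gs://" string becomes a `resource_uri` reference, while any other
        # string is treated as inline code:
        #
        #   udfs_gapi "gs://my-bucket/my_udf.js"
        #   # => [#<UserDefinedFunctionResource resource_uri: "gs://my-bucket/my_udf.js">]
        #   udfs_gapi "return x * 2;"
        #   # => [#<UserDefinedFunctionResource inline_code: "return x * 2;">]
        #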
        ##
        # Yielded to a block to accumulate changes for a create request. See {Dataset#create_table}.
        class Updater < Table
          ##
          # @private A list of attributes that were updated.
          attr_reader :updates

          ##
          # @private Create an Updater object.
          def initialize gapi
            @updates = []
            @gapi = gapi
            @schema = nil
          end

          ##
          # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
          # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
          #
          # See {Table::Updater#range_partitioning_start=}, {Table::Updater#range_partitioning_interval=} and
          # {Table::Updater#range_partitioning_end=}.
          #
          # You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
          # you to change partitioning on an existing table.
          #
          # @param [String] field The range partition field. The table is partitioned by this
          #   field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
          #   type is `INTEGER/INT64`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
          #   table = dataset.create_table "my_table" do |t|
          #     t.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
          #     end
          #     t.range_partitioning_field = "my_table_id"
          #     t.range_partitioning_start = 0
          #     t.range_partitioning_interval = 10
          #     t.range_partitioning_end = 100
          #   end
          #
          # @!group Attributes
          #
          def range_partitioning_field= field
            reload! unless resource_full?
            @gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
              range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
            )
            @gapi.range_partitioning.field = field
            patch_gapi! :range_partitioning
          end

          ##
          # Sets the start of range partitioning, inclusive, for the table. See [Creating and using integer range
          # partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
          #
          # You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
          # you to change partitioning on an existing table.
          #
          # See {Table::Updater#range_partitioning_field=}, {Table::Updater#range_partitioning_interval=} and
          # {Table::Updater#range_partitioning_end=}.
          #
          # @param [Integer] range_start The start of range partitioning, inclusive.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
          #   table = dataset.create_table "my_table" do |t|
          #     t.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
          #     end
          #     t.range_partitioning_field = "my_table_id"
          #     t.range_partitioning_start = 0
          #     t.range_partitioning_interval = 10
          #     t.range_partitioning_end = 100
          #   end
          #
          # @!group Attributes
          #
          def range_partitioning_start= range_start
            reload! unless resource_full?
            @gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
              range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
            )
            @gapi.range_partitioning.range.start = range_start
            patch_gapi! :range_partitioning
          end

          ##
          # Sets the width of each interval for data in range partitions. See [Creating and using integer range
          # partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
          #
          # You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
          # you to change partitioning on an existing table.
          #
          # See {Table::Updater#range_partitioning_field=}, {Table::Updater#range_partitioning_start=} and
          # {Table::Updater#range_partitioning_end=}.
          #
          # @param [Integer] range_interval The width of each interval for data in partitions.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
          #   table = dataset.create_table "my_table" do |t|
          #     t.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
          #     end
          #     t.range_partitioning_field = "my_table_id"
          #     t.range_partitioning_start = 0
          #     t.range_partitioning_interval = 10
          #     t.range_partitioning_end = 100
          #   end
          #
          # @!group Attributes
          #
          def range_partitioning_interval= range_interval
            reload! unless resource_full?
            @gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
              range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
            )
            @gapi.range_partitioning.range.interval = range_interval
            patch_gapi! :range_partitioning
          end

          ##
          # Sets the end of range partitioning, exclusive, for the table. See [Creating and using integer range
          # partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
          #
          # You can only set range partitioning when creating a table as in the example below. BigQuery does not allow
          # you to change partitioning on an existing table.
          #
          # See {Table::Updater#range_partitioning_start=}, {Table::Updater#range_partitioning_interval=} and
          # {Table::Updater#range_partitioning_field=}.
          #
          # @param [Integer] range_end The end of range partitioning, exclusive.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
          #   table = dataset.create_table "my_table" do |t|
          #     t.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
          #     end
          #     t.range_partitioning_field = "my_table_id"
          #     t.range_partitioning_start = 0
          #     t.range_partitioning_interval = 10
          #     t.range_partitioning_end = 100
          #   end
          #
          # @!group Attributes
          #
          def range_partitioning_end= range_end
            reload! unless resource_full?
            @gapi.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
              range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
            )
            @gapi.range_partitioning.range.end = range_end
            patch_gapi! :range_partitioning
          end

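          # Taken together, the four writers above populate a single
          # `RangePartitioning` resource. With the values used in the examples
          # (start 0, interval 10, end 100), BigQuery creates the buckets
          # [0, 10), [10, 20), ... [90, 100); per the integer range
          # partitioning documentation, rows outside that range land in an
          # UNPARTITIONED partition and NULL values in a NULL partition.
          #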
          ##
          # Sets one or more fields on which data should be clustered. Must be
          # specified with time-based partitioning; data in the table will be
          # first partitioned and subsequently clustered.
          #
          # Only top-level, non-repeated, simple-type fields are supported. When
          # you cluster a table using multiple columns, the order of columns you
          # specify is important. The order of the specified columns determines
          # the sort order of the data.
          #
          # You can only set the clustering fields while creating a table as in
          # the example below. BigQuery does not allow you to change clustering
          # on an existing table.
          #
          # See {Table#clustering_fields}.
          #
          # @see https://cloud.google.com/bigquery/docs/partitioned-tables
          #   Partitioned Tables
          # @see https://cloud.google.com/bigquery/docs/clustered-tables
          #   Introduction to Clustered Tables
          # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
          #   Creating and Using Clustered Tables
          #
          # @param [Array<String>] fields The clustering fields. Only top-level,
          #   non-repeated, simple-type fields are supported.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |t|
          #     t.schema do |schema|
          #       schema.timestamp "dob", mode: :required
          #       schema.string "first_name", mode: :required
          #       schema.string "last_name", mode: :required
          #     end
          #     t.time_partitioning_type = "DAY"
          #     t.time_partitioning_field = "dob"
          #     t.clustering_fields = ["last_name", "first_name"]
          #   end
          #
          # @!group Attributes
          #
          def clustering_fields= fields
            @gapi.clustering ||= Google::Apis::BigqueryV2::Clustering.new
            @gapi.clustering.fields = fields
            patch_gapi! :clustering
          end

          ##
          # Returns the table's schema. This method can also be used to set,
          # replace, or add to the schema by passing a block. See {Schema} for
          # available methods.
          #
          # @param [Boolean] replace Whether to replace the existing schema with
          #   the new schema. If `true`, the fields will replace the existing
          #   schema. If `false`, the fields will be added to the existing
          #   schema. When a table already contains data, schema changes must be
          #   additive. Thus, the default value is `false`.
          #   When loading from a file this will always replace the schema, no
          #   matter what `replace` is set to. You can update the schema (for
          #   example, for a table that already contains data) by providing a
          #   schema file that includes the existing schema plus any new
          #   fields.
          # @yield [schema] a block for setting the schema
          # @yieldparam [Schema] schema the object accepting the schema
          #
          # @return [Google::Cloud::Bigquery::Schema]
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |t|
          #     t.name = "My Table"
          #     t.description = "A description of my table."
          #     t.schema do |s|
          #       s.string "first_name", mode: :required
          #       s.record "cities_lived", mode: :repeated do |r|
          #         r.string "place", mode: :required
          #         r.integer "number_of_years", mode: :required
          #       end
          #     end
          #   end
          #
          # @example Load the schema from a file
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |t|
          #     t.name = "My Table"
          #     t.description = "A description of my table."
          #     t.schema do |s|
          #       s.load File.open("schema.json")
          #     end
          #   end
          #
          # @!group Schema
          #
          def schema replace: false
            # Same as Table#schema, but not frozen
            # TODO: make sure to call ensure_full_data! on Dataset#update
            @schema ||= Schema.from_gapi @gapi.schema
            if block_given?
              @schema = Schema.from_gapi if replace
              yield @schema
              check_for_mutated_schema!
            end
            # Do not freeze on updater, allow modifications
            @schema
          end

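          # A minimal sketch of the `replace: true` path (the field names are
          # hypothetical). Because this updater is yielded at table creation
          # time, replacing the schema here is safe even though schema changes
          # to a table that already holds data must be additive:
          #
          #   dataset.create_table "my_table" do |t|
          #     t.schema replace: true do |s|
          #       s.string "name", mode: :required
          #     end
          #   end
          #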
          ##
          # Adds a string field to the schema.
          #
          # See {Schema#string}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.string "first_name", mode: :required
          #   end
          #
          # @!group Schema
          def string name, description: nil, mode: :nullable
            schema.string name, description: description, mode: mode
          end

          ##
          # Adds an integer field to the schema.
          #
          # See {Schema#integer}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.integer "age", mode: :required
          #   end
          #
          # @!group Schema
          def integer name, description: nil, mode: :nullable
            schema.integer name, description: description, mode: mode
          end

          ##
          # Adds a floating-point number field to the schema.
          #
          # See {Schema#float}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.float "price", mode: :required
          #   end
          #
          # @!group Schema
          def float name, description: nil, mode: :nullable
            schema.float name, description: description, mode: mode
          end

          ##
          # Adds a numeric field to the schema. `NUMERIC` is a fixed-precision
          # numeric type with 38 decimal digits, 9 of which follow the decimal
          # point.
          #
          # See {Schema#numeric}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.numeric "total_cost", mode: :required
          #   end
          #
          # @!group Schema
          def numeric name, description: nil, mode: :nullable
            schema.numeric name, description: description, mode: mode
          end

          ##
          # Adds a boolean field to the schema.
          #
          # See {Schema#boolean}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.boolean "active", mode: :required
          #   end
          #
          # @!group Schema
          def boolean name, description: nil, mode: :nullable
            schema.boolean name, description: description, mode: mode
          end

          ##
          # Adds a bytes field to the schema.
          #
          # See {Schema#bytes}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.bytes "avatar", mode: :required
          #   end
          #
          # @!group Schema
          def bytes name, description: nil, mode: :nullable
            schema.bytes name, description: description, mode: mode
          end

          ##
          # Adds a timestamp field to the schema.
          #
          # See {Schema#timestamp}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.timestamp "creation_date", mode: :required
          #   end
          #
          # @!group Schema
          def timestamp name, description: nil, mode: :nullable
            schema.timestamp name, description: description, mode: mode
          end

          ##
          # Adds a time field to the schema.
          #
          # See {Schema#time}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.time "duration", mode: :required
          #   end
          #
          # @!group Schema
          def time name, description: nil, mode: :nullable
            schema.time name, description: description, mode: mode
          end

          ##
          # Adds a datetime field to the schema.
          #
          # See {Schema#datetime}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.datetime "target_end", mode: :required
          #   end
          #
          # @!group Schema
          def datetime name, description: nil, mode: :nullable
            schema.datetime name, description: description, mode: mode
          end

          ##
          # Adds a date field to the schema.
          #
          # See {Schema#date}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.date "birthday", mode: :required
          #   end
          #
          # @!group Schema
          def date name, description: nil, mode: :nullable
            schema.date name, description: description, mode: mode
          end

          ##
          # Adds a record field to the schema. A block must be passed describing
          # the nested fields of the record. For more information about nested
          # and repeated records, see [Loading denormalized, nested, and repeated
          # data](https://cloud.google.com/bigquery/docs/loading-data#loading_denormalized_nested_and_repeated_data).
          #
          # See {Schema#record}.
          #
          # @param [String] name The field name. The name must contain only
          #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
          #   start with a letter or underscore. The maximum length is 128
          #   characters.
          # @param [String] description A description of the field.
          # @param [Symbol] mode The field's mode. The possible values are
          #   `:nullable`, `:required`, and `:repeated`. The default value is
          #   `:nullable`.
          # @yield [nested_schema] a block for setting the nested schema
          # @yieldparam [Schema] nested_schema the object accepting the
          #   nested schema
          #
          # @example
          #   require "google/cloud/bigquery"
          #
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #   table = dataset.create_table "my_table" do |schema|
          #     schema.record "cities_lived", mode: :repeated do |cities_lived|
          #       cities_lived.string "place", mode: :required
          #       cities_lived.integer "number_of_years", mode: :required
          #     end
          #   end
          #
          # @!group Schema
          #
          def record name, description: nil, mode: nil, &block
            schema.record name, description: description, mode: mode, &block
          end

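          # The helpers above can also be called directly on the yielded
          # updater without an explicit `schema` block. A minimal sketch (the
          # field names are hypothetical):
          #
          #   dataset.create_table "my_table" do |t|
          #     t.string  "first_name", mode: :required
          #     t.integer "age"
          #     t.record  "cities_lived", mode: :repeated do |r|
          #       r.string "place", mode: :required
          #     end
          #   end
          #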
          # rubocop:disable Style/MethodDefParentheses

          ##
          # @raise [RuntimeError] not implemented
          def data(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def copy_job(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def copy(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def extract_job(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def extract(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def load_job(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def load(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def insert(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def insert_async(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def delete
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def query_job(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def query(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def external(*)
            raise "not implemented in #{self.class}"
          end

          ##
          # @raise [RuntimeError] not implemented
          def reload!
            raise "not implemented in #{self.class}"
          end
          alias refresh! reload!

          # rubocop:enable Style/MethodDefParentheses

          ##
          # @private Make sure any schema changes are saved.
          def check_for_mutated_schema!
            return if @schema.nil?
            return unless @schema.changed?
            @gapi.schema = @schema.to_gapi
            patch_gapi! :schema
          end

          ##
          # @private
          def to_gapi
            check_for_mutated_schema!
            @gapi
          end

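          # Rather than patching the API once per attribute writer, the updater
          # only records which attributes changed (see `patch_gapi!` below) and
          # returns the fully built resource from `to_gapi`, so a caller such
          # as {Dataset#create_table} can send one request. A rough sketch
          # (values are illustrative):
          #
          #   updater = Table::Updater.new Google::Apis::BigqueryV2::Table.new
          #   updater.clustering_fields = ["last_name", "first_name"]
          #   updater.updates        # => [:clustering]
          #   gapi = updater.to_gapi # one resource, sent in a single API call
          #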
          protected

          ##
          # Change to a NOOP
          def ensure_full_data!
            # Do nothing because we trust the gapi is full before we get here.
          end

          ##
          # Queue up all the updates instead of making them.
          def patch_gapi! attribute
            @updates << attribute
            @updates.uniq!
          end
        end
      end
    end
  end
end