google-cloud-bigquery 1.9.0 → 1.10.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 783828eac7c8335eced73772ee8d7f154daa10adfb2976c0961bca8132d449f5
- data.tar.gz: baf70806a64f209d6fd7d5567b11df9287879ea941478054ca4806dafb8980ec
+ metadata.gz: 475fe669391639d85bc48bdabb141ab8efa86dd9cdcfa97544d92b77099a1f63
+ data.tar.gz: ad906eefc520fff184d81d1496e7e65914be90d631d4d31aead94fa41f4fff0f
  SHA512:
- metadata.gz: dd35d5d59e70c1d6aa417bed8e48a5d94454353953b8a674f28f94a03ee748d8c559028ef5551a9e2c6d3ec98a67b036e48c8bbc261cf44671eb3f4d1ccd6086
- data.tar.gz: 2bffb6a6249419fdc9745e8f829a04f53daae9bdbc67fd72c9e58e8a70741e9261df1f189139bd73b5aacfe0584acacbd438720a6d314c5410cd40c30c675063
+ metadata.gz: 9639819d2a90cf202dcc9f2d4ef14fb1ddfab512ddb965af8fd394d7228a6ac5d061ba4e7444a98ac5c5a09a2bb189b0f6c81146375cd5d5bbbe78576f249d05
+ data.tar.gz: d3c854d3eb4ed7a053262842ab48e8bdcde7b929f1059695c8e67e36447c52fdaa1225b78334590d54ee0e445da8dcefd0402d04d5202c4d66155e9d27edb3b2
CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  # Release History

+ ### 1.10.0 / 2018-12-06
+
+ * Add dryrun param to Project#query_job and Dataset#query_job
+ * Add copy and extract methods to Project
+ * Add Project#extract and Project#extract_job
+ * Add Project#copy and Project#copy_job
+ * Deprecate dryrun param in Table#copy_job, Table#extract_job and
+   Table#load_job
+ * Fix memoization in Dataset#exists? and Table#exists?
+ * Add force param to Dataset#exists? and Table#exists?
+
  ### 1.9.0 / 2018-10-25

  * Add clustering fields to LoadJob, QueryJob and Table
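The entries above cover the whole 1.10.0 surface. A minimal sketch of how the new and changed calls fit together (project, dataset, table, and bucket names are hypothetical; assumes default credentials are configured):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new

# New: copy and extract directly from Project, which allows source
# tables in other projects.
dest = bigquery.dataset("my_dataset").table "my_copy", skip_lookup: true
bigquery.copy "bigquery-public-data.samples.shakespeare", dest
bigquery.extract dest, "gs://my-bucket/my_copy.csv"

# New: dryrun param on Project#query_job and Dataset#query_job.
job = bigquery.query_job "SELECT word FROM `my_dataset.my_copy`",
                         dryrun: true

# New: force param bypasses the (now fixed) memoization in #exists?.
bigquery.dataset("my_dataset").exists? force: true
```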
lib/google/cloud/bigquery/data.rb CHANGED
@@ -150,6 +150,7 @@ module Google
  # field.required? #=> true
  #
  def schema
+ return nil unless @table_gapi
  Schema.from_gapi(@table_gapi.schema).freeze
  end

lib/google/cloud/bigquery/dataset.rb CHANGED
@@ -418,6 +418,8 @@ module Google
  def delete force: nil
  ensure_service!
  service.delete_dataset dataset_id, force
+ # Set flag for #exists?
+ @exists = false
  true
  end

@@ -748,6 +750,10 @@ module Google
  # * `append` - BigQuery appends the data to the table.
  # * `empty` - A 'duplicate' error is returned in the job result if the
  # table exists and contains data.
+ # @param [Boolean] dryrun If set to true, BigQuery doesn't run the job.
+ # Instead, if the query is valid, BigQuery returns statistics about
+ # the job such as how many bytes would be processed. If the query is
+ # invalid, an error is returned. The default value is false.
  # @param [Boolean] standard_sql Specifies whether to use BigQuery's
  # [standard
  # SQL](https://cloud.google.com/bigquery/docs/reference/standard-sql/)
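A short sketch of the `dryrun` flag documented above (dataset and table names are hypothetical; `bytes_processed` is the statistics reader this gem already defines on QueryJob, named here as an assumption):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset"

# Validate and cost the query without running it.
job = dataset.query_job "SELECT COUNT(*) FROM my_table", dryrun: true
job.dryrun?         #=> true (reader added in this release)
job.bytes_processed # statistics: how many bytes the query would scan
```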
@@ -795,7 +801,8 @@ module Google
  # contain lowercase letters, numeric characters, underscores and
  # dashes. International characters are allowed. Label values are
  # optional. Label keys must start with a letter and each label in the
- # list must have a different key.
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
  # @param [Array<String>, String] udfs User-defined function resources
  # used in the query. May be either a code resource to load from a
  # Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
@@ -928,13 +935,13 @@ module Google
  #
  def query_job query, params: nil, external: nil,
  priority: "INTERACTIVE", cache: true, table: nil,
- create: nil, write: nil, standard_sql: nil,
+ create: nil, write: nil, dryrun: nil, standard_sql: nil,
  legacy_sql: nil, large_results: nil, flatten: nil,
  maximum_billing_tier: nil, maximum_bytes_billed: nil,
  job_id: nil, prefix: nil, labels: nil, udfs: nil
  ensure_service!
  options = { priority: priority, cache: cache, table: table,
- create: create, write: write,
+ create: create, write: write, dryrun: dryrun,
  large_results: large_results, flatten: flatten,
  legacy_sql: legacy_sql, standard_sql: standard_sql,
  maximum_billing_tier: maximum_billing_tier,
@@ -1341,14 +1348,17 @@ module Google
  # contain lowercase letters, numeric characters, underscores and
  # dashes. International characters are allowed. Label values are
  # optional. Label keys must start with a letter and each label in the
- # list must have a different key.
- #
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
  # @yield [updater] A block for setting the schema and other
  # options for the destination table. The schema can be omitted if the
  # destination table already exists, or if you're loading data from a
  # Google Cloud Datastore backup.
  # @yieldparam [Google::Cloud::Bigquery::LoadJob::Updater] updater An
  # updater to modify the load job and its schema.
+ # @param [Boolean] dryrun If set, don't actually run this job. However,
+ # behavior is undefined for non-query jobs and may result in an error.
+ # Deprecated.
  #
  # @return [Google::Cloud::Bigquery::LoadJob] A new load job object.
  #
@@ -1437,8 +1447,8 @@ module Google
  projection_fields: nil, jagged_rows: nil,
  quoted_newlines: nil, encoding: nil, delimiter: nil,
  ignore_unknown: nil, max_bad_records: nil, quote: nil,
- skip_leading: nil, dryrun: nil, schema: nil, job_id: nil,
- prefix: nil, labels: nil, autodetect: nil, null_marker: nil
+ skip_leading: nil, schema: nil, job_id: nil, prefix: nil,
+ labels: nil, autodetect: nil, null_marker: nil, dryrun: nil
  ensure_service!

  updater = load_job_updater table_id,
@@ -1709,16 +1719,21 @@ module Google
  #
  def reload!
  ensure_service!
- reloaded_gapi = service.get_dataset dataset_id
+ @gapi = service.get_dataset dataset_id
  @reference = nil
- @gapi = reloaded_gapi
+ @exists = nil
  self
  end
  alias refresh! reload!

  ##
  # Determines whether the dataset exists in the BigQuery service. The
- # result is cached locally.
+ # result is cached locally. To refresh state, set `force` to `true`.
+ #
+ # @param [Boolean] force Force the latest resource representation to be
+ # retrieved from the BigQuery service when `true`. Otherwise the
+ # return value of this method will be memoized to reduce the number of
+ # API calls made to the BigQuery service. The default is `false`.
  #
  # @return [Boolean] `true` when the dataset exists in the BigQuery
  # service, `false` otherwise.
@@ -1731,15 +1746,13 @@ module Google
  # dataset = bigquery.dataset "my_dataset", skip_lookup: true
  # dataset.exists? # true
  #
- def exists?
- # Always true if we have a gapi object
- return true unless reference?
- # If we have a value, return it
+ def exists? force: nil
+ return gapi_exists? if force
+ # If we have a memoized value, return it
  return @exists unless @exists.nil?
- ensure_gapi!
- @exists = true
- rescue Google::Cloud::NotFoundError
- @exists = false
+ # Always true if we have a gapi object
+ return true if resource?
+ gapi_exists?
  end

  ##
@@ -2056,6 +2069,15 @@ module Google
  reload!
  end

+ ##
+ # Fetch gapi and memoize whether resource exists.
+ def gapi_exists?
+ reload!
+ @exists = true
+ rescue Google::Cloud::NotFoundError
+ @exists = false
+ end
+
  def patch_gapi! *attributes
  return if attributes.empty?
  ensure_service!
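Taken together with the `force` param above, existence checks now behave like this (sketch; the dataset ID is hypothetical):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset  = bigquery.dataset "my_dataset", skip_lookup: true

dataset.exists?              # first call fetches the resource and memoizes
dataset.exists?              # answered from the memoized value, no API call
dataset.exists? force: true  # bypasses the memo and calls reload!

dataset.delete
dataset.exists?              #=> false, set directly by #delete
```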
lib/google/cloud/bigquery/dataset/access.rb CHANGED
@@ -191,10 +191,12 @@ module Google
  ##
  # Add reader access to a view.
  #
- # @param [Google::Cloud::Bigquery::Table, String] view A table object
- # or a string identifier as specified by the [Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from):
- # `project_name:datasetId.tableId`.
+ # @param [Google::Cloud::Bigquery::Table, String] view A table object,
+ # or a string identifier as specified by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
  #
  # @example
  # require "google/cloud/bigquery"
@@ -450,10 +452,12 @@ module Google
  ##
  # Remove reader access from a view.
  #
- # @param [Google::Cloud::Bigquery::Table, String] view A table object
- # or a string identifier as specified by the [Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from):
- # `project_name:datasetId.tableId`.
+ # @param [Google::Cloud::Bigquery::Table, String] view A table object,
+ # or a string identifier as specified by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
  #
  # @example
  # require "google/cloud/bigquery"
@@ -705,10 +709,12 @@ module Google
  ##
  # Checks reader access for a view.
  #
- # @param [Google::Cloud::Bigquery::Table, String] view A table object
- # or a string identifier as specified by the [Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from):
- # `project_name:datasetId.tableId`.
+ # @param [Google::Cloud::Bigquery::Table, String] view A table object,
+ # or a string identifier as specified by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
  #
  # @example
  # require "google/cloud/bigquery"
@@ -923,7 +929,7 @@ module Google
  if view.respond_to? :table_ref
  view.table_ref
  else
- Service.table_ref_from_s view, @dataset_reference
+ Service.table_ref_from_s view, default_ref: @dataset_reference
  end
  end

lib/google/cloud/bigquery/project.rb CHANGED
@@ -95,6 +95,184 @@ module Google
  service.project_service_account.email
  end

+ ##
+ # Copies the data from the source table to the destination table using
+ # an asynchronous method. In this method, a {CopyJob} is immediately
+ # returned. The caller may poll the service by repeatedly calling
+ # {Job#reload!} and {Job#done?} to detect when the job is done, or
+ # simply block until the job is done by calling {Job#wait_until_done!}.
+ # See {#copy} for the synchronous version. Use this method instead of
+ # {Table#copy_job} to copy from source tables in other projects.
+ #
+ # The geographic location for the job ("US", "EU", etc.) can be set via
+ # {CopyJob::Updater#location=} in a block passed to this method.
+ #
+ # @param [String, Table] source_table The source table for the
+ # copied data. This can be a table object; or a string ID as specified
+ # by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
+ # @param [String, Table] destination_table The destination table for the
+ # copied data. This can be a table object; or a string ID as specified
+ # by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
+ # @param [String] create Specifies whether the job is allowed to create
+ # new tables. The default value is `needed`.
+ #
+ # The following values are supported:
+ #
+ # * `needed` - Create the table if it does not exist.
+ # * `never` - The table must already exist. A 'notFound' error is
+ # raised if the table does not exist.
+ # @param [String] write Specifies how to handle data already present in
+ # the destination table. The default value is `empty`.
+ #
+ # The following values are supported:
+ #
+ # * `truncate` - BigQuery overwrites the table data.
+ # * `append` - BigQuery appends the data to the table.
+ # * `empty` - An error will be returned if the destination table
+ # already contains data.
+ # @param [String] job_id A user-defined ID for the copy job. The ID
+ # must contain only letters (a-z, A-Z), numbers (0-9), underscores
+ # (_), or dashes (-). The maximum length is 1,024 characters. If
+ # `job_id` is provided, then `prefix` will not be used.
+ #
+ # See [Generating a job
+ # ID](https://cloud.google.com/bigquery/docs/managing-jobs#generate-jobid).
+ # @param [String] prefix A string, usually human-readable, that will be
+ # prepended to a generated value to produce a unique job ID. For
+ # example, the prefix `daily_import_job_` can be given to generate a
+ # job ID such as `daily_import_job_12vEDtMQ0mbp1Mo5Z7mzAFQJZazh`. The
+ # prefix must contain only letters (a-z, A-Z), numbers (0-9),
+ # underscores (_), or dashes (-). The maximum length of the entire ID
+ # is 1,024 characters. If `job_id` is provided, then `prefix` will not
+ # be used.
+ # @param [Hash] labels A hash of user-provided labels associated with
+ # the job. You can use these to organize and group your jobs. Label
+ # keys and values can be no longer than 63 characters, can only
+ # contain lowercase letters, numeric characters, underscores and
+ # dashes. International characters are allowed. Label values are
+ # optional. Label keys must start with a letter and each label in the
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
+ # @yield [job] a job configuration object
+ # @yieldparam [Google::Cloud::Bigquery::CopyJob::Updater] job a job
+ # configuration object for setting additional options.
+ #
+ # @return [Google::Cloud::Bigquery::CopyJob]
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ # dataset = bigquery.dataset "my_dataset"
+ # source_table_id = "bigquery-public-data.samples.shakespeare"
+ # destination_table = dataset.table "my_destination_table"
+ #
+ # copy_job = bigquery.copy_job source_table_id, destination_table
+ #
+ # copy_job.wait_until_done!
+ # copy_job.done? #=> true
+ #
+ # @!group Data
+ #
+ def copy_job source_table, destination_table, create: nil, write: nil,
+ job_id: nil, prefix: nil, labels: nil
+ ensure_service!
+ options = { create: create, write: write, labels: labels,
+ job_id: job_id, prefix: prefix }
+
+ updater = CopyJob::Updater.from_options(
+ service,
+ Service.get_table_ref(source_table, default_ref: project_ref),
+ Service.get_table_ref(destination_table, default_ref: project_ref),
+ options
+ )
+
+ yield updater if block_given?
+
+ job_gapi = updater.to_gapi
+ gapi = service.copy_table job_gapi
+ Job.from_gapi gapi, service
+ end
+
+ ##
+ # Copies the data from the source table to the destination table using a
+ # synchronous method that blocks for a response. Timeouts and transient
+ # errors are generally handled as needed to complete the job. See
+ # {#copy_job} for the asynchronous version. Use this method instead of
+ # {Table#copy} to copy from source tables in other projects.
+ #
+ # The geographic location for the job ("US", "EU", etc.) can be set via
+ # {CopyJob::Updater#location=} in a block passed to this method.
+ #
+ # @param [String, Table] source_table The source table for the
+ # copied data. This can be a table object; or a string ID as specified
+ # by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
+ # @param [String, Table] destination_table The destination table for the
+ # copied data. This can be a table object; or a string ID as specified
+ # by the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
+ # @param [String] create Specifies whether the job is allowed to create
+ # new tables. The default value is `needed`.
+ #
+ # The following values are supported:
+ #
+ # * `needed` - Create the table if it does not exist.
+ # * `never` - The table must already exist. A 'notFound' error is
+ # raised if the table does not exist.
+ # @param [String] write Specifies how to handle data already present in
+ # the destination table. The default value is `empty`.
+ #
+ # The following values are supported:
+ #
+ # * `truncate` - BigQuery overwrites the table data.
+ # * `append` - BigQuery appends the data to the table.
+ # * `empty` - An error will be returned if the destination table
+ # already contains data.
+ # @yield [job] a job configuration object
+ # @yieldparam [Google::Cloud::Bigquery::CopyJob::Updater] job a job
+ # configuration object for setting additional options.
+ #
+ # @return [Boolean] Returns `true` if the copy operation succeeded.
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ # dataset = bigquery.dataset "my_dataset"
+ # destination_table = dataset.table "my_destination_table"
+ #
+ # bigquery.copy "bigquery-public-data.samples.shakespeare",
+ # destination_table
+ #
+ # @!group Data
+ #
+ def copy source_table, destination_table, create: nil, write: nil,
+ &block
+ job = copy_job source_table,
+ destination_table,
+ create: create,
+ write: write,
+ &block
+ job.wait_until_done!
+ ensure_job_succeeded! job
+ true
+ end
+
  ##
  # Queries data by creating a [query
  # job](https://cloud.google.com/bigquery/docs/query-overview#query_jobs).
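As the docs above note, further job options such as the geographic location go through the CopyJob::Updater block (sketch; IDs and location are made up):

```ruby
require "google/cloud/bigquery"

bigquery    = Google::Cloud::Bigquery.new
dataset     = bigquery.dataset "my_dataset"
destination = dataset.table "my_destination_table", skip_lookup: true

copy_job = bigquery.copy_job "bigquery-public-data.samples.shakespeare",
                             destination, write: "truncate" do |job|
  job.location = "US" # set the job's geographic location
end
copy_job.wait_until_done!
copy_job.done? #=> true
```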
@@ -166,6 +344,10 @@ module Google
  # * `append` - BigQuery appends the data to the table.
  # * `empty` - A 'duplicate' error is returned in the job result if the
  # table exists and contains data.
+ # @param [Boolean] dryrun If set to true, BigQuery doesn't run the job.
+ # Instead, if the query is valid, BigQuery returns statistics about
+ # the job such as how many bytes would be processed. If the query is
+ # invalid, an error is returned. The default value is false.
  # @param [Dataset, String] dataset The default dataset to use for
  # unqualified table names in the query. Optional.
  # @param [String] project Specifies the default projectId to assume for
@@ -221,7 +403,8 @@ module Google
  # contain lowercase letters, numeric characters, underscores and
  # dashes. International characters are allowed. Label values are
  # optional. Label keys must start with a letter and each label in the
- # list must have a different key.
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
  # @param [Array<String>, String] udfs User-defined function resources
  # used in the query. May be either a code resource to load from a
  # Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
@@ -357,14 +540,14 @@ module Google
  #
  def query_job query, params: nil, external: nil,
  priority: "INTERACTIVE", cache: true, table: nil,
- create: nil, write: nil, dataset: nil, project: nil,
- standard_sql: nil, legacy_sql: nil, large_results: nil,
- flatten: nil, maximum_billing_tier: nil,
- maximum_bytes_billed: nil, job_id: nil, prefix: nil,
- labels: nil, udfs: nil
+ create: nil, write: nil, dryrun: nil, dataset: nil,
+ project: nil, standard_sql: nil, legacy_sql: nil,
+ large_results: nil, flatten: nil,
+ maximum_billing_tier: nil, maximum_bytes_billed: nil,
+ job_id: nil, prefix: nil, labels: nil, udfs: nil
  ensure_service!
  options = { priority: priority, cache: cache, table: table,
- create: create, write: write,
+ create: create, write: write, dryrun: dryrun,
  large_results: large_results, flatten: flatten,
  dataset: dataset, project: (project || self.project),
  legacy_sql: legacy_sql, standard_sql: standard_sql,
@@ -1096,6 +1279,177 @@ module Google
  encrypt_config
  end

+ ##
+ # Extracts the data from the provided table to a Google Cloud Storage
+ # file using an asynchronous method. In this method, an {ExtractJob} is
+ # immediately returned. The caller may poll the service by repeatedly
+ # calling {Job#reload!} and {Job#done?} to detect when the job is done,
+ # or simply block until the job is done by calling
+ # {Job#wait_until_done!}. See {#extract} for the synchronous version.
+ # Use this method instead of {Table#extract_job} to extract data from
+ # source tables in other projects.
+ #
+ # The geographic location for the job ("US", "EU", etc.) can be set via
+ # {ExtractJob::Updater#location=} in a block passed to this method.
+ #
+ # @see https://cloud.google.com/bigquery/exporting-data-from-bigquery
+ # Exporting Data From BigQuery
+ #
+ # @param [String, Table] table The source table from which to extract
+ # data. This can be a table object; or a string ID as specified by the
+ # [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
+ # @param [Google::Cloud::Storage::File, String, Array<String>]
+ # extract_url The Google Storage file or file URI pattern(s) to which
+ # BigQuery should extract the table data.
+ # @param [String] format The exported file format. The default value is
+ # `csv`.
+ #
+ # The following values are supported:
+ #
+ # * `csv` - CSV
+ # * `json` - [Newline-delimited JSON](http://jsonlines.org/)
+ # * `avro` - [Avro](http://avro.apache.org/)
+ # @param [String] compression The compression type to use for exported
+ # files. Possible values include `GZIP` and `NONE`. The default value
+ # is `NONE`.
+ # @param [String] delimiter Delimiter to use between fields in the
+ # exported data. Default is <code>,</code>.
+ # @param [Boolean] header Whether to print out a header row in the
+ # results. Default is `true`.
+ # @param [String] job_id A user-defined ID for the extract job. The ID
+ # must contain only letters (a-z, A-Z), numbers (0-9), underscores
+ # (_), or dashes (-). The maximum length is 1,024 characters. If
+ # `job_id` is provided, then `prefix` will not be used.
+ #
+ # See [Generating a job
+ # ID](https://cloud.google.com/bigquery/docs/managing-jobs#generate-jobid).
+ # @param [String] prefix A string, usually human-readable, that will be
+ # prepended to a generated value to produce a unique job ID. For
+ # example, the prefix `daily_import_job_` can be given to generate a
+ # job ID such as `daily_import_job_12vEDtMQ0mbp1Mo5Z7mzAFQJZazh`. The
+ # prefix must contain only letters (a-z, A-Z), numbers (0-9),
+ # underscores (_), or dashes (-). The maximum length of the entire ID
+ # is 1,024 characters. If `job_id` is provided, then `prefix` will not
+ # be used.
+ # @param [Hash] labels A hash of user-provided labels associated with
+ # the job. You can use these to organize and group your jobs. Label
+ # keys and values can be no longer than 63 characters, can only
+ # contain lowercase letters, numeric characters, underscores and
+ # dashes. International characters are allowed. Label values are
+ # optional. Label keys must start with a letter and each label in the
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
+ # @yield [job] a job configuration object
+ # @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
+ # configuration object for setting additional options.
+ #
+ # @return [Google::Cloud::Bigquery::ExtractJob]
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ #
+ # table_id = "bigquery-public-data.samples.shakespeare"
+ # extract_job = bigquery.extract_job table_id,
+ # "gs://my-bucket/shakespeare.csv"
+ # extract_job.wait_until_done!
+ # extract_job.done? #=> true
+ #
+ # @!group Data
+ #
+ def extract_job table, extract_url, format: nil, compression: nil,
+ delimiter: nil, header: nil, job_id: nil, prefix: nil,
+ labels: nil
+ ensure_service!
+ options = { format: format, compression: compression,
+ delimiter: delimiter, header: header, job_id: job_id,
+ prefix: prefix, labels: labels }
+
+ table_ref = Service.get_table_ref table, default_ref: project_ref
+ updater = ExtractJob::Updater.from_options service, table_ref,
+ extract_url, options
+
+ yield updater if block_given?
+
+ job_gapi = updater.to_gapi
+ gapi = service.extract_table job_gapi
+ Job.from_gapi gapi, service
+ end
+
+ ##
+ # Extracts the data from the provided table to a Google Cloud Storage
+ # file using a synchronous method that blocks for a response. Timeouts
+ # and transient errors are generally handled as needed to complete the
+ # job. See {#extract_job} for the asynchronous version. Use this method
+ # instead of {Table#extract} to extract data from source tables in other
+ # projects.
+ #
+ # The geographic location for the job ("US", "EU", etc.) can be set via
+ # {ExtractJob::Updater#location=} in a block passed to this method.
+ #
+ # @see https://cloud.google.com/bigquery/exporting-data-from-bigquery
+ # Exporting Data From BigQuery
+ #
+ # @param [String, Table] table The source table from which to extract
+ # data. This can be a table object; or a string ID as specified by the
+ # [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`).
+ # @param [Google::Cloud::Storage::File, String, Array<String>]
+ # extract_url The Google Storage file or file URI pattern(s) to which
+ # BigQuery should extract the table data.
+ # @param [String] format The exported file format. The default value is
+ # `csv`.
+ #
+ # The following values are supported:
+ #
+ # * `csv` - CSV
+ # * `json` - [Newline-delimited JSON](http://jsonlines.org/)
+ # * `avro` - [Avro](http://avro.apache.org/)
+ # @param [String] compression The compression type to use for exported
+ # files. Possible values include `GZIP` and `NONE`. The default value
+ # is `NONE`.
+ # @param [String] delimiter Delimiter to use between fields in the
+ # exported data. Default is <code>,</code>.
+ # @param [Boolean] header Whether to print out a header row in the
+ # results. Default is `true`.
+ # @yield [job] a job configuration object
+ # @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
+ # configuration object for setting additional options.
+ #
+ # @return [Boolean] Returns `true` if the extract operation succeeded.
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ #
+ # bigquery.extract "bigquery-public-data.samples.shakespeare",
+ # "gs://my-bucket/shakespeare.csv"
+ #
+ # @!group Data
+ #
+ def extract table, extract_url, format: nil, compression: nil,
+ delimiter: nil, header: nil, &block
+ job = extract_job table,
+ extract_url,
+ format: format,
+ compression: compression,
+ delimiter: delimiter,
+ header: header,
+ &block
+ job.wait_until_done!
+ ensure_job_succeeded! job
+ true
+ end
+
  ##
  # @private New Project from a Google API Client object, using the
  # same Credentials as this project.
@@ -1122,6 +1476,21 @@ module Google
  def ensure_service!
  raise "Must have active connection" unless service
  end
+
+ def ensure_job_succeeded! job
+ return unless job.failed?
+ begin
+ # raise to activate ruby exception cause handling
+ raise job.gapi_error
+ rescue StandardError => e
+ # wrap Google::Apis::Error with Google::Cloud::Error
+ raise Google::Cloud::Error.from_error(e)
+ end
+ end
+
+ def project_ref
+ Google::Apis::BigqueryV2::ProjectReference.new project_id: project_id
+ end
  end
  end
  end
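And the synchronous Project#extract with format options, per the parameters documented above (sketch; the bucket name is hypothetical):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new

# Export gzipped newline-delimited JSON from a public table. Raises a
# Google::Cloud::Error (via ensure_job_succeeded!) if the job fails.
bigquery.extract "bigquery-public-data.samples.shakespeare",
                 "gs://my-bucket/shakespeare.json.gz",
                 format: "json", compression: "GZIP"
```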
lib/google/cloud/bigquery/query_job.rb CHANGED
@@ -99,6 +99,22 @@ module Google
  val
  end

+ ##
+ # If set, don't actually run this job. A valid query will return a
+ # mostly empty response with some processing statistics, while an
+ # invalid query will return the same error it would if it wasn't a dry
+ # run.
+ #
+ # @return [Boolean] `true` when the dry run flag is set for the query
+ # job, `false` otherwise.
+ #
+ def dryrun?
+ @gapi.configuration.dry_run
+ end
+ alias dryrun dryrun?
+ alias dry_run dryrun?
+ alias dry_run? dryrun?
+
  ##
  # Checks if the query job flattens nested and repeated fields in the
  # query results. The default is `true`. If the value is `false`,
@@ -557,6 +573,9 @@ module Google
  #
  def data token: nil, max: nil, start: nil
  return nil unless done?
+ if dryrun?
+ return Data.from_gapi_json({ rows: [] }, nil, @gapi, service)
+ end
  if ddl? || dml?
  data_hash = { totalRows: nil, rows: [] }
  return Data.from_gapi_json data_hash, nil, @gapi, service
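So calling #data on a completed dry-run job yields an empty result set instead of raising (sketch; assumes the dry-run job reports done?, which the guard above implies):

```ruby
job = bigquery.query_job "SELECT 1", dryrun: true # bigquery as above
job.data.count #=> 0, an empty Data object is synthesized for dry runs
```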
@@ -609,6 +628,7 @@ module Google
  updater.create = options[:create]
  updater.write = options[:write]
  updater.table = options[:table]
+ updater.dryrun = options[:dryrun]
  updater.maximum_bytes_billed = options[:maximum_bytes_billed]
  updater.labels = options[:labels] if options[:labels]
  updater.legacy_sql = Convert.resolve_legacy_sql(
@@ -786,6 +806,20 @@ module Google
  Convert.write_disposition value
  end

+ ##
+ # Sets the dry run flag for the query job.
+ #
+ # @param [Boolean] value If set, don't actually run this job. A valid
+ # query will return a mostly empty response with some processing
+ # statistics, while an invalid query will return the same error it
+ # would if it wasn't a dry run.
+ #
+ # @!group Attributes
+ def dryrun= value
+ @gapi.configuration.dry_run = value
+ end
+ alias dry_run= dryrun=
+
  ##
  # Sets the destination for the query results table.
  #
lib/google/cloud/bigquery/service.rb CHANGED
@@ -324,14 +324,26 @@ module Google
  end
  end

+ def self.get_table_ref table, default_ref: nil
+ if table.respond_to? :table_ref
+ table.table_ref
+ else
+ table_ref_from_s table, default_ref: default_ref
+ end
+ end
+
  ##
  # Extracts at least `tbl` group, and possibly `dts` and `prj` groups,
  # from strings in the formats: "my_table", "my_dataset.my_table", or
  # "my-project:my_dataset.my_table". Then merges project_id and
- # dataset_id from the default table if they are missing.
- def self.table_ref_from_s str, default_table_ref
+ # dataset_id from the default table ref if they are missing.
+ #
+ # The regex matches both Standard SQL
+ # ("bigquery-public-data.samples.shakespeare") and Legacy SQL
+ # ("bigquery-public-data:samples.shakespeare").
+ def self.table_ref_from_s str, default_ref: {}
  str = str.to_s
- m = /\A(((?<prj>\S*):)?(?<dts>\S*)\.)?(?<tbl>\S*)\z/.match str
+ m = /\A(((?<prj>\S*)(:|\.))?(?<dts>\S*)\.)?(?<tbl>\S*)\z/.match str
  unless m
  raise ArgumentError, "unable to identify table from #{str.inspect}"
  end
@@ -340,8 +352,18 @@ module Google
  dataset_id: m["dts"],
  table_id: m["tbl"]
  }.delete_if { |_, v| v.nil? }
- new_table_ref_hash = default_table_ref.to_h.merge str_table_ref_hash
- Google::Apis::BigqueryV2::TableReference.new new_table_ref_hash
+ str_table_ref_hash = default_ref.to_h.merge str_table_ref_hash
+ ref = Google::Apis::BigqueryV2::TableReference.new str_table_ref_hash
+ validate_table_ref ref
+ ref
+ end
+
+ def self.validate_table_ref table_ref
+ %i[project_id dataset_id table_id].each do |f|
+ if table_ref.send(f).nil?
+ raise ArgumentError, "TableReference is missing #{f}"
+ end
+ end
  end

  ##
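The widened regex can be checked in isolation; a standalone sketch using the pattern copied from the diff above:

```ruby
# Pattern from Service.table_ref_from_s, which now accepts ":" or "."
# after the project segment.
TABLE_REF = /\A(((?<prj>\S*)(:|\.))?(?<dts>\S*)\.)?(?<tbl>\S*)\z/

[
  "bigquery-public-data.samples.shakespeare", # Standard SQL style
  "bigquery-public-data:samples.shakespeare", # Legacy SQL style
  "samples.shakespeare",                      # dataset.table only
  "shakespeare"                               # bare table ID
].each do |str|
  m = TABLE_REF.match str
  puts [m[:prj], m[:dts], m[:tbl]].inspect
end
# => ["bigquery-public-data", "samples", "shakespeare"] for both styles,
#    then [nil, "samples", "shakespeare"] and [nil, nil, "shakespeare"]
```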
lib/google/cloud/bigquery/table.rb CHANGED
@@ -371,9 +371,10 @@ module Google
  ##
  # The combined Project ID, Dataset ID, and Table ID for this table, in
  # the format specified by the [Legacy SQL Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from):
- # `project_name:datasetId.tableId`. To use this value in queries see
- # {#query_id}.
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`). This is useful for referencing
+ # tables in other projects and datasets. To use this value in queries
+ # see {#query_id}.
  #
  # @return [String, nil] The combined ID, or `nil` if the object is a
  # reference (see {#reference?}).
@@ -386,10 +387,9 @@ module Google
  end

  ##
- # The value returned by {#id}, wrapped in square brackets if the Project
- # ID contains dashes, as specified by the [Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from).
- # Useful in queries.
+ # The value returned by {#id}, wrapped in backticks (Standard SQL) or
+ # square brackets (Legacy SQL) to accommodate project IDs
+ # containing dashes. Useful in queries.
  #
  # @param [Boolean] standard_sql Specifies whether to use BigQuery's
  # [standard
@@ -1191,9 +1191,11 @@ module Google
  #
  # @param [Table, String] destination_table The destination for the
  # copied data. This can also be a string identifier as specified by
- # the [Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from):
- # `project_name:datasetId.tableId`. This is useful for referencing
+ # the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`). This is useful for referencing
  # tables in other projects and datasets.
  # @param [String] create Specifies whether the job is allowed to create
  # new tables. The default value is `needed`.
@@ -1233,7 +1235,12 @@ module Google
  # contain lowercase letters, numeric characters, underscores and
  # dashes. International characters are allowed. Label values are
  # optional. Label keys must start with a letter and each label in the
- # list must have a different key.
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
+ # @param [Boolean] dryrun If set, don't actually run this job. However,
+ # behavior is undefined for non-query jobs and may result in an error.
+ # Deprecated.
+ #
  # @yield [job] a job configuration object
  # @yieldparam [Google::Cloud::Bigquery::CopyJob::Updater] job a job
  # configuration object for setting additional options.
@@ -1259,17 +1266,20 @@ module Google
  #
  # copy_job = table.copy_job "other-project:other_dataset.other_table"
  #
+ # copy_job.wait_until_done!
+ # copy_job.done? #=> true
+ #
  # @!group Data
  #
- def copy_job destination_table, create: nil, write: nil, dryrun: nil,
- job_id: nil, prefix: nil, labels: nil
+ def copy_job destination_table, create: nil, write: nil, job_id: nil,
+ prefix: nil, labels: nil, dryrun: nil
  ensure_service!
  options = { create: create, write: write, dryrun: dryrun,
  labels: labels, job_id: job_id, prefix: prefix }
  updater = CopyJob::Updater.from_options(
  service,
  table_ref,
- get_table_ref(destination_table),
+ Service.get_table_ref(destination_table, default_ref: table_ref),
  options
  )
  updater.location = location if location # may be table reference
@@ -1295,9 +1305,11 @@ module Google
  #
  # @param [Table, String] destination_table The destination for the
  # copied data. This can also be a string identifier as specified by
- # the [Query
- # Reference](https://cloud.google.com/bigquery/query-reference#from):
- # `project_name:datasetId.tableId`. This is useful for referencing
+ # the [Standard SQL Query
+ # Reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#from-clause)
+ # (`project-name.dataset_id.table_id`) or the [Legacy SQL Query
+ # Reference](https://cloud.google.com/bigquery/query-reference#from)
+ # (`project-name:dataset_id.table_id`). This is useful for referencing
  # tables in other projects and datasets.
  # @param [String] create Specifies whether the job is allowed to create
  # new tables. The default value is `needed`.
@@ -1406,7 +1418,12 @@ module Google
  # contain lowercase letters, numeric characters, underscores and
  # dashes. International characters are allowed. Label values are
  # optional. Label keys must start with a letter and each label in the
- # list must have a different key.
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
+ # @param [Boolean] dryrun If set, don't actually run this job. However,
+ # behavior is undefined for non-query jobs and may result in an error.
+ # Deprecated.
+ #
  # @yield [job] a job configuration object
  # @yieldparam [Google::Cloud::Bigquery::ExtractJob::Updater] job a job
  # configuration object for setting additional options.
@@ -1421,13 +1438,15 @@ module Google
  # table = dataset.table "my_table"
  #
  # extract_job = table.extract_job "gs://my-bucket/file-name.json",
- # format: "json"
+ # format: "json"
+ # extract_job.wait_until_done!
+ # extract_job.done? #=> true
  #
  # @!group Data
  #
  def extract_job extract_url, format: nil, compression: nil,
- delimiter: nil, header: nil, dryrun: nil, job_id: nil,
- prefix: nil, labels: nil
+ delimiter: nil, header: nil, job_id: nil, prefix: nil,
+ labels: nil, dryrun: nil
  ensure_service!
  options = { format: format, compression: compression,
  delimiter: delimiter, header: header, dryrun: dryrun,
@@ -1643,7 +1662,12 @@ module Google
  # contain lowercase letters, numeric characters, underscores and
  # dashes. International characters are allowed. Label values are
  # optional. Label keys must start with a letter and each label in the
- # list must have a different key.
+ # list must have a different key. See [Requirements for
+ # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
+ # @param [Boolean] dryrun If set, don't actually run this job. However,
+ # behavior is undefined for non-query jobs and may result in an error.
+ # Deprecated.
+ #
  # @yield [load_job] a block for setting the load job
  # @yieldparam [LoadJob] load_job the load job object to be updated
  #
@@ -1700,8 +1724,8 @@ module Google
  projection_fields: nil, jagged_rows: nil,
  quoted_newlines: nil, encoding: nil, delimiter: nil,
  ignore_unknown: nil, max_bad_records: nil, quote: nil,
- skip_leading: nil, dryrun: nil, job_id: nil, prefix: nil,
- labels: nil, autodetect: nil, null_marker: nil
+ skip_leading: nil, job_id: nil, prefix: nil, labels: nil,
+ autodetect: nil, null_marker: nil, dryrun: nil
  ensure_service!

  updater = load_job_updater format: format, create: create,
@@ -2058,6 +2082,8 @@ module Google
  def delete
  ensure_service!
  service.delete_table dataset_id, table_id
+ # Set flag for #exists?
+ @exists = false
  true
  end

@@ -2081,14 +2107,21 @@ module Google
  #
  def reload!
  ensure_service!
- gapi = service.get_table dataset_id, table_id
- @gapi = gapi
+ @gapi = service.get_table dataset_id, table_id
+ @reference = nil
+ @exists = nil
+ self
  end
  alias refresh! reload!

  ##
  # Determines whether the table exists in the BigQuery service. The
- # result is cached locally.
+ # result is cached locally. To refresh state, set `force` to `true`.
+ #
+ # @param [Boolean] force Force the latest resource representation to be
+ # retrieved from the BigQuery service when `true`. Otherwise the
+ # return value of this method will be memoized to reduce the number of
+ # API calls made to the BigQuery service. The default is `false`.
  #
  # @return [Boolean] `true` when the table exists in the BigQuery
  # service, `false` otherwise.
@@ -2102,15 +2135,13 @@ module Google
  # table = dataset.table "my_table", skip_lookup: true
  # table.exists? # true
  #
- def exists?
- # Always true if we have a gapi object
- return true unless reference?
+ def exists? force: nil
+ return gapi_exists? if force
  # If we have a value, return it
  return @exists unless @exists.nil?
- ensure_gapi!
- @exists = true
- rescue Google::Cloud::NotFoundError
- @exists = false
+ # Always true if we have a gapi object
+ return true if resource?
+ gapi_exists?
  end

  ##
@@ -2259,6 +2290,15 @@ module Google
  reload!
  end

+ ##
+ # Fetch gapi and memoize whether resource exists.
+ def gapi_exists?
+ reload!
+ @exists = true
+ rescue Google::Cloud::NotFoundError
+ @exists = false
+ end
+
  def patch_gapi! *attributes
  return if attributes.empty?
  ensure_service!
@@ -2459,16 +2499,6 @@ module Google
  end
  end

- private
-
- def get_table_ref table
- if table.respond_to? :table_ref
- table.table_ref
- else
- Service.table_ref_from_s table, table_ref
- end
- end
-
  ##
  # Yielded to a block to accumulate changes for a patch request.
  class Updater < Table
lib/google/cloud/bigquery/version.rb CHANGED
@@ -16,7 +16,7 @@
  module Google
  module Cloud
  module Bigquery
- VERSION = "1.9.0".freeze
+ VERSION = "1.10.0".freeze
  end
  end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-bigquery
  version: !ruby/object:Gem::Version
- version: 1.9.0
+ version: 1.10.0
  platform: ruby
  authors:
  - Mike Moore
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-10-25 00:00:00.000000000 Z
+ date: 2018-12-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: google-cloud-core