gcloud 0.5.0 → 0.6.0
This diff compares the contents of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
- checksums.yaml +8 -8
- data/CHANGELOG.md +8 -0
- data/lib/gcloud.rb +48 -30
- data/lib/gcloud/bigquery.rb +4 -6
- data/lib/gcloud/bigquery/connection.rb +2 -14
- data/lib/gcloud/bigquery/dataset.rb +41 -42
- data/lib/gcloud/bigquery/project.rb +50 -46
- data/lib/gcloud/bigquery/query_job.rb +7 -8
- data/lib/gcloud/bigquery/table.rb +54 -55
- data/lib/gcloud/bigquery/table/schema.rb +30 -40
- data/lib/gcloud/bigquery/view.rb +10 -11
- data/lib/gcloud/credentials.rb +19 -25
- data/lib/gcloud/datastore.rb +4 -6
- data/lib/gcloud/datastore/dataset.rb +3 -5
- data/lib/gcloud/dns.rb +4 -6
- data/lib/gcloud/dns/connection.rb +17 -16
- data/lib/gcloud/dns/importer.rb +5 -11
- data/lib/gcloud/dns/project.rb +11 -12
- data/lib/gcloud/dns/zone.rb +52 -92
- data/lib/gcloud/dns/zone/transaction.rb +2 -2
- data/lib/gcloud/pubsub.rb +4 -6
- data/lib/gcloud/pubsub/connection.rb +1 -12
- data/lib/gcloud/pubsub/project.rb +30 -36
- data/lib/gcloud/pubsub/subscription.rb +18 -26
- data/lib/gcloud/pubsub/topic.rb +16 -26
- data/lib/gcloud/resource_manager.rb +5 -6
- data/lib/gcloud/resource_manager/connection.rb +4 -4
- data/lib/gcloud/resource_manager/manager.rb +10 -14
- data/lib/gcloud/resource_manager/project.rb +3 -5
- data/lib/gcloud/search.rb +295 -0
- data/lib/gcloud/search/api_client.rb +144 -0
- data/lib/gcloud/search/connection.rb +146 -0
- data/lib/gcloud/search/credentials.rb +30 -0
- data/lib/gcloud/search/document.rb +301 -0
- data/lib/gcloud/search/document/list.rb +85 -0
- data/lib/gcloud/search/errors.rb +67 -0
- data/lib/gcloud/search/field_value.rb +164 -0
- data/lib/gcloud/search/field_values.rb +263 -0
- data/lib/gcloud/search/fields.rb +267 -0
- data/lib/gcloud/search/index.rb +613 -0
- data/lib/gcloud/search/index/list.rb +90 -0
- data/lib/gcloud/search/project.rb +197 -0
- data/lib/gcloud/search/result.rb +169 -0
- data/lib/gcloud/search/result/list.rb +95 -0
- data/lib/gcloud/storage.rb +4 -6
- data/lib/gcloud/storage/bucket.rb +55 -43
- data/lib/gcloud/storage/bucket/cors.rb +5 -7
- data/lib/gcloud/storage/file.rb +35 -30
- data/lib/gcloud/storage/file/acl.rb +12 -16
- data/lib/gcloud/storage/project.rb +56 -22
- data/lib/gcloud/version.rb +1 -1
- metadata +20 -3
data/lib/gcloud/bigquery/project.rb

@@ -93,25 +93,25 @@ module Gcloud
 # syntax}[https://cloud.google.com/bigquery/query-reference], of the
 # query to execute. Example: "SELECT count(f1) FROM
 # [myProjectId:myDatasetId.myTableId]". (+String+)
-#
+# +priority+::
 # Specifies a priority for the query. Possible values include
 # +INTERACTIVE+ and +BATCH+. The default value is +INTERACTIVE+.
 # (+String+)
-#
+# +cache+::
 # Whether to look for the result in the query cache. The query cache is
 # a best-effort cache that will be flushed whenever tables in the query
 # are modified. The default value is +true+. (+Boolean+)
-#
+# +table+::
 # The destination table where the query results should be stored. If not
 # present, a new table will be created to store the results. (+Table+)
-#
+# +create+::
 # Specifies whether the job is allowed to create new tables. (+String+)
 #
 # The following values are supported:
 # * +needed+ - Create the table if it does not exist.
 # * +never+ - The table must already exist. A 'notFound' error is
 #   raised if the table does not exist.
-#
+# +write+::
 # Specifies the action that occurs if the destination table already
 # exists. (+String+)
 #

@@ -120,15 +120,15 @@ module Gcloud
 # * +append+ - BigQuery appends the data to the table.
 # * +empty+ - A 'duplicate' error is returned in the job result if the
 #   table exists and contains data.
-#
+# +large_results+::
 # If +true+, allows the query to produce arbitrarily large result tables
-# at a slight cost in performance. Requires
-#
-#
+# at a slight cost in performance. Requires +table+ parameter to be set.
+# (+Boolean+)
+# +flatten+::
 # Flattens all nested and repeated fields in the query results. The
-# default value is +true+.
-#
-#
+# default value is +true+. +large_results+ parameter must be +true+ if
+# this is set to +false+. (+Boolean+)
+# +dataset+::
 # Specifies the default dataset to use for unqualified table names in
 # the query. (+Dataset+ or +String+)
 #

@@ -152,8 +152,13 @@ module Gcloud
 # end
 # end
 #
-def query_job query,
+def query_job query, priority: "INTERACTIVE", cache: true, table: nil,
+              create: nil, write: nil, large_results: nil, flatten: nil,
+              dataset: nil
   ensure_connection!
+  options = { priority: priority, cache: cache, table: table,
+              create: create, write: write, large_results: large_results,
+              flatten: flatten, dataset: dataset }
   resp = connection.query_job query, options
   if resp.success?
     Job.from_gapi resp.data, connection
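The hunk above replaces the 0.5.0 options hash with keyword arguments. A minimal usage sketch of the new signature; the connection setup and SQL string are illustrative, not taken from this diff:

  require "gcloud"

  gcloud = Gcloud.new
  bigquery = gcloud.bigquery

  # options[:priority] and options[:cache] from 0.5.0 are now plain keywords.
  job = bigquery.query_job "SELECT COUNT(word) FROM [publicdata:samples.shakespeare]",
                           priority: "BATCH", cache: false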
@@ -173,37 +178,37 @@ module Gcloud
 # syntax}[https://cloud.google.com/bigquery/query-reference], of the
 # query to execute. Example: "SELECT count(f1) FROM
 # [myProjectId:myDatasetId.myTableId]". (+String+)
-#
+# +max+::
 # The maximum number of rows of data to return per page of results.
 # Setting this flag to a small value such as 1000 and then paging
 # through results might improve reliability when the query result set is
 # large. In addition to this limit, responses are also limited to 10 MB.
 # By default, there is no maximum row count, and only the byte limit
 # applies. (+Integer+)
-#
+# +timeout+::
 # How long to wait for the query to complete, in milliseconds, before
 # the request times out and returns. Note that this is only a timeout
 # for the request, not the query. If the query takes longer to run than
 # the timeout value, the call returns without any results and with
 # QueryData#complete? set to false. The default value is 10000
 # milliseconds (10 seconds). (+Integer+)
-#
+# +dryrun+::
 # If set to +true+, BigQuery doesn't run the job. Instead, if the query
 # is valid, BigQuery returns statistics about the job such as how many
 # bytes would be processed. If the query is invalid, an error returns.
 # The default value is +false+. (+Boolean+)
-#
+# +cache+::
 # Whether to look for the result in the query cache. The query cache is
 # a best-effort cache that will be flushed whenever tables in the query
 # are modified. The default value is true. For more information, see
 # {query caching}[https://developers.google.com/bigquery/querying-data].
 # (+Boolean+)
-#
+# +dataset+::
 # Specifies the default datasetId and projectId to assume for any
 # unqualified table names in the query. If not set, all table names in
 # the query string must be qualified in the format 'datasetId.tableId'.
 # (+String+)
-#
+# +project+::
 # Specifies the default projectId to assume for any unqualified table
 # names in the query. Only used if +dataset+ option is set. (+String+)
 #

@@ -223,8 +228,11 @@ module Gcloud
 # puts row["name"]
 # end
 #
-def query query,
+def query query, max: nil, timeout: 10000, dryrun: nil, cache: true,
+          dataset: nil, project: nil
   ensure_connection!
+  options = { max: max, timeout: timeout, dryrun: dryrun, cache: cache,
+              dataset: dataset, project: project }
   resp = connection.query query, options
   if resp.success?
     QueryData.from_gapi resp.data, connection
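The synchronous query method changes the same way, with +timeout+ and +cache+ now carrying their defaults in the signature itself. A sketch with placeholder table and column names:

  data = bigquery.query "SELECT name FROM [my_dataset.my_table]",
                        max: 1000, timeout: 30000
  data.each { |row| puts row["name"] }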
@@ -275,16 +283,14 @@ module Gcloud
 # A unique ID for this dataset, without the project name.
 # The ID must contain only letters (a-z, A-Z), numbers (0-9), or
 # underscores (_). The maximum length is 1,024 characters. (+String+)
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:name]</code>::
+# +name+::
 # A descriptive name for the dataset. (+String+)
-#
+# +description+::
 # A user-friendly description of the dataset. (+String+)
-#
+# +expiration+::
 # The default lifetime of all tables in the dataset, in milliseconds.
 # The minimum value is 3600000 milliseconds (one hour). (+Integer+)
-#
+# +access+::
 # The access rules for a Dataset using the Google Cloud Datastore API
 # data structure of an array of hashes. See {BigQuery Access
 # Control}[https://cloud.google.com/bigquery/access-control] for more

@@ -336,21 +342,21 @@ module Gcloud
 # access.add_writer_user "writers@example.com"
 # end
 #
-def create_dataset dataset_id,
+def create_dataset dataset_id, name: nil, description: nil,
+                   expiration: nil, access: nil
   if block_given?
     access_builder = Dataset::Access.new connection.default_access_rules,
                                          "projectId" => project
     yield access_builder
-
+    access = access_builder.access if access_builder.changed?
   end

   ensure_connection!
+  options = { name: name, description: description,
+              expiration: expiration, access: access }
   resp = connection.insert_dataset dataset_id, options
-  if resp.success?
-
-  else
-    fail ApiError.from_response(resp)
-  end
+  return Dataset.from_gapi(resp.data, connection) if resp.success?
+  fail ApiError.from_response(resp)
 end

 ##
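Dataset attributes likewise become keywords on create_dataset. A sketch; the ID, name, description, and expiration values are invented:

  dataset = bigquery.create_dataset "my_dataset",
                                    name: "My Dataset",
                                    description: "A dataset for examples",
                                    expiration: 3_600_000 # ms; one hour is the minimum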
@@ -358,15 +364,13 @@ module Gcloud
 #
 # === Parameters
 #
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:all]</code>::
+# +all+::
 # Whether to list all datasets, including hidden ones. The default is
 # +false+. (+Boolean+)
-#
+# +token+::
 # A previously-returned page token representing part of the larger set
 # of results to view. (+String+)
-#
+# +max+::
 # Maximum number of datasets to return. (+Integer+)
 #
 # === Returns

@@ -415,8 +419,9 @@ module Gcloud
 # tmp_datasets = bigquery.datasets token: tmp_datasets.token
 # end
 #
-def datasets
+def datasets all: nil, token: nil, max: nil
   ensure_connection!
+  options = { all: all, token: token, max: max }
   resp = connection.list_datasets options
   if resp.success?
     Dataset::List.from_response resp, connection
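A sketch of the new datasets listing call, paging with the token as the docs above describe (values illustrative):

  page = bigquery.datasets all: true, max: 5
  page = bigquery.datasets all: true, max: 5, token: page.token if page.token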
@@ -462,17 +467,15 @@ module Gcloud
 #
 # === Parameters
 #
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:all]</code>::
+# +all+::
 # Whether to display jobs owned by all users in the project.
 # The default is +false+. (+Boolean+)
-#
+# +token+::
 # A previously-returned page token representing part of the larger set
 # of results to view. (+String+)
-#
+# +max+::
 # Maximum number of jobs to return. (+Integer+)
-#
+# +filter+::
 # A filter for job state. (+String+)
 #
 # Acceptable values are:

@@ -522,8 +525,9 @@ module Gcloud
 # tmp_jobs = bigquery.jobs token: tmp_jobs.token
 # end
 #
-def jobs
+def jobs all: nil, token: nil, max: nil, filter: nil
   ensure_connection!
+  options = { all: all, token: token, max: max, filter: filter }
   resp = connection.list_jobs options
   if resp.success?
     Job::List.from_response resp, connection
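And for jobs, assuming +done+ is among the accepted +filter+ values (the value list itself falls outside this hunk):

  done_jobs = bigquery.jobs filter: "done", max: 10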
data/lib/gcloud/bigquery/query_job.rb

@@ -47,7 +47,7 @@ module Gcloud
 # Checks if the the query job allows arbitrarily large results at a slight
 # cost to performance.
 def large_results?
-  val = config["query"]["
+  val = config["query"]["allowLargeResults"]
   return false if val.nil?
   val
 end
@@ -99,16 +99,14 @@ module Gcloud
 #
 # === Parameters
 #
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:token]</code>::
+# +token+::
 # Page token, returned by a previous call, identifying the result set.
 # (+String+)
-#
+# +max+::
 # Maximum number of results to return. (+Integer+)
-#
+# +start+::
 # Zero-based index of the starting row to read. (+Integer+)
-#
+# +timeout+::
 # How long to wait for the query to complete, in milliseconds, before
 # returning. Default is 10,000 milliseconds (10 seconds). (+Integer+)
 #

@@ -133,8 +131,9 @@ module Gcloud
 # end
 # data = data.next if data.next?
 #
-def query_results
+def query_results token: nil, max: nil, start: nil, timeout: nil
   ensure_connection!
+  options = { token: token, max: max, start: start, timeout: timeout }
   resp = connection.job_query_results job_id, options
   if resp.success?
     QueryData.from_gapi resp.data, connection
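Result paging keywords in use, with the job from an earlier query_job call (values illustrative):

  job = bigquery.query_job "SELECT name FROM [my_dataset.my_table]"
  data = job.query_results max: 1000, timeout: 30000
  data = data.next if data.next?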
data/lib/gcloud/bigquery/table.rb

@@ -300,9 +300,7 @@ module Gcloud
 #
 # === Parameters
 #
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:replace]</code>::
+# +replace+::
 # Whether to replace the existing schema with the new schema. If
 # +true+, the fields will replace the existing schema. If
 # +false+, the fields will be added to the existing schema. When a table

@@ -328,13 +326,13 @@ module Gcloud
 #
 # :category: Attributes
 #
-def schema
+def schema replace: false
   ensure_full_data!
   g = @gapi
   g = g.to_hash if g.respond_to? :to_hash
   s = g["schema"] ||= {}
   return s unless block_given?
-  s = nil if
+  s = nil if replace
   schema_builder = Schema.new s
   yield schema_builder
   self.schema = schema_builder.schema if schema_builder.changed?
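A sketch of the new keyword, assuming the field helpers (string, integer, ...) that Table::Schema provides in this release; the table lookup and field names are invented:

  table = bigquery.dataset("my_dataset").table("my_table")

  # replace: true discards the existing schema instead of appending to it.
  table.schema replace: true do |schema|
    schema.string "first_name", mode: :required
    schema.integer "age"
  end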
@@ -409,14 +407,12 @@ module Gcloud
 #
 # === Parameters
 #
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:token]</code>::
+# +token+::
 # Page token, returned by a previous call, identifying the result set.
 # (+String+)
-#
+# +max+::
 # Maximum number of results to return. (+Integer+)
-#
+# +start+::
 # Zero-based index of the starting row to read. (+Integer+)
 #
 # === Returns

@@ -440,8 +436,9 @@ module Gcloud
 #
 # :category: Data
 #
-def data
+def data token: nil, max: nil, start: nil
   ensure_connection!
+  options = { token: token, max: max, start: start }
   resp = connection.list_tabledata dataset_id, table_id, options
   if resp.success?
     Data.from_response resp, self
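Reading rows with the new keywords, reusing the table object from the schema sketch above:

  data = table.data max: 100, start: 0
  data.each { |row| puts row["first_name"] }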
@@ -457,16 +454,14 @@ module Gcloud
 #
 # +destination_table+::
 # The destination for the copied data. (+Table+ or +String+)
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:create]</code>::
+# +create+::
 # Specifies whether the job is allowed to create new tables. (+String+)
 #
 # The following values are supported:
 # * +needed+ - Create the table if it does not exist.
 # * +never+ - The table must already exist. A 'notFound' error is
 #   raised if the table does not exist.
-#
+# +write+::
 # Specifies how to handle data already present in the destination table.
 # The default value is +empty+. (+String+)
 #

@@ -509,8 +504,9 @@ module Gcloud
 #
 # :category: Data
 #
-def copy destination_table,
+def copy destination_table, create: nil, write: nil, dryrun: nil
   ensure_connection!
+  options = { create: create, write: write, dryrun: dryrun }
   resp = connection.copy_table table_ref,
                                get_table_ref(destination_table),
                                options
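A sketch of the new copy call; per the docs above the destination may be a +Table+ or a +String+, and the name here is a placeholder:

  copy_job = table.copy "my_copied_table", create: "needed", write: "empty"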
@@ -528,16 +524,14 @@ module Gcloud
 #
 # +source_url+::
 # The URI of source table to link. (+String+)
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:create]</code>::
+# +create+::
 # Specifies whether the job is allowed to create new tables. (+String+)
 #
 # The following values are supported:
 # * +needed+ - Create the table if it does not exist.
 # * +never+ - The table must already exist. A 'notFound' error is
 #   raised if the table does not exist.
-#
+# +write+::
 # Specifies how to handle data already present in the table.
 # The default value is +empty+. (+String+)
 #

@@ -553,8 +547,9 @@ module Gcloud
 #
 # :category: Data
 #
-def link source_url,
+def link source_url, create: nil, write: nil, dryrun: nil #:nodoc:
   ensure_connection!
+  options = { create: create, write: write, dryrun: dryrun }
   resp = connection.link_table table_ref, source_url, options
   if resp.success?
     Job.from_gapi resp.data, connection
@@ -574,22 +569,20 @@ module Gcloud
 # The Google Storage file or file URI pattern(s) to which BigQuery
 # should extract the table data.
 # (+Gcloud::Storage::File+ or +String+ or +Array+)
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:format]</code>::
+# +format+::
 # The exported file format. The default value is +csv+. (+String+)
 #
 # The following values are supported:
 # * +csv+ - CSV
 # * +json+ - {Newline-delimited JSON}[http://jsonlines.org/]
 # * +avro+ - {Avro}[http://avro.apache.org/]
-#
+# +compression+::
 # The compression type to use for exported files. Possible values
 # include +GZIP+ and +NONE+. The default value is +NONE+. (+String+)
-#
+# +delimiter+::
 # Delimiter to use between fields in the exported data. Default is
 # <code>,</code>. (+String+)
-#
+# +header+::
 # Whether to print out a header row in the results. Default is +true+.
 # (+Boolean+)
 #

@@ -611,8 +604,11 @@ module Gcloud
 #
 # :category: Data
 #
-def extract extract_url,
+def extract extract_url, format: nil, compression: nil, delimiter: nil,
+            header: nil, dryrun: nil
   ensure_connection!
+  options = { format: format, compression: compression,
+              delimiter: delimiter, header: header, dryrun: dryrun }
   resp = connection.extract_table table_ref, extract_url, options
   if resp.success?
     Job.from_gapi resp.data, connection
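Extract keywords in use (the bucket URI is a placeholder):

  extract_job = table.extract "gs://my-bucket/my-table.csv",
                              format: "csv", compression: "GZIP", header: true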
@@ -630,9 +626,7 @@ module Gcloud
 # A file or the URI of a Google Cloud Storage file containing
 # data to load into the table.
 # (+File+ or +Gcloud::Storage::File+ or +String+)
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:format]</code>::
+# +format+::
 # The exported file format. The default value is +csv+. (+String+)
 #
 # The following values are supported:

@@ -640,14 +634,14 @@ module Gcloud
 # * +json+ - {Newline-delimited JSON}[http://jsonlines.org/]
 # * +avro+ - {Avro}[http://avro.apache.org/]
 # * +datastore_backup+ - Cloud Datastore backup
-#
+# +create+::
 # Specifies whether the job is allowed to create new tables. (+String+)
 #
 # The following values are supported:
 # * +needed+ - Create the table if it does not exist.
 # * +never+ - The table must already exist. A 'notFound' error is
 #   raised if the table does not exist.
-#
+# +write+::
 # Specifies how to handle data already present in the table.
 # The default value is +empty+. (+String+)
 #

@@ -656,32 +650,32 @@ module Gcloud
 # * +append+ - BigQuery appends the data to the table.
 # * +empty+ - An error will be returned if the table already contains
 #   data.
-#
+# +projection_fields+::
 # If the +format+ option is set to +datastore_backup+, indicates which
 # entity properties to load from a Cloud Datastore backup. Property
 # names are case sensitive and must be top-level properties. If not set,
 # BigQuery loads all properties. If any named property isn't found in
 # the Cloud Datastore backup, an invalid error is returned. (+Array+)
-#
+# +jagged_rows+::
 # Accept rows that are missing trailing optional columns. The missing
 # values are treated as nulls. If +false+, records with missing trailing
 # columns are treated as bad records, and if there are too many bad
 # records, an invalid error is returned in the job result. The default
 # value is +false+. Only applicable to CSV, ignored for other formats.
 # (+Boolean+)
-#
+# +quoted_newlines+::
 # Indicates if BigQuery should allow quoted data sections that contain
 # newline characters in a CSV file. The default value is +false+.
 # (+Boolean+)
-#
+# +encoding+::
 # The character encoding of the data. The supported values are +UTF-8+
 # or +ISO-8859-1+. The default value is +UTF-8+. (+String+)
-#
+# +delimiter+::
 # Specifices the separator for fields in a CSV file. BigQuery converts
 # the string to +ISO-8859-1+ encoding, and then uses the first byte of
 # the encoded string to split the data in its raw, binary state. Default
 # is <code>,</code>. (+String+)
-#
+# +ignore_unknown+::
 # Indicates if BigQuery should allow extra values that are not
 # represented in the table schema. If true, the extra values are
 # ignored. If false, records with extra columns are treated as bad

@@ -693,12 +687,12 @@ module Gcloud
 #
 # * +CSV+: Trailing columns
 # * +JSON+: Named values that don't match any column names
-#
+# +max_bad_records+::
 # The maximum number of bad records that BigQuery can ignore when
 # running the job. If the number of bad records exceeds this value, an
 # invalid error is returned in the job result. The default value is +0+,
 # which requires that all records are valid. (+Integer+)
-#
+# +quote+::
 # The value that is used to quote data sections in a CSV file. BigQuery
 # converts the string to ISO-8859-1 encoding, and then uses the first
 # byte of the encoded string to split the data in its raw, binary state.

@@ -706,7 +700,7 @@ module Gcloud
 # not contain quoted sections, set the property value to an empty
 # string. If your data contains quoted newline characters, you must also
 # set the allowQuotedNewlines property to true. (+String+)
-#
+# +skip_leading+::
 # The number of rows at the top of a CSV file that BigQuery will skip
 # when loading the data. The default value is +0+. This property is
 # useful if you have header rows in the file that should be skipped.

@@ -784,15 +778,21 @@ module Gcloud
 #
 # :category: Data
 #
-def load file,
+def load file, format: nil, create: nil, write: nil,
+         projection_fields: nil, jagged_rows: nil, quoted_newlines: nil,
+         encoding: nil, delimiter: nil, ignore_unknown: nil,
+         max_bad_records: nil, quote: nil, skip_leading: nil, dryrun: nil
   ensure_connection!
-
-
-
-
-
-
-
+  options = { format: format, create: create, write: write,
+              projection_fields: projection_fields,
+              jagged_rows: jagged_rows, quoted_newlines: quoted_newlines,
+              encoding: encoding, delimiter: delimiter,
+              ignore_unknown: ignore_unknown,
+              max_bad_records: max_bad_records, quote: quote,
+              skip_leading: skip_leading, dryrun: dryrun }
+  return load_storage(file, options) if storage_url? file
+  return load_local(file, options) if local_file? file
+  fail Gcloud::Bigquery::Error, "Don't know how to load #{file}"
 end

 ##
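The dozen 0.5.0 options keys become explicit keywords on load. A sketch with an invented local CSV path; per the hunk, the same signature also accepts a gs:// URL:

  load_job = table.load "path/to/data.csv",
                        skip_leading: 1,       # skip a header row
                        quoted_newlines: true,
                        max_bad_records: 10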
@@ -806,13 +806,11 @@ module Gcloud
 # +rows+::
 # A hash object or array of hash objects containing the data.
 # (+Array+ or +Hash+)
-# +
-# An optional Hash for controlling additional behavior. (+Hash+)
-# <code>options[:skip_invalid]</code>::
+# +skip_invalid+::
 # Insert all valid rows of a request, even if invalid rows exist. The
 # default value is +false+, which causes the entire request to fail if
 # any invalid rows exist. (+Boolean+)
-#
+# +ignore_unknown+::
 # Accept rows that contain values that do not match the schema. The
 # unknown values are ignored. Default is false, which treats unknown
 # values as errors. (+Boolean+)

@@ -838,9 +836,10 @@ module Gcloud
 #
 # :category: Data
 #
-def insert rows,
+def insert rows, skip_invalid: nil, ignore_unknown: nil
   rows = [rows] if rows.is_a? Hash
   ensure_connection!
+  options = { skip_invalid: skip_invalid, ignore_unknown: ignore_unknown }
   resp = connection.insert_tabledata dataset_id, table_id, rows, options
   if resp.success?
     InsertResponse.from_gapi rows, resp.data