google-cloud-bigquery 1.25.0 → 1.30.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +55 -0
- data/CONTRIBUTING.md +4 -5
- data/LOGGING.md +1 -1
- data/OVERVIEW.md +15 -14
- data/lib/google/cloud/bigquery/convert.rb +72 -76
- data/lib/google/cloud/bigquery/copy_job.rb +1 -0
- data/lib/google/cloud/bigquery/data.rb +2 -2
- data/lib/google/cloud/bigquery/dataset.rb +181 -62
- data/lib/google/cloud/bigquery/dataset/access.rb +3 -3
- data/lib/google/cloud/bigquery/dataset/list.rb +2 -2
- data/lib/google/cloud/bigquery/external.rb +328 -3
- data/lib/google/cloud/bigquery/extract_job.rb +8 -10
- data/lib/google/cloud/bigquery/job.rb +43 -3
- data/lib/google/cloud/bigquery/job/list.rb +4 -4
- data/lib/google/cloud/bigquery/load_job.rb +228 -27
- data/lib/google/cloud/bigquery/model/list.rb +2 -2
- data/lib/google/cloud/bigquery/policy.rb +2 -1
- data/lib/google/cloud/bigquery/project.rb +47 -43
- data/lib/google/cloud/bigquery/project/list.rb +2 -2
- data/lib/google/cloud/bigquery/query_job.rb +62 -48
- data/lib/google/cloud/bigquery/routine.rb +128 -9
- data/lib/google/cloud/bigquery/routine/list.rb +2 -2
- data/lib/google/cloud/bigquery/schema.rb +39 -3
- data/lib/google/cloud/bigquery/schema/field.rb +63 -13
- data/lib/google/cloud/bigquery/service.rb +11 -13
- data/lib/google/cloud/bigquery/standard_sql.rb +15 -3
- data/lib/google/cloud/bigquery/table.rb +246 -52
- data/lib/google/cloud/bigquery/table/async_inserter.rb +44 -17
- data/lib/google/cloud/bigquery/table/list.rb +2 -2
- data/lib/google/cloud/bigquery/version.rb +1 -1
- metadata +15 -15
data/lib/google/cloud/bigquery/extract_job.rb

@@ -103,8 +103,7 @@ module Google
        # table extraction.
        def compression?
          return false unless table?
-         val = @gapi.configuration.extract.compression
-         val == "GZIP"
+         @gapi.configuration.extract.compression == "GZIP"
        end

        ##
@@ -117,8 +116,7 @@ module Google
        #
        def json?
          return false unless table?
-         val = @gapi.configuration.extract.destination_format
-         val == "NEWLINE_DELIMITED_JSON"
+         @gapi.configuration.extract.destination_format == "NEWLINE_DELIMITED_JSON"
        end

        ##
@@ -146,8 +144,7 @@ module Google
        #
        def avro?
          return false unless table?
-         val = @gapi.configuration.extract.destination_format
-         val == "AVRO"
+         @gapi.configuration.extract.destination_format == "AVRO"
        end

        ##
@@ -173,8 +170,7 @@ module Google
        #
        def ml_xgboost_booster?
          return false unless model?
-         val = @gapi.configuration.extract.destination_format
-         val == "ML_XGBOOST_BOOSTER"
+         @gapi.configuration.extract.destination_format == "ML_XGBOOST_BOOSTER"
        end

        ##
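The four format predicates above were refactored to read the extract configuration inline instead of through a temporary `val`. A usage sketch of the predicates (dataset, table, and bucket names are placeholders):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
table = bigquery.dataset("my_dataset").table("my_table")

# Extract to newline-delimited JSON, then check the format predicates.
extract_job = table.extract_job "gs://my-bucket/my_table.json", format: "json"
extract_job.wait_until_done!

extract_job.json?        #=> true
extract_job.avro?        #=> false
extract_job.compression? #=> true only if gzip compression was requested
```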
@@ -250,6 +246,7 @@ module Google
          ##
          # @private Create an Updater object.
          def initialize gapi
+           super()
            @gapi = gapi
          end

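The added `super()` (with explicit empty parentheses) invokes the superclass initializer with no arguments; a bare `super` would forward `gapi`. A standalone sketch of the distinction:

```ruby
class Base
  def initialize *args
    puts "Base#initialize got #{args.inspect}"
  end
end

class Child < Base
  def initialize gapi
    super()      # calls Base#initialize with zero arguments
    @gapi = gapi # a bare `super` here would have forwarded gapi
  end
end

Child.new :config # prints: Base#initialize got []
```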
@@ -267,9 +264,10 @@ module Google
            extract_config = Google::Apis::BigqueryV2::JobConfigurationExtract.new(
              destination_uris: Array(storage_urls)
            )
-           if source.is_a? Google::Apis::BigqueryV2::TableReference
+           case source
+           when Google::Apis::BigqueryV2::TableReference
              extract_config.source_table = source
-           elsif source.is_a? Google::Apis::BigqueryV2::ModelReference
+           when Google::Apis::BigqueryV2::ModelReference
              extract_config.source_model = source
            end
            job = Google::Apis::BigqueryV2::Job.new(
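The `if`/`elsif` chain becomes `case`/`when` with no change in behavior: `case` dispatches through `Module#===`, which for a class operand is the same test as `is_a?`. A quick check:

```ruby
require "google/apis/bigquery_v2"

source = Google::Apis::BigqueryV2::TableReference.new

# Module#=== performs the is_a? test used by case/when dispatch.
Google::Apis::BigqueryV2::TableReference === source   #=> true
source.is_a? Google::Apis::BigqueryV2::TableReference #=> true
```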
data/lib/google/cloud/bigquery/job.rb

@@ -215,6 +215,17 @@ module Google
          @gapi.statistics.parent_job_id
        end

+       ##
+       # An array containing the job resource usage breakdown by reservation, if present. Reservation usage statistics
+       # are only reported for jobs that are executed within reservations. On-demand jobs do not report this data.
+       #
+       # @return [Array<Google::Cloud::Bigquery::Job::ReservationUsage>, nil] The reservation usage, if present.
+       #
+       def reservation_usage
+         return nil unless @gapi.statistics.reservation_usage
+         Array(@gapi.statistics.reservation_usage).map { |g| ReservationUsage.from_gapi g }
+       end
+
        ##
        # The statistics including stack frames for a child job of a script.
        #
@@ -489,6 +500,30 @@ module Google
          end
        end

+       ##
+       # Represents Job resource usage breakdown by reservation.
+       #
+       # @attr_reader [String] name The reservation name or "unreserved" for on-demand resources usage.
+       # @attr_reader [Fixnum] slot_ms The slot-milliseconds the job spent in the given reservation.
+       #
+       class ReservationUsage
+         attr_reader :name
+         attr_reader :slot_ms
+
+         ##
+         # @private Creates a new ReservationUsage instance.
+         def initialize name, slot_ms
+           @name = name
+           @slot_ms = slot_ms
+         end
+
+         ##
+         # @private New ReservationUsage from a statistics.reservation_usage value.
+         def self.from_gapi gapi
+           new gapi.name, gapi.slot_ms
+         end
+       end
+
        ##
        # Represents statistics for a child job of a script.
        #
@@ -537,7 +572,8 @@ module Google
        # end
        #
        class ScriptStatistics
-         attr_reader :evaluation_kind, :stack_frames
+         attr_reader :evaluation_kind
+         attr_reader :stack_frames

          ##
          # @private Creates a new ScriptStatistics instance.
@@ -547,7 +583,7 @@ module Google
          end

          ##
-         # @private New ScriptStatistics from a statistics.script_statistics
+         # @private New ScriptStatistics from a statistics.script_statistics value.
          def self.from_gapi gapi
            frames = Array(gapi.stack_frames).map { |g| ScriptStackFrame.from_gapi g }
            new gapi.evaluation_kind, frames
@@ -602,7 +638,11 @@ module Google
        # end
        #
        class ScriptStackFrame
-         attr_reader :start_line, :start_column, :end_line, :end_column, :text
+         attr_reader :start_line
+         attr_reader :start_column
+         attr_reader :end_line
+         attr_reader :end_column
+         attr_reader :text

          ##
          # @private Creates a new ScriptStackFrame instance.
data/lib/google/cloud/bigquery/job/list.rb

@@ -72,8 +72,8 @@ module Google
            return nil unless next?
            ensure_service!
            next_kwargs = @kwargs.merge token: token
-           next_gapi = @service.list_jobs next_kwargs
-           self.class.from_gapi next_gapi, @service, next_kwargs
+           next_gapi = @service.list_jobs(**next_kwargs)
+           self.class.from_gapi next_gapi, @service, **next_kwargs
          end

          ##
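The double splat is the Ruby 3 keyword-argument fix: a bare hash is no longer auto-converted into keyword arguments. A self-contained sketch of the failure mode being avoided (the method here is illustrative, not the service's actual signature):

```ruby
def list_jobs all: nil, token: nil, max: nil
  { all: all, token: token, max: max }
end

kwargs = { token: "next-page-token" }

list_jobs(**kwargs) # correct on Ruby 3: the hash is splatted into keywords
# list_jobs(kwargs) # ArgumentError on Ruby 3: positional argument given, keywords expected
```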
@@ -121,12 +121,12 @@ module Google
          #     puts job.state
          #   end
          #
-         def all request_limit: nil
+         def all request_limit: nil, &block
            request_limit = request_limit.to_i if request_limit
            return enum_for :all, request_limit: request_limit unless block_given?
            results = self
            loop do
-             results.each { |r| yield r }
+             results.each(&block)
              if request_limit
                request_limit -= 1
                break if request_limit.negative?
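Capturing the block as `&block` lets each page forward it to `results.each` directly. Calling code is unchanged; a brief paging sketch that caps the number of list requests:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new

# Make at most two additional paging requests while iterating.
bigquery.jobs.all(request_limit: 2) do |job|
  puts "#{job.job_id}: #{job.state}"
end
```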
data/lib/google/cloud/bigquery/load_job.rb

@@ -37,8 +37,8 @@ module Google
      #   bigquery = Google::Cloud::Bigquery.new
      #   dataset = bigquery.dataset "my_dataset"
      #
-     #   gs_url = "gs://my-bucket/file-name.csv"
-     #   load_job = dataset.load_job "my_new_table", gs_url do |schema|
+     #   gcs_uri = "gs://my-bucket/file-name.csv"
+     #   load_job = dataset.load_job "my_new_table", gcs_uri do |schema|
      #     schema.string "first_name", mode: :required
      #     schema.record "cities_lived", mode: :repeated do |nested_schema|
      #       nested_schema.string "place", mode: :required
@@ -112,8 +112,7 @@ module Google
        # `false` otherwise.
        #
        def iso8859_1?
-         val = @gapi.configuration.load.encoding
-         val == "ISO-8859-1"
+         @gapi.configuration.load.encoding == "ISO-8859-1"
        end

        ##
@@ -195,8 +194,7 @@ module Google
        # `NEWLINE_DELIMITED_JSON`, `false` otherwise.
        #
        def json?
-         val = @gapi.configuration.load.source_format
-         val == "NEWLINE_DELIMITED_JSON"
+         @gapi.configuration.load.source_format == "NEWLINE_DELIMITED_JSON"
        end

        ##
@@ -218,8 +216,27 @@ module Google
        # `false` otherwise.
        #
        def backup?
-         val = @gapi.configuration.load.source_format
-         val == "DATASTORE_BACKUP"
+         @gapi.configuration.load.source_format == "DATASTORE_BACKUP"
+       end
+
+       ##
+       # Checks if the source format is ORC.
+       #
+       # @return [Boolean] `true` when the source format is `ORC`,
+       #   `false` otherwise.
+       #
+       def orc?
+         @gapi.configuration.load.source_format == "ORC"
+       end
+
+       ##
+       # Checks if the source format is Parquet.
+       #
+       # @return [Boolean] `true` when the source format is `PARQUET`,
+       #   `false` otherwise.
+       #
+       def parquet?
+         @gapi.configuration.load.source_format == "PARQUET"
        end

        ##
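A sketch exercising the new predicates (bucket and object names are placeholders); `format=` on the updater sets the source format that the predicates read back:

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset = bigquery.dataset "my_dataset"

load_job = dataset.load_job "my_new_table", "gs://my-bucket/data.parquet" do |job|
  job.format = :parquet
end

load_job.parquet? #=> true
load_job.orc?     #=> false
load_job.backup?  #=> false
```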
@@ -347,6 +364,58 @@ module Google
          nil
        end

+       ###
+       # Checks if hive partitioning options are set.
+       #
+       # @see https://cloud.google.com/bigquery/docs/hive-partitioned-loads-gcs Loading externally partitioned data
+       #
+       # @return [Boolean] `true` when hive partitioning options are set, or `false` otherwise.
+       #
+       # @!group Attributes
+       #
+       def hive_partitioning?
+         !@gapi.configuration.load.hive_partitioning_options.nil?
+       end
+
+       ###
+       # The mode of hive partitioning to use when reading data. The following modes are supported:
+       #
+       # 1. `AUTO`: automatically infer partition key name(s) and type(s).
+       # 2. `STRINGS`: automatically infer partition key name(s). All types are interpreted as strings.
+       # 3. `CUSTOM`: partition key schema is encoded in the source URI prefix.
+       #
+       # @see https://cloud.google.com/bigquery/docs/hive-partitioned-loads-gcs Loading externally partitioned data
+       #
+       # @return [String, nil] The mode of hive partitioning, or `nil` if not set.
+       #
+       # @!group Attributes
+       #
+       def hive_partitioning_mode
+         @gapi.configuration.load.hive_partitioning_options.mode if hive_partitioning?
+       end
+
+       ###
+       # The common prefix for all source uris when hive partition detection is requested. The prefix must end
+       # immediately before the partition key encoding begins. For example, consider files following this data layout:
+       #
+       # ```
+       # gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro
+       # gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro
+       # ```
+       #
+       # When hive partitioning is requested with either `AUTO` or `STRINGS` mode, the common prefix can be either of
+       # `gs://bucket/path_to_table` or `gs://bucket/path_to_table/` (trailing slash does not matter).
+       #
+       # @see https://cloud.google.com/bigquery/docs/hive-partitioned-loads-gcs Loading externally partitioned data
+       #
+       # @return [String, nil] The common prefix for all source uris, or `nil` if not set.
+       #
+       # @!group Attributes
+       #
+       def hive_partitioning_source_uri_prefix
+         @gapi.configuration.load.hive_partitioning_options.source_uri_prefix if hive_partitioning?
+       end
+
        ###
        # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
        # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
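These readers pair with the `#hive_partitioning_mode=` and `#hive_partitioning_source_uri_prefix=` writers added further down; a sketch that sets the options and reads them back (the sample bucket comes from the writer examples below):

```ruby
require "google/cloud/bigquery"

bigquery = Google::Cloud::Bigquery.new
dataset = bigquery.dataset "my_dataset"

gcs_uri = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/*"
source_uri_prefix = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/"
load_job = dataset.load_job "my_new_table", gcs_uri do |job|
  job.format = :parquet
  job.hive_partitioning_mode = :auto
  job.hive_partitioning_source_uri_prefix = source_uri_prefix
end

load_job.hive_partitioning?                  #=> true
load_job.hive_partitioning_mode              #=> "AUTO"
load_job.hive_partitioning_source_uri_prefix #=> source_uri_prefix
```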
@@ -537,6 +606,7 @@ module Google
          ##
          # @private Create an Updater object.
          def initialize gapi
+           super()
            @updates = []
            @gapi = gapi
            @schema = nil
@@ -703,9 +773,18 @@ module Google
          end

          ##
-         # Adds a numeric number field to the schema. Numeric is a
-         # fixed precision numeric type with 38 decimal digits, 9 that follow
-         # the decimal point.
+         # Adds a numeric number field to the schema. `NUMERIC` is a decimal
+         # type with fixed precision and scale. Precision is the number of
+         # digits that the number contains. Scale is how many of these
+         # digits appear after the decimal point. It supports:
+         #
+         # Precision: 38
+         # Scale: 9
+         # Min: -9.9999999999999999999999999999999999999E+28
+         # Max: 9.9999999999999999999999999999999999999E+28
+         #
+         # This type can represent decimal fractions exactly, and is suitable
+         # for financial calculations.
          #
          # See {Schema#numeric}
          #
@@ -732,6 +811,45 @@ module Google
            schema.numeric name, description: description, mode: mode
          end

+         ##
+         # Adds a bignumeric number field to the schema. `BIGNUMERIC` is a
+         # decimal type with fixed precision and scale. Precision is the
+         # number of digits that the number contains. Scale is how many of
+         # these digits appear after the decimal point. It supports:
+         #
+         # Precision: 76.76 (the 77th digit is partial)
+         # Scale: 38
+         # Min: -5.7896044618658097711785492504343953926634992332820282019728792003956564819968E+38
+         # Max: 5.7896044618658097711785492504343953926634992332820282019728792003956564819967E+38
+         #
+         # This type can represent decimal fractions exactly, and is suitable
+         # for financial calculations.
+         #
+         # See {Schema#bignumeric}
+         #
+         # @param [String] name The field name. The name must contain only
+         #   letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
+         #   start with a letter or underscore. The maximum length is 128
+         #   characters.
+         # @param [String] description A description of the field.
+         # @param [Symbol] mode The field's mode. The possible values are
+         #   `:nullable`, `:required`, and `:repeated`. The default value is
+         #   `:nullable`.
+         #
+         # @example
+         #   require "google/cloud/bigquery"
+         #
+         #   bigquery = Google::Cloud::Bigquery.new
+         #   dataset = bigquery.dataset "my_dataset"
+         #   job = dataset.load_job "my_table", "gs://abc/file" do |schema|
+         #     schema.bignumeric "total_cost", mode: :required
+         #   end
+         #
+         # @!group Schema
+         def bignumeric name, description: nil, mode: :nullable
+           schema.bignumeric name, description: description, mode: mode
+         end
+
          ##
          # Adds a boolean field to the schema.
          #
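To make the scale difference concrete, a plain-Ruby sketch with `BigDecimal`: `NUMERIC` keeps at most 9 fractional digits, while `BIGNUMERIC` keeps up to 38:

```ruby
require "bigdecimal"

pi = BigDecimal("3.14159265358979323846264338327950288419")

# What survives a NUMERIC column (scale 9) vs. a BIGNUMERIC column (scale 38):
pi.round(9).to_s("F")  #=> "3.141592654"
pi.round(38).to_s("F") #=> "3.14159265358979323846264338327950288419"
```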
@@ -1326,6 +1444,89 @@ module Google
            @gapi.configuration.update! labels: val
          end

+         ##
+         # Sets the mode of hive partitioning to use when reading data. The following modes are supported:
+         #
+         # 1. `auto`: automatically infer partition key name(s) and type(s).
+         # 2. `strings`: automatically infer partition key name(s). All types are interpreted as strings.
+         # 3. `custom`: partition key schema is encoded in the source URI prefix.
+         #
+         # Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format
+         # will lead to an error. Currently supported types include: `avro`, `csv`, `json`, `orc` and `parquet`.
+         #
+         # See {#format=} and {#hive_partitioning_source_uri_prefix=}.
+         #
+         # @see https://cloud.google.com/bigquery/docs/hive-partitioned-loads-gcs Loading externally partitioned data
+         #
+         # @param [String, Symbol] mode The mode of hive partitioning to use when reading data.
+         #
+         # @example
+         #   require "google/cloud/bigquery"
+         #
+         #   bigquery = Google::Cloud::Bigquery.new
+         #   dataset = bigquery.dataset "my_dataset"
+         #
+         #   gcs_uri = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/*"
+         #   source_uri_prefix = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
+         #     job.format = :parquet
+         #     job.hive_partitioning_mode = :auto
+         #     job.hive_partitioning_source_uri_prefix = source_uri_prefix
+         #   end
+         #
+         #   load_job.wait_until_done!
+         #   load_job.done? #=> true
+         #
+         # @!group Attributes
+         #
+         def hive_partitioning_mode= mode
+           @gapi.configuration.load.hive_partitioning_options ||= Google::Apis::BigqueryV2::HivePartitioningOptions.new
+           @gapi.configuration.load.hive_partitioning_options.mode = mode.to_s.upcase
+         end
+
+         ##
+         # Sets the common prefix for all source uris when hive partition detection is requested. The prefix must end
+         # immediately before the partition key encoding begins. For example, consider files following this data
+         # layout:
+         #
+         # ```
+         # gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro
+         # gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro
+         # ```
+         #
+         # When hive partitioning is requested with either `AUTO` or `STRINGS` mode, the common prefix can be either of
+         # `gs://bucket/path_to_table` or `gs://bucket/path_to_table/` (trailing slash does not matter).
+         #
+         # See {#hive_partitioning_mode=}.
+         #
+         # @see https://cloud.google.com/bigquery/docs/hive-partitioned-loads-gcs Loading externally partitioned data
+         #
+         # @param [String] source_uri_prefix The common prefix for all source uris.
+         #
+         # @example
+         #   require "google/cloud/bigquery"
+         #
+         #   bigquery = Google::Cloud::Bigquery.new
+         #   dataset = bigquery.dataset "my_dataset"
+         #
+         #   gcs_uri = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/*"
+         #   source_uri_prefix = "gs://cloud-samples-data/bigquery/hive-partitioning-samples/autolayout/"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
+         #     job.format = :parquet
+         #     job.hive_partitioning_mode = :auto
+         #     job.hive_partitioning_source_uri_prefix = source_uri_prefix
+         #   end
+         #
+         #   load_job.wait_until_done!
+         #   load_job.done? #=> true
+         #
+         # @!group Attributes
+         #
+         def hive_partitioning_source_uri_prefix= source_uri_prefix
+           @gapi.configuration.load.hive_partitioning_options ||= Google::Apis::BigqueryV2::HivePartitioningOptions.new
+           @gapi.configuration.load.hive_partitioning_options.source_uri_prefix = source_uri_prefix
+         end
+
          ##
          # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
          # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
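Note that the mode setter normalizes its argument through `mode.to_s.upcase`, so symbol and string arguments are interchangeable:

```ruby
# Each of these spellings produces the API value "AUTO":
[:auto, "auto", "AUTO"].map { |m| m.to_s.upcase }.uniq #=> ["AUTO"]
```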
@@ -1345,8 +1546,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
@@ -1386,8 +1587,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
@@ -1427,8 +1628,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
@@ -1468,8 +1669,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.schema do |schema|
          #       schema.integer "my_table_id", mode: :required
          #       schema.string "my_table_data", mode: :required
@@ -1510,8 +1711,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.time_partitioning_type = "DAY"
          #   end
          #
@@ -1549,8 +1750,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.time_partitioning_type = "DAY"
          #     job.time_partitioning_field = "dob"
          #     job.schema do |schema|
@@ -1585,8 +1786,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.time_partitioning_type = "DAY"
          #     job.time_partitioning_expiration = 86_400
          #   end
@@ -1645,8 +1846,8 @@ module Google
          #   bigquery = Google::Cloud::Bigquery.new
          #   dataset = bigquery.dataset "my_dataset"
          #
-         #   gs_url = "gs://my-bucket/file-name.csv"
-         #   load_job = dataset.load_job "my_new_table", gs_url do |job|
+         #   gcs_uri = "gs://my-bucket/file-name.csv"
+         #   load_job = dataset.load_job "my_new_table", gcs_uri do |job|
          #     job.time_partitioning_type = "DAY"
          #     job.time_partitioning_field = "dob"
          #     job.schema do |schema|