google-cloud-bigquery 1.18.0 → 1.21.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +48 -0
- data/TROUBLESHOOTING.md +2 -8
- data/lib/google/cloud/bigquery/argument.rb +197 -0
- data/lib/google/cloud/bigquery/copy_job.rb +18 -1
- data/lib/google/cloud/bigquery/data.rb +15 -0
- data/lib/google/cloud/bigquery/dataset.rb +379 -49
- data/lib/google/cloud/bigquery/dataset/list.rb +1 -2
- data/lib/google/cloud/bigquery/extract_job.rb +19 -2
- data/lib/google/cloud/bigquery/job.rb +198 -0
- data/lib/google/cloud/bigquery/job/list.rb +5 -5
- data/lib/google/cloud/bigquery/load_job.rb +273 -26
- data/lib/google/cloud/bigquery/model.rb +6 -4
- data/lib/google/cloud/bigquery/project.rb +82 -22
- data/lib/google/cloud/bigquery/project/list.rb +1 -2
- data/lib/google/cloud/bigquery/query_job.rb +292 -0
- data/lib/google/cloud/bigquery/routine.rb +1108 -0
- data/lib/google/cloud/bigquery/routine/list.rb +165 -0
- data/lib/google/cloud/bigquery/schema.rb +2 -2
- data/lib/google/cloud/bigquery/service.rb +96 -39
- data/lib/google/cloud/bigquery/standard_sql.rb +257 -53
- data/lib/google/cloud/bigquery/table.rb +410 -62
- data/lib/google/cloud/bigquery/table/async_inserter.rb +21 -11
- data/lib/google/cloud/bigquery/table/list.rb +1 -2
- data/lib/google/cloud/bigquery/version.rb +1 -1
- metadata +9 -6
@@ -449,7 +449,8 @@ module Google
       def feature_columns
         ensure_full_data!
         Array(@gapi_json[:featureColumns]).map do |field_gapi_json|
-
+          field_gapi = Google::Apis::BigqueryV2::StandardSqlField.from_json field_gapi_json.to_json
+          StandardSql::Field.from_gapi field_gapi
         end
       end

@@ -464,7 +465,8 @@ module Google
       def label_columns
         ensure_full_data!
         Array(@gapi_json[:labelColumns]).map do |field_gapi_json|
-
+          field_gapi = Google::Apis::BigqueryV2::StandardSqlField.from_json field_gapi_json.to_json
+          StandardSql::Field.from_gapi field_gapi
         end
       end

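The two hunks above change `Model#feature_columns` and `Model#label_columns` to round-trip each column through `Google::Apis::BigqueryV2::StandardSqlField` before building the `StandardSql::Field` wrapper, so callers still receive the same field objects. A minimal reading sketch (the dataset and model names are hypothetical; the `name` and `type.type_kind` readers are assumed from `StandardSql::Field` and `StandardSql::DataType`):

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    model = bigquery.dataset("my_dataset").model("my_model")

    # Both readers are expected to return StandardSql::Field objects.
    model.feature_columns.each do |field|
      puts "feature: #{field.name} (#{field.type.type_kind})"
    end
    model.label_columns.each do |field|
      puts "label: #{field.name} (#{field.type.type_kind})"
    end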
@@ -554,7 +556,7 @@ module Google
      #   model = dataset.model "my_model", skip_lookup: true
      #   model.exists? #=> true
      #
-     def exists? force:
+     def exists? force: false
        return resource_exists? if force
        # If we have a value, return it
        return @exists unless @exists.nil?
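With the default added above, `exists?` can be called on a lazy model reference without arguments; `force: true` still forces a round trip to the service. A short sketch (names hypothetical):

    model = bigquery.dataset("my_dataset").model("my_model", skip_lookup: true)
    model.exists?              #=> true (memoized after the first check)
    model.exists? force: true  # always performs the lookup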
@@ -668,7 +670,7 @@ module Google
      end

      ##
-     # @private New lazy Model object without making an HTTP request.
+     # @private New lazy Model object without making an HTTP request, for use with the skip_lookup option.
      def self.new_reference project_id, dataset_id, model_id, service
        raise ArgumentError, "project_id is required" unless project_id
        raise ArgumentError, "dataset_id is required" unless dataset_id
@@ -419,12 +419,20 @@ module Google
      #   list must have a different key. See [Requirements for
      #   labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
      # @param [Array<String>, String] udfs User-defined function resources
-     #   used in
-     #   Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
+     #   used in a legacy SQL query. May be either a code resource to load from
+     #   a Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
      #   that contains code for a user-defined function (UDF). Providing an
      #   inline code resource is equivalent to providing a URI for a file
-     #   containing the same code.
-     #
+     #   containing the same code.
+     #
+     #   This parameter is used for defining User Defined Function (UDF)
+     #   resources only when using legacy SQL. Users of standard SQL should
+     #   leverage either DDL (e.g. `CREATE [TEMPORARY] FUNCTION ...`) or the
+     #   Routines API to define UDF resources.
+     #
+     #   For additional information on migrating, see: [Migrating to
+     #   standard SQL - Differences in user-defined JavaScript
+     #   functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions)
      # @param [Integer] maximum_billing_tier Deprecated: Change the billing
      #   tier to allow high-compute queries.
      # @yield [job] a job configuration object
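The reworded `udfs` documentation above applies to legacy SQL only and points standard SQL users at DDL or the Routines API. A hedged sketch of both styles (dataset, table, bucket, and function names are hypothetical):

    # Legacy SQL: pass UDF resources through the documented udfs option.
    job = bigquery.query_job "SELECT myFunc(x) FROM [my_dataset.my_table]",
                             legacy_sql: true,
                             udfs: ["gs://my-bucket/my-udf.js"]

    # Standard SQL: define the function with DDL in the query itself.
    sql = <<~SQL
      CREATE TEMPORARY FUNCTION myFunc(x INT64) AS (x * 2);
      SELECT myFunc(x) FROM `my_dataset.my_table`;
    SQL
    data = bigquery.query sql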
@@ -527,7 +535,7 @@ module Google
      #
      #   job.wait_until_done!
      #   if !job.failed?
-     #     table_ref = job.ddl_target_table
+     #     table_ref = job.ddl_target_table # Or ddl_target_routine for CREATE/DROP FUNCTION/PROCEDURE
      #   end
      #
      # @example Execute a DML statement:
@@ -786,7 +794,7 @@ module Google
      #
      #   data = bigquery.query "CREATE TABLE `my_dataset.my_table` (x INT64)"
      #
-     #   table_ref = data.ddl_target_table
+     #   table_ref = data.ddl_target_table # Or ddl_target_routine for CREATE/DROP FUNCTION/PROCEDURE
      #
      # @example Execute a DML statement:
      #   require "google/cloud/bigquery"
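Since the same note now appears on the synchronous `query` example, the data object returned by a DDL query exposes the target as well. A small sketch (names hypothetical; `Data#ddl_target_routine` and the `routine_id` reader are assumed from the comment above and the new routine.rb in the file list):

    data = bigquery.query "CREATE TABLE `my_dataset.my_table` (x INT64)"
    puts data.ddl_target_table.table_id

    data = bigquery.query "CREATE FUNCTION `my_dataset.my_func`(x INT64) AS (x * 2)"
    puts data.ddl_target_routine.routine_id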
@@ -1046,8 +1054,7 @@ module Google
      #
      def datasets all: nil, filter: nil, token: nil, max: nil
        ensure_service!
-
-       gapi = service.list_datasets options
+       gapi = service.list_datasets all: all, filter: filter, token: token, max: max
        Dataset::List.from_gapi gapi, service, all, filter, max
      end

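The `datasets` change simply forwards the documented keyword arguments to the service in place of the stale `options` hash; callers are unaffected. For example (the label filter is hypothetical):

    bigquery.datasets(filter: "labels.env:prod", max: 100).all do |dataset|
      puts dataset.dataset_id
    end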
@@ -1085,18 +1092,22 @@ module Google
      #   part of the larger set of results to view. Optional.
      # @param [Integer] max Maximum number of jobs to return. Optional.
      # @param [String] filter A filter for job state. Optional.
-     # @param [Time] min_created_at Min value for {Job#created_at}. When
-     #   provided, only jobs created after or at this time are returned.
-     #   Optional.
-     # @param [Time] max_created_at Max value for {Job#created_at}. When
-     #   provided, only jobs created before or at this time are returned.
-     #   Optional.
      #
      #   Acceptable values are:
      #
      #   * `done` - Finished jobs
      #   * `pending` - Pending jobs
      #   * `running` - Running jobs
+     # @param [Time] min_created_at Min value for {Job#created_at}. When
+     #   provided, only jobs created after or at this time are returned.
+     #   Optional.
+     # @param [Time] max_created_at Max value for {Job#created_at}. When
+     #   provided, only jobs created before or at this time are returned.
+     #   Optional.
+     # @param [Google::Cloud::Bigquery::Job, String] parent_job A job
+     #   object or a job ID. If set, retrieve only child jobs of the
+     #   specified parent. Optional. See {Job#job_id}, {Job#num_child_jobs},
+     #   and {Job#parent_job_id}.
      #
      # @return [Array<Google::Cloud::Bigquery::Job>] (See
      #   {Google::Cloud::Bigquery::Job::List})
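The reordered documentation keeps the existing `min_created_at`/`max_created_at` filters and adds `parent_job`. A short sketch combining a state filter with a time window (the one-hour window is arbitrary):

    recent_done = bigquery.jobs filter:         "done",
                                min_created_at: Time.now - 3600,
                                max_created_at: Time.now
    recent_done.each { |job| puts job.job_id }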
@@ -1145,13 +1156,63 @@ module Google
      #     # process job
      #   end
      #
-
-
+     # @example Retrieve child jobs by setting `parent_job`:
+     #   require "google/cloud/bigquery"
+     #
+     #   bigquery = Google::Cloud::Bigquery.new
+     #
+     #   multi_statement_sql = <<~SQL
+     #     -- Declare a variable to hold names as an array.
+     #     DECLARE top_names ARRAY<STRING>;
+     #     -- Build an array of the top 100 names from the year 2017.
+     #     SET top_names = (
+     #       SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
+     #       FROM `bigquery-public-data.usa_names.usa_1910_current`
+     #       WHERE year = 2017
+     #     );
+     #     -- Which names appear as words in Shakespeare's plays?
+     #     SELECT
+     #       name AS shakespeare_name
+     #     FROM UNNEST(top_names) AS name
+     #     WHERE name IN (
+     #       SELECT word
+     #       FROM `bigquery-public-data.samples.shakespeare`
+     #     );
+     #   SQL
+     #
+     #   job = bigquery.query_job multi_statement_sql
+     #
+     #   job.wait_until_done!
+     #
+     #   child_jobs = bigquery.jobs parent_job: job
+     #
+     #   child_jobs.each do |child_job|
+     #     script_statistics = child_job.script_statistics
+     #     puts script_statistics.evaluation_kind
+     #     script_statistics.stack_frames.each do |stack_frame|
+     #       puts stack_frame.text
+     #     end
+     #   end
+     #
+     def jobs all: nil,
+              token: nil,
+              max: nil,
+              filter: nil,
+              min_created_at: nil,
+              max_created_at: nil,
+              parent_job: nil
        ensure_service!
-
-
-
-
+       parent_job = parent_job.job_id if parent_job.is_a? Job
+       options = {
+         parent_job_id: parent_job,
+         all: all,
+         token: token,
+         max: max, filter: filter,
+         min_created_at: min_created_at,
+         max_created_at: max_created_at
+       }
+       gapi = service.list_jobs(**options)
+       Job::List.from_gapi gapi, service, **options
      end

      ##
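Because the full options hash (including `parent_job_id`) is handed to `Job::List.from_gapi`, later pages fetched from the list should carry the same filters. A pagination sketch, assuming the list's usual `next?`/`next` API:

    jobs = bigquery.jobs max: 50, filter: "running"
    loop do
      jobs.each { |job| puts job.job_id }
      break unless jobs.next?
      jobs = jobs.next
    end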
@@ -1197,8 +1258,7 @@ module Google
      #
      def projects token: nil, max: nil
        ensure_service!
-
-       gapi = service.list_projects options
+       gapi = service.list_projects token: token, max: max
        Project::List.from_gapi gapi, service, max
      end

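`projects` gets the same treatment. For example:

    bigquery.projects(max: 10).all do |project|
      puts project.project_id
    end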
@@ -72,8 +72,7 @@ module Google
      def next
        return nil unless next?
        ensure_service!
-
-       gapi = @service.list_projects options
+       gapi = @service.list_projects token: token, max: @max
        self.class.from_gapi gapi, @service, @max
      end

@@ -48,6 +48,44 @@ module Google
    #     puts job.data.first
    #   end
    #
+   # @example With multiple statements and child jobs:
+   #   require "google/cloud/bigquery"
+   #
+   #   bigquery = Google::Cloud::Bigquery.new
+   #
+   #   multi_statement_sql = <<~SQL
+   #     -- Declare a variable to hold names as an array.
+   #     DECLARE top_names ARRAY<STRING>;
+   #     -- Build an array of the top 100 names from the year 2017.
+   #     SET top_names = (
+   #       SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
+   #       FROM `bigquery-public-data.usa_names.usa_1910_current`
+   #       WHERE year = 2017
+   #     );
+   #     -- Which names appear as words in Shakespeare's plays?
+   #     SELECT
+   #       name AS shakespeare_name
+   #     FROM UNNEST(top_names) AS name
+   #     WHERE name IN (
+   #       SELECT word
+   #       FROM `bigquery-public-data.samples.shakespeare`
+   #     );
+   #   SQL
+   #
+   #   job = bigquery.query_job multi_statement_sql
+   #
+   #   job.wait_until_done!
+   #
+   #   child_jobs = bigquery.jobs parent_job: job
+   #
+   #   child_jobs.each do |child_job|
+   #     script_statistics = child_job.script_statistics
+   #     puts script_statistics.evaluation_kind
+   #     script_statistics.stack_frames.each do |stack_frame|
+   #       puts stack_frame.text
+   #     end
+   #   end
+   #
    class QueryJob < Job
      ##
      # Checks if the priority for the query is `BATCH`.
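The new class-level example mirrors the one added to `Project#jobs` above. The `{Job#num_child_jobs}` and `{Job#parent_job_id}` accessors referenced there can tie child jobs back to the parent script, e.g. (a hedged sketch reusing `multi_statement_sql` from the example above):

    job = bigquery.query_job multi_statement_sql
    job.wait_until_done!

    puts job.num_child_jobs
    bigquery.jobs(parent_job: job).each do |child_job|
      # Each child of the script reports the parent's job ID.
      puts child_job.parent_job_id == job.job_id
    end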
@@ -305,6 +343,22 @@ module Google
        @gapi.statistics.query.ddl_operation_performed
      end

+     ##
+     # The DDL target routine, in reference state. (See {Routine#reference?}.)
+     # Present only for `CREATE/DROP FUNCTION/PROCEDURE` queries. (See
+     # {#statement_type}.)
+     #
+     # @return [Google::Cloud::Bigquery::Routine, nil] The DDL target routine, in
+     #   reference state.
+     #
+     def ddl_target_routine
+       return nil unless @gapi.statistics.query
+       ensure_service!
+       routine = @gapi.statistics.query.ddl_target_routine
+       return nil unless routine
+       Google::Cloud::Bigquery::Routine.new_reference_from_gapi routine, service
+     end
+
      ##
      # The DDL target table, in reference state. (See {Table#reference?}.)
      # Present only for `CREATE/DROP TABLE/VIEW` queries. (See
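A minimal sketch of the new accessor on a finished DDL job (the function name is hypothetical and the `routine_id` reader on the returned reference is assumed):

    job = bigquery.query_job "CREATE FUNCTION `my_dataset.my_func`(x INT64) AS (x * 2)"
    job.wait_until_done!

    routine_ref = job.ddl_target_routine
    puts routine_ref.routine_id unless routine_ref.nil?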
@@ -394,6 +448,69 @@ module Google
        EncryptionConfiguration.from_gapi @gapi.configuration.query.destination_encryption_configuration
      end

+     ###
+     # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
+     #
+     # @!group Attributes
+     #
+     def range_partitioning?
+       !@gapi.configuration.query.range_partitioning.nil?
+     end
+
+     ###
+     # The field on which the destination table will be range partitioned, if any. The field must be a
+     # top-level `NULLABLE/REQUIRED` field. The only supported type is `INTEGER/INT64`. See
+     # [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # @return [String, nil] The partition field, if a field was configured, or `nil` if not range partitioned.
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_field
+       @gapi.configuration.query.range_partitioning.field if range_partitioning?
+     end
+
+     ###
+     # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned.
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_start
+       @gapi.configuration.query.range_partitioning.range.start if range_partitioning?
+     end
+
+     ###
+     # The width of each interval. See [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
+     #   partitioned.
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_interval
+       @gapi.configuration.query.range_partitioning.range.interval if range_partitioning?
+     end
+
+     ###
+     # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned.
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_end
+       @gapi.configuration.query.range_partitioning.range.end if range_partitioning?
+     end
+
      ###
      # Checks if the destination table will be time-partitioned. See
      # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
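Reading the configuration back from a job configured with the setters added further down in this diff:

    if job.range_partitioning?
      puts job.range_partitioning_field     #=> "num"
      puts job.range_partitioning_start     #=> 0
      puts job.range_partitioning_interval  #=> 10
      puts job.range_partitioning_end       #=> 100
    end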
@@ -1012,6 +1129,164 @@ module Google
        @gapi.configuration.query.update! destination_encryption_configuration: val.to_gapi
      end

+     ##
+     # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
+     #
+     # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+     # partitioning on an existing table.
+     #
+     # @param [String] field The range partition field. the destination table is partitioned by this
+     #   field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
+     #   type is `INTEGER/INT64`.
+     #
+     # @example
+     #   require "google/cloud/bigquery"
+     #
+     #   bigquery = Google::Cloud::Bigquery.new
+     #   dataset = bigquery.dataset "my_dataset"
+     #   destination_table = dataset.table "my_destination_table",
+     #                                     skip_lookup: true
+     #
+     #   job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
+     #     job.table = destination_table
+     #     job.range_partitioning_field = "num"
+     #     job.range_partitioning_start = 0
+     #     job.range_partitioning_interval = 10
+     #     job.range_partitioning_end = 100
+     #   end
+     #
+     #   job.wait_until_done!
+     #   job.done? #=> true
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_field= field
+       @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+         range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+       )
+       @gapi.configuration.query.range_partitioning.field = field
+     end
+
+     ##
+     # Sets the start of range partitioning, inclusive, for the destination table. See [Creating and using integer
+     # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+     # partitioning on an existing table.
+     #
+     # See {#range_partitioning_field=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
+     #
+     # @param [Integer] range_start The start of range partitioning, inclusive.
+     #
+     # @example
+     #   require "google/cloud/bigquery"
+     #
+     #   bigquery = Google::Cloud::Bigquery.new
+     #   dataset = bigquery.dataset "my_dataset"
+     #   destination_table = dataset.table "my_destination_table",
+     #                                     skip_lookup: true
+     #
+     #   job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
+     #     job.table = destination_table
+     #     job.range_partitioning_field = "num"
+     #     job.range_partitioning_start = 0
+     #     job.range_partitioning_interval = 10
+     #     job.range_partitioning_end = 100
+     #   end
+     #
+     #   job.wait_until_done!
+     #   job.done? #=> true
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_start= range_start
+       @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+         range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+       )
+       @gapi.configuration.query.range_partitioning.range.start = range_start
+     end
+
+     ##
+     # Sets width of each interval for data in range partitions. See [Creating and using integer range partitioned
+     # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+     # partitioning on an existing table.
+     #
+     # See {#range_partitioning_field=}, {#range_partitioning_start=} and {#range_partitioning_end=}.
+     #
+     # @param [Integer] range_interval The width of each interval, for data in partitions.
+     #
+     # @example
+     #   require "google/cloud/bigquery"
+     #
+     #   bigquery = Google::Cloud::Bigquery.new
+     #   dataset = bigquery.dataset "my_dataset"
+     #   destination_table = dataset.table "my_destination_table",
+     #                                     skip_lookup: true
+     #
+     #   job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
+     #     job.table = destination_table
+     #     job.range_partitioning_field = "num"
+     #     job.range_partitioning_start = 0
+     #     job.range_partitioning_interval = 10
+     #     job.range_partitioning_end = 100
+     #   end
+     #
+     #   job.wait_until_done!
+     #   job.done? #=> true
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_interval= range_interval
+       @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+         range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+       )
+       @gapi.configuration.query.range_partitioning.range.interval = range_interval
+     end
+
+     ##
+     # Sets the end of range partitioning, exclusive, for the destination table. See [Creating and using integer
+     # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
+     #
+     # You can only set range partitioning when creating a table. BigQuery does not allow you to change
+     # partitioning on an existing table.
+     #
+     # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_field=}.
+     #
+     # @param [Integer] range_end The end of range partitioning, exclusive.
+     #
+     # @example
+     #   require "google/cloud/bigquery"
+     #
+     #   bigquery = Google::Cloud::Bigquery.new
+     #   dataset = bigquery.dataset "my_dataset"
+     #   destination_table = dataset.table "my_destination_table",
+     #                                     skip_lookup: true
+     #
+     #   job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
+     #     job.table = destination_table
+     #     job.range_partitioning_field = "num"
+     #     job.range_partitioning_start = 0
+     #     job.range_partitioning_interval = 10
+     #     job.range_partitioning_end = 100
+     #   end
+     #
+     #   job.wait_until_done!
+     #   job.done? #=> true
+     #
+     # @!group Attributes
+     #
+     def range_partitioning_end= range_end
+       @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
+         range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
+       )
+       @gapi.configuration.query.range_partitioning.range.end = range_end
+     end
+
      ##
      # Sets the partitioning for the destination table. See [Partitioned
      # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
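After the job in the examples above completes, the destination table should reflect the same configuration, assuming table.rb gains matching readers in this release (see its entry in the file list):

    job.wait_until_done!
    table = dataset.table "my_destination_table"

    puts table.range_partitioning?       # assumed Table reader, per the table.rb changes
    puts table.range_partitioning_field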
@@ -1198,6 +1473,23 @@ module Google
        @gapi.configuration.query.clustering.fields = fields
      end

+     def cancel
+       raise "not implemented in #{self.class}"
+     end
+
+     def rerun!
+       raise "not implemented in #{self.class}"
+     end
+
+     def reload!
+       raise "not implemented in #{self.class}"
+     end
+     alias refresh! reload!
+
+     def wait_until_done!
+       raise "not implemented in #{self.class}"
+     end
+
      ##
      # @private Returns the Google API client library version of this job.
      #