google-cloud-bigquery 1.18.1 → 1.21.2

Sign up to get free protection for your applications and to get access to all the features.
@@ -449,7 +449,8 @@ module Google
449
449
  def feature_columns
450
450
  ensure_full_data!
451
451
  Array(@gapi_json[:featureColumns]).map do |field_gapi_json|
452
- StandardSql::Field.from_gapi_json field_gapi_json
452
+ field_gapi = Google::Apis::BigqueryV2::StandardSqlField.from_json field_gapi_json.to_json
453
+ StandardSql::Field.from_gapi field_gapi
453
454
  end
454
455
  end
455
456
 
@@ -464,7 +465,8 @@ module Google
464
465
  def label_columns
465
466
  ensure_full_data!
466
467
  Array(@gapi_json[:labelColumns]).map do |field_gapi_json|
467
- StandardSql::Field.from_gapi_json field_gapi_json
468
+ field_gapi = Google::Apis::BigqueryV2::StandardSqlField.from_json field_gapi_json.to_json
469
+ StandardSql::Field.from_gapi field_gapi
468
470
  end
469
471
  end
470
472
 
@@ -554,7 +556,7 @@ module Google
554
556
  # model = dataset.model "my_model", skip_lookup: true
555
557
  # model.exists? #=> true
556
558
  #
557
- def exists? force: nil
559
+ def exists? force: false
558
560
  return resource_exists? if force
559
561
  # If we have a value, return it
560
562
  return @exists unless @exists.nil?
@@ -668,7 +670,7 @@ module Google
668
670
  end
669
671
 
670
672
  ##
671
- # @private New lazy Model object without making an HTTP request.
673
+ # @private New lazy Model object without making an HTTP request, for use with the skip_lookup option.
672
674
  def self.new_reference project_id, dataset_id, model_id, service
673
675
  raise ArgumentError, "project_id is required" unless project_id
674
676
  raise ArgumentError, "dataset_id is required" unless dataset_id
@@ -419,12 +419,20 @@ module Google
419
419
  # list must have a different key. See [Requirements for
420
420
  # labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#requirements).
421
421
  # @param [Array<String>, String] udfs User-defined function resources
422
- # used in the query. May be either a code resource to load from a
423
- # Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
422
+ # used in a legacy SQL query. May be either a code resource to load from
423
+ # a Google Cloud Storage URI (`gs://bucket/path`), or an inline resource
424
424
  # that contains code for a user-defined function (UDF). Providing an
425
425
  # inline code resource is equivalent to providing a URI for a file
426
- # containing the same code. See [User-Defined
427
- # Functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions).
426
+ # containing the same code.
427
+ #
428
+ # This parameter is used for defining User Defined Function (UDF)
429
+ # resources only when using legacy SQL. Users of standard SQL should
430
+ # leverage either DDL (e.g. `CREATE [TEMPORARY] FUNCTION ...`) or the
431
+ # Routines API to define UDF resources.
432
+ #
433
+ # For additional information on migrating, see: [Migrating to
434
+ # standard SQL - Differences in user-defined JavaScript
435
+ # functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions)
428
436
  # @param [Integer] maximum_billing_tier Deprecated: Change the billing
429
437
  # tier to allow high-compute queries.
430
438
  # @yield [job] a job configuration object
@@ -527,7 +535,7 @@ module Google
527
535
  #
528
536
  # job.wait_until_done!
529
537
  # if !job.failed?
530
- # table_ref = job.ddl_target_table
538
+ # table_ref = job.ddl_target_table # Or ddl_target_routine for CREATE/DROP FUNCTION/PROCEDURE
531
539
  # end
532
540
  #
533
541
  # @example Execute a DML statement:
@@ -709,9 +717,12 @@ module Google
709
717
  # sql = "SELECT name FROM `my_project.my_dataset.my_table`"
710
718
  # data = bigquery.query sql
711
719
  #
720
+ # # Iterate over the first page of results
712
721
  # data.each do |row|
713
722
  # puts row[:name]
714
723
  # end
724
+ # # Retrieve the next page of results
725
+ # data = data.next if data.next?
715
726
  #
716
727
  # @example Query using legacy SQL:
717
728
  # require "google/cloud/bigquery"
@@ -721,9 +732,12 @@ module Google
721
732
  # sql = "SELECT name FROM [my_project:my_dataset.my_table]"
722
733
  # data = bigquery.query sql, legacy_sql: true
723
734
  #
735
+ # # Iterate over the first page of results
724
736
  # data.each do |row|
725
737
  # puts row[:name]
726
738
  # end
739
+ # # Retrieve the next page of results
740
+ # data = data.next if data.next?
727
741
  #
728
742
  # @example Retrieve all rows: (See {Data#all})
729
743
  # require "google/cloud/bigquery"
@@ -746,9 +760,12 @@ module Google
746
760
  # "WHERE id = ?",
747
761
  # params: [1]
748
762
  #
763
+ # # Iterate over the first page of results
749
764
  # data.each do |row|
750
765
  # puts row[:name]
751
766
  # end
767
+ # # Retrieve the next page of results
768
+ # data = data.next if data.next?
752
769
  #
753
770
  # @example Query using named query parameters:
754
771
  # require "google/cloud/bigquery"
@@ -760,9 +777,12 @@ module Google
760
777
  # "WHERE id = @id",
761
778
  # params: { id: 1 }
762
779
  #
780
+ # # Iterate over the first page of results
763
781
  # data.each do |row|
764
782
  # puts row[:name]
765
783
  # end
784
+ # # Retrieve the next page of results
785
+ # data = data.next if data.next?
766
786
  #
767
787
  # @example Query using named query parameters with types:
768
788
  # require "google/cloud/bigquery"
@@ -775,9 +795,12 @@ module Google
775
795
  # params: { ids: [] },
776
796
  # types: { ids: [:INT64] }
777
797
  #
798
+ # # Iterate over the first page of results
778
799
  # data.each do |row|
779
800
  # puts row[:name]
780
801
  # end
802
+ # # Retrieve the next page of results
803
+ # data = data.next if data.next?
781
804
  #
782
805
  # @example Execute a DDL statement:
783
806
  # require "google/cloud/bigquery"
@@ -786,7 +809,7 @@ module Google
786
809
  #
787
810
  # data = bigquery.query "CREATE TABLE `my_dataset.my_table` (x INT64)"
788
811
  #
789
- # table_ref = data.ddl_target_table
812
+ # table_ref = data.ddl_target_table # Or ddl_target_routine for CREATE/DROP FUNCTION/PROCEDURE
790
813
  #
791
814
  # @example Execute a DML statement:
792
815
  # require "google/cloud/bigquery"
@@ -816,9 +839,12 @@ module Google
816
839
  # query.table = dataset.table "my_table", skip_lookup: true
817
840
  # end
818
841
  #
842
+ # # Iterate over the first page of results
819
843
  # data.each do |row|
820
844
  # puts row[:name]
821
845
  # end
846
+ # # Retrieve the next page of results
847
+ # data = data.next if data.next?
822
848
  #
823
849
  def query query, params: nil, types: nil, external: nil, max: nil, cache: true, dataset: nil, project: nil,
824
850
  standard_sql: nil, legacy_sql: nil, &block
@@ -880,9 +906,12 @@ module Google
880
906
  # data = bigquery.query "SELECT * FROM my_ext_table",
881
907
  # external: { my_ext_table: csv_table }
882
908
  #
909
+ # # Iterate over the first page of results
883
910
  # data.each do |row|
884
911
  # puts row[:name]
885
912
  # end
913
+ # # Retrieve the next page of results
914
+ # data = data.next if data.next?
886
915
  #
887
916
  def external url, format: nil
888
917
  ext = External.from_urls url, format
@@ -1046,8 +1075,7 @@ module Google
1046
1075
  #
1047
1076
  def datasets all: nil, filter: nil, token: nil, max: nil
1048
1077
  ensure_service!
1049
- options = { all: all, filter: filter, token: token, max: max }
1050
- gapi = service.list_datasets options
1078
+ gapi = service.list_datasets all: all, filter: filter, token: token, max: max
1051
1079
  Dataset::List.from_gapi gapi, service, all, filter, max
1052
1080
  end
1053
1081
 
@@ -1085,18 +1113,22 @@ module Google
1085
1113
  # part of the larger set of results to view. Optional.
1086
1114
  # @param [Integer] max Maximum number of jobs to return. Optional.
1087
1115
  # @param [String] filter A filter for job state. Optional.
1088
- # @param [Time] min_created_at Min value for {Job#created_at}. When
1089
- # provided, only jobs created after or at this time are returned.
1090
- # Optional.
1091
- # @param [Time] max_created_at Max value for {Job#created_at}. When
1092
- # provided, only jobs created before or at this time are returned.
1093
- # Optional.
1094
1116
  #
1095
1117
  # Acceptable values are:
1096
1118
  #
1097
1119
  # * `done` - Finished jobs
1098
1120
  # * `pending` - Pending jobs
1099
1121
  # * `running` - Running jobs
1122
+ # @param [Time] min_created_at Min value for {Job#created_at}. When
1123
+ # provided, only jobs created after or at this time are returned.
1124
+ # Optional.
1125
+ # @param [Time] max_created_at Max value for {Job#created_at}. When
1126
+ # provided, only jobs created before or at this time are returned.
1127
+ # Optional.
1128
+ # @param [Google::Cloud::Bigquery::Job, String] parent_job A job
1129
+ # object or a job ID. If set, retrieve only child jobs of the
1130
+ # specified parent. Optional. See {Job#job_id}, {Job#num_child_jobs},
1131
+ # and {Job#parent_job_id}.
1100
1132
  #
1101
1133
  # @return [Array<Google::Cloud::Bigquery::Job>] (See
1102
1134
  # {Google::Cloud::Bigquery::Job::List})
@@ -1145,13 +1177,63 @@ module Google
1145
1177
  # # process job
1146
1178
  # end
1147
1179
  #
1148
- def jobs all: nil, token: nil, max: nil, filter: nil,
1149
- min_created_at: nil, max_created_at: nil
1180
+ # @example Retrieve child jobs by setting `parent_job`:
1181
+ # require "google/cloud/bigquery"
1182
+ #
1183
+ # bigquery = Google::Cloud::Bigquery.new
1184
+ #
1185
+ # multi_statement_sql = <<~SQL
1186
+ # -- Declare a variable to hold names as an array.
1187
+ # DECLARE top_names ARRAY<STRING>;
1188
+ # -- Build an array of the top 100 names from the year 2017.
1189
+ # SET top_names = (
1190
+ # SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
1191
+ # FROM `bigquery-public-data.usa_names.usa_1910_current`
1192
+ # WHERE year = 2017
1193
+ # );
1194
+ # -- Which names appear as words in Shakespeare's plays?
1195
+ # SELECT
1196
+ # name AS shakespeare_name
1197
+ # FROM UNNEST(top_names) AS name
1198
+ # WHERE name IN (
1199
+ # SELECT word
1200
+ # FROM `bigquery-public-data.samples.shakespeare`
1201
+ # );
1202
+ # SQL
1203
+ #
1204
+ # job = bigquery.query_job multi_statement_sql
1205
+ #
1206
+ # job.wait_until_done!
1207
+ #
1208
+ # child_jobs = bigquery.jobs parent_job: job
1209
+ #
1210
+ # child_jobs.each do |child_job|
1211
+ # script_statistics = child_job.script_statistics
1212
+ # puts script_statistics.evaluation_kind
1213
+ # script_statistics.stack_frames.each do |stack_frame|
1214
+ # puts stack_frame.text
1215
+ # end
1216
+ # end
1217
+ #
1218
+ def jobs all: nil,
1219
+ token: nil,
1220
+ max: nil,
1221
+ filter: nil,
1222
+ min_created_at: nil,
1223
+ max_created_at: nil,
1224
+ parent_job: nil
1150
1225
  ensure_service!
1151
- options = { all: all, token: token, max: max, filter: filter, min_created_at: min_created_at,
1152
- max_created_at: max_created_at }
1153
- gapi = service.list_jobs options
1154
- Job::List.from_gapi gapi, service, options
1226
+ parent_job = parent_job.job_id if parent_job.is_a? Job
1227
+ options = {
1228
+ parent_job_id: parent_job,
1229
+ all: all,
1230
+ token: token,
1231
+ max: max, filter: filter,
1232
+ min_created_at: min_created_at,
1233
+ max_created_at: max_created_at
1234
+ }
1235
+ gapi = service.list_jobs(**options)
1236
+ Job::List.from_gapi gapi, service, **options
1155
1237
  end
1156
1238
 
1157
1239
  ##
@@ -1197,8 +1279,7 @@ module Google
1197
1279
  #
1198
1280
  def projects token: nil, max: nil
1199
1281
  ensure_service!
1200
- options = { token: token, max: max }
1201
- gapi = service.list_projects options
1282
+ gapi = service.list_projects token: token, max: max
1202
1283
  Project::List.from_gapi gapi, service, max
1203
1284
  end
1204
1285
 
@@ -1224,9 +1305,12 @@ module Google
1224
1305
  # "WHERE time_of_date = @time",
1225
1306
  # params: { time: fourpm }
1226
1307
  #
1308
+ # # Iterate over the first page of results
1227
1309
  # data.each do |row|
1228
1310
  # puts row[:name]
1229
1311
  # end
1312
+ # # Retrieve the next page of results
1313
+ # data = data.next if data.next?
1230
1314
  #
1231
1315
  # @example Create Time with fractional seconds:
1232
1316
  # require "google/cloud/bigquery"
@@ -1239,9 +1323,12 @@ module Google
1239
1323
  # "WHERE time_of_date >= @time",
1240
1324
  # params: { time: precise_time }
1241
1325
  #
1326
+ # # Iterate over the first page of results
1242
1327
  # data.each do |row|
1243
1328
  # puts row[:name]
1244
1329
  # end
1330
+ # # Retrieve the next page of results
1331
+ # data = data.next if data.next?
1245
1332
  #
1246
1333
  def time hour, minute, second
1247
1334
  Bigquery::Time.new "#{hour}:#{minute}:#{second}"
@@ -72,8 +72,7 @@ module Google
72
72
  def next
73
73
  return nil unless next?
74
74
  ensure_service!
75
- options = { all: @hidden, token: token, max: @max }
76
- gapi = @service.list_projects options
75
+ gapi = @service.list_projects token: token, max: @max
77
76
  self.class.from_gapi gapi, @service, @max
78
77
  end
79
78
 
@@ -48,6 +48,44 @@ module Google
48
48
  # puts job.data.first
49
49
  # end
50
50
  #
51
+ # @example With multiple statements and child jobs:
52
+ # require "google/cloud/bigquery"
53
+ #
54
+ # bigquery = Google::Cloud::Bigquery.new
55
+ #
56
+ # multi_statement_sql = <<~SQL
57
+ # -- Declare a variable to hold names as an array.
58
+ # DECLARE top_names ARRAY<STRING>;
59
+ # -- Build an array of the top 100 names from the year 2017.
60
+ # SET top_names = (
61
+ # SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
62
+ # FROM `bigquery-public-data.usa_names.usa_1910_current`
63
+ # WHERE year = 2017
64
+ # );
65
+ # -- Which names appear as words in Shakespeare's plays?
66
+ # SELECT
67
+ # name AS shakespeare_name
68
+ # FROM UNNEST(top_names) AS name
69
+ # WHERE name IN (
70
+ # SELECT word
71
+ # FROM `bigquery-public-data.samples.shakespeare`
72
+ # );
73
+ # SQL
74
+ #
75
+ # job = bigquery.query_job multi_statement_sql
76
+ #
77
+ # job.wait_until_done!
78
+ #
79
+ # child_jobs = bigquery.jobs parent_job: job
80
+ #
81
+ # child_jobs.each do |child_job|
82
+ # script_statistics = child_job.script_statistics
83
+ # puts script_statistics.evaluation_kind
84
+ # script_statistics.stack_frames.each do |stack_frame|
85
+ # puts stack_frame.text
86
+ # end
87
+ # end
88
+ #
51
89
  class QueryJob < Job
52
90
  ##
53
91
  # Checks if the priority for the query is `BATCH`.
@@ -305,6 +343,22 @@ module Google
305
343
  @gapi.statistics.query.ddl_operation_performed
306
344
  end
307
345
 
346
+ ##
347
+ # The DDL target routine, in reference state. (See {Routine#reference?}.)
348
+ # Present only for `CREATE/DROP FUNCTION/PROCEDURE` queries. (See
349
+ # {#statement_type}.)
350
+ #
351
+ # @return [Google::Cloud::Bigquery::Routine, nil] The DDL target routine, in
352
+ # reference state.
353
+ #
354
+ def ddl_target_routine
355
+ return nil unless @gapi.statistics.query
356
+ ensure_service!
357
+ routine = @gapi.statistics.query.ddl_target_routine
358
+ return nil unless routine
359
+ Google::Cloud::Bigquery::Routine.new_reference_from_gapi routine, service
360
+ end
361
+
308
362
  ##
309
363
  # The DDL target table, in reference state. (See {Table#reference?}.)
310
364
  # Present only for `CREATE/DROP TABLE/VIEW` queries. (See
@@ -394,6 +448,69 @@ module Google
394
448
  EncryptionConfiguration.from_gapi @gapi.configuration.query.destination_encryption_configuration
395
449
  end
396
450
 
451
+ ###
452
+ # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
453
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
454
+ #
455
+ # @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
456
+ #
457
+ # @!group Attributes
458
+ #
459
+ def range_partitioning?
460
+ !@gapi.configuration.query.range_partitioning.nil?
461
+ end
462
+
463
+ ###
464
+ # The field on which the destination table will be range partitioned, if any. The field must be a
465
+ # top-level `NULLABLE/REQUIRED` field. The only supported type is `INTEGER/INT64`. See
466
+ # [Creating and using integer range partitioned
467
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
468
+ #
469
+ # @return [String, nil] The partition field, if a field was configured, or `nil` if not range partitioned.
470
+ #
471
+ # @!group Attributes
472
+ #
473
+ def range_partitioning_field
474
+ @gapi.configuration.query.range_partitioning.field if range_partitioning?
475
+ end
476
+
477
+ ###
478
+ # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
479
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
480
+ #
481
+ # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned.
482
+ #
483
+ # @!group Attributes
484
+ #
485
+ def range_partitioning_start
486
+ @gapi.configuration.query.range_partitioning.range.start if range_partitioning?
487
+ end
488
+
489
+ ###
490
+ # The width of each interval. See [Creating and using integer range partitioned
491
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
492
+ #
493
+ # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
494
+ # partitioned.
495
+ #
496
+ # @!group Attributes
497
+ #
498
+ def range_partitioning_interval
499
+ @gapi.configuration.query.range_partitioning.range.interval if range_partitioning?
500
+ end
501
+
502
+ ###
503
+ # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
504
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
505
+ #
506
+ # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned.
507
+ #
508
+ # @!group Attributes
509
+ #
510
+ def range_partitioning_end
511
+ @gapi.configuration.query.range_partitioning.range.end if range_partitioning?
512
+ end
513
+
397
514
  ###
398
515
  # Checks if the destination table will be time-partitioned. See
399
516
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
@@ -558,9 +675,12 @@ module Google
558
675
  #
559
676
  # job.wait_until_done!
560
677
  # data = job.data
678
+ #
679
+ # # Iterate over the first page of results
561
680
  # data.each do |row|
562
681
  # puts row[:word]
563
682
  # end
683
+ # # Retrieve the next page of results
564
684
  # data = data.next if data.next?
565
685
  #
566
686
  def data token: nil, max: nil, start: nil
@@ -1012,6 +1132,164 @@ module Google
1012
1132
  @gapi.configuration.query.update! destination_encryption_configuration: val.to_gapi
1013
1133
  end
1014
1134
 
1135
+ ##
1136
+ # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
1137
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1138
+ #
1139
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1140
+ #
1141
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1142
+ # partitioning on an existing table.
1143
+ #
1144
+ # @param [String] field The range partition field. The destination table is partitioned by this
1145
+ # field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
1146
+ # type is `INTEGER/INT64`.
1147
+ #
1148
+ # @example
1149
+ # require "google/cloud/bigquery"
1150
+ #
1151
+ # bigquery = Google::Cloud::Bigquery.new
1152
+ # dataset = bigquery.dataset "my_dataset"
1153
+ # destination_table = dataset.table "my_destination_table",
1154
+ # skip_lookup: true
1155
+ #
1156
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1157
+ # job.table = destination_table
1158
+ # job.range_partitioning_field = "num"
1159
+ # job.range_partitioning_start = 0
1160
+ # job.range_partitioning_interval = 10
1161
+ # job.range_partitioning_end = 100
1162
+ # end
1163
+ #
1164
+ # job.wait_until_done!
1165
+ # job.done? #=> true
1166
+ #
1167
+ # @!group Attributes
1168
+ #
1169
+ def range_partitioning_field= field
1170
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1171
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1172
+ )
1173
+ @gapi.configuration.query.range_partitioning.field = field
1174
+ end
1175
+
1176
+ ##
1177
+ # Sets the start of range partitioning, inclusive, for the destination table. See [Creating and using integer
1178
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1179
+ #
1180
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1181
+ # partitioning on an existing table.
1182
+ #
1183
+ # See {#range_partitioning_field=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1184
+ #
1185
+ # @param [Integer] range_start The start of range partitioning, inclusive.
1186
+ #
1187
+ # @example
1188
+ # require "google/cloud/bigquery"
1189
+ #
1190
+ # bigquery = Google::Cloud::Bigquery.new
1191
+ # dataset = bigquery.dataset "my_dataset"
1192
+ # destination_table = dataset.table "my_destination_table",
1193
+ # skip_lookup: true
1194
+ #
1195
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1196
+ # job.table = destination_table
1197
+ # job.range_partitioning_field = "num"
1198
+ # job.range_partitioning_start = 0
1199
+ # job.range_partitioning_interval = 10
1200
+ # job.range_partitioning_end = 100
1201
+ # end
1202
+ #
1203
+ # job.wait_until_done!
1204
+ # job.done? #=> true
1205
+ #
1206
+ # @!group Attributes
1207
+ #
1208
+ def range_partitioning_start= range_start
1209
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1210
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1211
+ )
1212
+ @gapi.configuration.query.range_partitioning.range.start = range_start
1213
+ end
1214
+
1215
+ ##
1216
+ # Sets the width of each interval for data in range partitions. See [Creating and using integer range partitioned
1217
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1218
+ #
1219
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1220
+ # partitioning on an existing table.
1221
+ #
1222
+ # See {#range_partitioning_field=}, {#range_partitioning_start=} and {#range_partitioning_end=}.
1223
+ #
1224
+ # @param [Integer] range_interval The width of each interval, for data in partitions.
1225
+ #
1226
+ # @example
1227
+ # require "google/cloud/bigquery"
1228
+ #
1229
+ # bigquery = Google::Cloud::Bigquery.new
1230
+ # dataset = bigquery.dataset "my_dataset"
1231
+ # destination_table = dataset.table "my_destination_table",
1232
+ # skip_lookup: true
1233
+ #
1234
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1235
+ # job.table = destination_table
1236
+ # job.range_partitioning_field = "num"
1237
+ # job.range_partitioning_start = 0
1238
+ # job.range_partitioning_interval = 10
1239
+ # job.range_partitioning_end = 100
1240
+ # end
1241
+ #
1242
+ # job.wait_until_done!
1243
+ # job.done? #=> true
1244
+ #
1245
+ # @!group Attributes
1246
+ #
1247
+ def range_partitioning_interval= range_interval
1248
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1249
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1250
+ )
1251
+ @gapi.configuration.query.range_partitioning.range.interval = range_interval
1252
+ end
1253
+
1254
+ ##
1255
+ # Sets the end of range partitioning, exclusive, for the destination table. See [Creating and using integer
1256
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1257
+ #
1258
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1259
+ # partitioning on an existing table.
1260
+ #
1261
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_field=}.
1262
+ #
1263
+ # @param [Integer] range_end The end of range partitioning, exclusive.
1264
+ #
1265
+ # @example
1266
+ # require "google/cloud/bigquery"
1267
+ #
1268
+ # bigquery = Google::Cloud::Bigquery.new
1269
+ # dataset = bigquery.dataset "my_dataset"
1270
+ # destination_table = dataset.table "my_destination_table",
1271
+ # skip_lookup: true
1272
+ #
1273
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1274
+ # job.table = destination_table
1275
+ # job.range_partitioning_field = "num"
1276
+ # job.range_partitioning_start = 0
1277
+ # job.range_partitioning_interval = 10
1278
+ # job.range_partitioning_end = 100
1279
+ # end
1280
+ #
1281
+ # job.wait_until_done!
1282
+ # job.done? #=> true
1283
+ #
1284
+ # @!group Attributes
1285
+ #
1286
+ def range_partitioning_end= range_end
1287
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1288
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1289
+ )
1290
+ @gapi.configuration.query.range_partitioning.range.end = range_end
1291
+ end
1292
+
1015
1293
  ##
1016
1294
  # Sets the partitioning for the destination table. See [Partitioned
1017
1295
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
@@ -1198,6 +1476,23 @@ module Google
1198
1476
  @gapi.configuration.query.clustering.fields = fields
1199
1477
  end
1200
1478
 
1479
+ def cancel
1480
+ raise "not implemented in #{self.class}"
1481
+ end
1482
+
1483
+ def rerun!
1484
+ raise "not implemented in #{self.class}"
1485
+ end
1486
+
1487
+ def reload!
1488
+ raise "not implemented in #{self.class}"
1489
+ end
1490
+ alias refresh! reload!
1491
+
1492
+ def wait_until_done!
1493
+ raise "not implemented in #{self.class}"
1494
+ end
1495
+
1201
1496
  ##
1202
1497
  # @private Returns the Google API client library version of this job.
1203
1498
  #