google-cloud-bigquery 1.14.0 → 1.42.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. checksums.yaml +4 -4
  2. data/AUTHENTICATION.md +17 -54
  3. data/CHANGELOG.md +377 -0
  4. data/CONTRIBUTING.md +328 -116
  5. data/LOGGING.md +1 -1
  6. data/OVERVIEW.md +21 -20
  7. data/TROUBLESHOOTING.md +2 -8
  8. data/lib/google/cloud/bigquery/argument.rb +197 -0
  9. data/lib/google/cloud/bigquery/convert.rb +155 -173
  10. data/lib/google/cloud/bigquery/copy_job.rb +74 -26
  11. data/lib/google/cloud/bigquery/credentials.rb +5 -12
  12. data/lib/google/cloud/bigquery/data.rb +109 -18
  13. data/lib/google/cloud/bigquery/dataset/access.rb +474 -52
  14. data/lib/google/cloud/bigquery/dataset/list.rb +7 -13
  15. data/lib/google/cloud/bigquery/dataset/tag.rb +67 -0
  16. data/lib/google/cloud/bigquery/dataset.rb +1044 -287
  17. data/lib/google/cloud/bigquery/external/avro_source.rb +107 -0
  18. data/lib/google/cloud/bigquery/external/bigtable_source/column.rb +404 -0
  19. data/lib/google/cloud/bigquery/external/bigtable_source/column_family.rb +945 -0
  20. data/lib/google/cloud/bigquery/external/bigtable_source.rb +230 -0
  21. data/lib/google/cloud/bigquery/external/csv_source.rb +481 -0
  22. data/lib/google/cloud/bigquery/external/data_source.rb +771 -0
  23. data/lib/google/cloud/bigquery/external/json_source.rb +170 -0
  24. data/lib/google/cloud/bigquery/external/parquet_source.rb +148 -0
  25. data/lib/google/cloud/bigquery/external/sheets_source.rb +166 -0
  26. data/lib/google/cloud/bigquery/external.rb +50 -2256
  27. data/lib/google/cloud/bigquery/extract_job.rb +226 -61
  28. data/lib/google/cloud/bigquery/insert_response.rb +1 -3
  29. data/lib/google/cloud/bigquery/job/list.rb +10 -14
  30. data/lib/google/cloud/bigquery/job.rb +289 -14
  31. data/lib/google/cloud/bigquery/load_job.rb +810 -136
  32. data/lib/google/cloud/bigquery/model/list.rb +5 -9
  33. data/lib/google/cloud/bigquery/model.rb +247 -16
  34. data/lib/google/cloud/bigquery/policy.rb +432 -0
  35. data/lib/google/cloud/bigquery/project/list.rb +6 -11
  36. data/lib/google/cloud/bigquery/project.rb +509 -250
  37. data/lib/google/cloud/bigquery/query_job.rb +594 -128
  38. data/lib/google/cloud/bigquery/routine/list.rb +165 -0
  39. data/lib/google/cloud/bigquery/routine.rb +1227 -0
  40. data/lib/google/cloud/bigquery/schema/field.rb +413 -63
  41. data/lib/google/cloud/bigquery/schema.rb +221 -48
  42. data/lib/google/cloud/bigquery/service.rb +204 -112
  43. data/lib/google/cloud/bigquery/standard_sql.rb +269 -53
  44. data/lib/google/cloud/bigquery/table/async_inserter.rb +86 -43
  45. data/lib/google/cloud/bigquery/table/list.rb +6 -11
  46. data/lib/google/cloud/bigquery/table.rb +1470 -377
  47. data/lib/google/cloud/bigquery/time.rb +6 -0
  48. data/lib/google/cloud/bigquery/version.rb +1 -1
  49. data/lib/google/cloud/bigquery.rb +4 -6
  50. data/lib/google-cloud-bigquery.rb +14 -13
  51. metadata +66 -38
@@ -48,6 +48,44 @@ module Google
48
48
  # puts job.data.first
49
49
  # end
50
50
  #
51
+ # @example With multiple statements and child jobs:
52
+ # require "google/cloud/bigquery"
53
+ #
54
+ # bigquery = Google::Cloud::Bigquery.new
55
+ #
56
+ # multi_statement_sql = <<~SQL
57
+ # -- Declare a variable to hold names as an array.
58
+ # DECLARE top_names ARRAY<STRING>;
59
+ # -- Build an array of the top 100 names from the year 2017.
60
+ # SET top_names = (
61
+ # SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
62
+ # FROM `bigquery-public-data.usa_names.usa_1910_current`
63
+ # WHERE year = 2017
64
+ # );
65
+ # -- Which names appear as words in Shakespeare's plays?
66
+ # SELECT
67
+ # name AS shakespeare_name
68
+ # FROM UNNEST(top_names) AS name
69
+ # WHERE name IN (
70
+ # SELECT word
71
+ # FROM `bigquery-public-data.samples.shakespeare`
72
+ # );
73
+ # SQL
74
+ #
75
+ # job = bigquery.query_job multi_statement_sql
76
+ #
77
+ # job.wait_until_done!
78
+ #
79
+ # child_jobs = bigquery.jobs parent_job: job
80
+ #
81
+ # child_jobs.each do |child_job|
82
+ # script_statistics = child_job.script_statistics
83
+ # puts script_statistics.evaluation_kind
84
+ # script_statistics.stack_frames.each do |stack_frame|
85
+ # puts stack_frame.text
86
+ # end
87
+ # end
88
+ #
51
89
  class QueryJob < Job
52
90
  ##
53
91
  # Checks if the priority for the query is `BATCH`.
@@ -56,8 +94,7 @@ module Google
56
94
  # otherwise.
57
95
  #
58
96
  def batch?
59
- val = @gapi.configuration.query.priority
60
- val == "BATCH"
97
+ @gapi.configuration.query.priority == "BATCH"
61
98
  end
62
99
 
63
100
  ##
@@ -205,17 +242,16 @@ module Google
205
242
  # end
206
243
  #
207
244
  def query_plan
208
- return nil unless @gapi.statistics.query &&
209
- @gapi.statistics.query.query_plan
210
- Array(@gapi.statistics.query.query_plan).map do |stage|
211
- Stage.from_gapi stage
212
- end
245
+ return nil unless @gapi&.statistics&.query&.query_plan
246
+ Array(@gapi.statistics.query.query_plan).map { |stage| Stage.from_gapi stage }
213
247
  end
214
248
 
215
249
  ##
216
250
  # The type of query statement, if valid. Possible values (new values
217
251
  # might be added in the future):
218
252
  #
253
+ # * "ALTER_TABLE": DDL statement, see [Using Data Definition Language
254
+ # Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
219
255
  # * "CREATE_MODEL": DDL statement, see [Using Data Definition Language
220
256
  # Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
221
257
  # * "CREATE_TABLE": DDL statement, see [Using Data Definition Language
@@ -261,8 +297,16 @@ module Google
261
297
  # query_job.ddl? #=> true
262
298
  #
263
299
  def ddl?
264
- %w[CREATE_MODEL CREATE_TABLE CREATE_TABLE_AS_SELECT CREATE_VIEW \
265
- DROP_MODEL DROP_TABLE DROP_VIEW].include? statement_type
300
+ [
301
+ "ALTER_TABLE",
302
+ "CREATE_MODEL",
303
+ "CREATE_TABLE",
304
+ "CREATE_TABLE_AS_SELECT",
305
+ "CREATE_VIEW",
306
+ "DROP_MODEL",
307
+ "DROP_TABLE",
308
+ "DROP_VIEW"
309
+ ].include? statement_type
266
310
  end
267
311
 
268
312
  ##
@@ -285,7 +329,12 @@ module Google
285
329
  # query_job.dml? #=> true
286
330
  #
287
331
  def dml?
288
- %w[INSERT UPDATE MERGE DELETE].include? statement_type
332
+ [
333
+ "INSERT",
334
+ "UPDATE",
335
+ "MERGE",
336
+ "DELETE"
337
+ ].include? statement_type
289
338
  end
290
339
 
291
340
  ##
@@ -308,6 +357,22 @@ module Google
308
357
  @gapi.statistics.query.ddl_operation_performed
309
358
  end
310
359
 
360
+ ##
361
+ # The DDL target routine, in reference state. (See {Routine#reference?}.)
362
+ # Present only for `CREATE/DROP FUNCTION/PROCEDURE` queries. (See
363
+ # {#statement_type}.)
364
+ #
365
+ # @return [Google::Cloud::Bigquery::Routine, nil] The DDL target routine, in
366
+ # reference state.
367
+ #
368
+ def ddl_target_routine
369
+ return nil unless @gapi.statistics.query
370
+ ensure_service!
371
+ routine = @gapi.statistics.query.ddl_target_routine
372
+ return nil unless routine
373
+ Google::Cloud::Bigquery::Routine.new_reference_from_gapi routine, service
374
+ end
375
+
311
376
  ##
312
377
  # The DDL target table, in reference state. (See {Table#reference?}.)
313
378
  # Present only for `CREATE/DROP TABLE/VIEW` queries. (See
@@ -336,17 +401,57 @@ module Google
336
401
  @gapi.statistics.query.num_dml_affected_rows
337
402
  end
338
403
 
404
+ ##
405
+ # The number of deleted rows. Present only for DML statements `DELETE`,
406
+ # `MERGE` and `TRUNCATE`. (See {#statement_type}.)
407
+ #
408
+ # @return [Integer, nil] The number of deleted rows, or `nil` if not
409
+ # applicable.
410
+ #
411
+ def deleted_row_count
412
+ @gapi.statistics.query&.dml_stats&.deleted_row_count
413
+ end
414
+
415
+ ##
416
+ # The number of inserted rows. Present only for DML statements `INSERT`
417
+ # and `MERGE`. (See {#statement_type}.)
418
+ #
419
+ # @return [Integer, nil] The number of inserted rows, or `nil` if not
420
+ # applicable.
421
+ #
422
+ def inserted_row_count
423
+ @gapi.statistics.query&.dml_stats&.inserted_row_count
424
+ end
425
+
426
+ ##
427
+ # The number of updated rows. Present only for DML statements `UPDATE`
428
+ # and `MERGE`. (See {#statement_type}.)
429
+ #
430
+ # @return [Integer, nil] The number of updated rows, or `nil` if not
431
+ # applicable.
432
+ #
433
+ def updated_row_count
434
+ @gapi.statistics.query&.dml_stats&.updated_row_count
435
+ end
436
+
339
437
  ##
340
438
  # The table in which the query results are stored.
341
439
  #
440
+ # @param [String] view Specifies the view that determines which table information is returned.
441
+ # By default, basic table information and storage statistics (STORAGE_STATS) are returned.
442
+ # Accepted values include `:unspecified`, `:basic`, `:storage`, and
443
+ # `:full`. For more information, see the `view` parameter of the BigQuery [tables.get](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/get) API reference.
444
+ # The default value is the `:unspecified` view type.
445
+ #
342
446
  # @return [Table] A table instance.
343
447
  #
344
- def destination
448
+ def destination view: nil
345
449
  table = @gapi.configuration.query.destination_table
346
450
  return nil unless table
347
451
  retrieve_table table.project_id,
348
452
  table.dataset_id,
349
- table.table_id
453
+ table.table_id,
454
+ metadata_view: view
350
455
  end
351
456
 
352
457
  ##
@@ -383,9 +488,7 @@ module Google
383
488
  def udfs
384
489
  udfs_gapi = @gapi.configuration.query.user_defined_function_resources
385
490
  return nil unless udfs_gapi
386
- Array(udfs_gapi).map do |udf|
387
- udf.inline_code || udf.resource_uri
388
- end
491
+ Array(udfs_gapi).map { |udf| udf.inline_code || udf.resource_uri }
389
492
  end
390
493
 
391
494
  ##
@@ -396,16 +499,77 @@ module Google
396
499
  #
397
500
  # @!group Attributes
398
501
  def encryption
399
- EncryptionConfiguration.from_gapi(
400
- @gapi.configuration.query.destination_encryption_configuration
401
- )
502
+ EncryptionConfiguration.from_gapi @gapi.configuration.query.destination_encryption_configuration
503
+ end
504
+
505
+ ###
506
+ # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
507
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
508
+ #
509
+ # @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
510
+ #
511
+ # @!group Attributes
512
+ #
513
+ def range_partitioning?
514
+ !@gapi.configuration.query.range_partitioning.nil?
515
+ end
516
+
517
+ ###
518
+ # The field on which the destination table will be range partitioned, if any. The field must be a
519
+ # top-level `NULLABLE/REQUIRED` field. The only supported type is `INTEGER/INT64`. See
520
+ # [Creating and using integer range partitioned
521
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
522
+ #
523
+ # @return [String, nil] The partition field, if a field was configured, or `nil` if not range partitioned.
524
+ #
525
+ # @!group Attributes
526
+ #
527
+ def range_partitioning_field
528
+ @gapi.configuration.query.range_partitioning.field if range_partitioning?
529
+ end
530
+
531
+ ###
532
+ # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
533
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
534
+ #
535
+ # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned.
536
+ #
537
+ # @!group Attributes
538
+ #
539
+ def range_partitioning_start
540
+ @gapi.configuration.query.range_partitioning.range.start if range_partitioning?
541
+ end
542
+
543
+ ###
544
+ # The width of each interval. See [Creating and using integer range partitioned
545
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
546
+ #
547
+ # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
548
+ # partitioned.
549
+ #
550
+ # @!group Attributes
551
+ #
552
+ def range_partitioning_interval
553
+ @gapi.configuration.query.range_partitioning.range.interval if range_partitioning?
554
+ end
555
+
556
+ ###
557
+ # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
558
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
559
+ #
560
+ # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned.
561
+ #
562
+ # @!group Attributes
563
+ #
564
+ def range_partitioning_end
565
+ @gapi.configuration.query.range_partitioning.range.end if range_partitioning?
402
566
  end
403
567
 
404
568
  ###
405
569
  # Checks if the destination table will be time-partitioned. See
406
570
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
407
571
  #
408
- # @return [Boolean, nil] `true` when the table will be time-partitioned,
572
+ # @return [Boolean] `true` when the table will be time-partitioned,
409
573
  # or `false` otherwise.
410
574
  #
411
575
  # @!group Attributes
@@ -418,8 +582,9 @@ module Google
418
582
  # The period for which the destination table will be partitioned, if
419
583
  # any. See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
420
584
  #
421
- # @return [String, nil] The partition type. Currently the only supported
422
- # value is "DAY", or `nil` if not present.
585
+ # @return [String, nil] The partition type. The supported types are `DAY`,
586
+ # `HOUR`, `MONTH`, and `YEAR`, which will generate one partition per day,
587
+ # hour, month, and year, respectively; or `nil` if not present.
423
588
  #
424
589
  # @!group Attributes
425
590
  #
@@ -479,10 +644,15 @@ module Google
479
644
  ###
480
645
  # Checks if the destination table will be clustered.
481
646
  #
647
+ # See {QueryJob::Updater#clustering_fields=}, {Table#clustering_fields} and
648
+ # {Table#clustering_fields=}.
649
+ #
482
650
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
483
- # Introduction to Clustered Tables
651
+ # Introduction to clustered tables
652
+ # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
653
+ # Creating and using clustered tables
484
654
  #
485
- # @return [Boolean, nil] `true` when the table will be clustered,
655
+ # @return [Boolean] `true` when the table will be clustered,
486
656
  # or `false` otherwise.
487
657
  #
488
658
  # @!group Attributes
@@ -497,14 +667,16 @@ module Google
497
667
  # be first partitioned and subsequently clustered. The order of the
498
668
  # returned fields determines the sort order of the data.
499
669
  #
500
- # See {QueryJob::Updater#clustering_fields=}.
670
+ # BigQuery supports clustering for both partitioned and non-partitioned
671
+ # tables.
672
+ #
673
+ # See {QueryJob::Updater#clustering_fields=}, {Table#clustering_fields} and
674
+ # {Table#clustering_fields=}.
501
675
  #
502
- # @see https://cloud.google.com/bigquery/docs/partitioned-tables
503
- # Partitioned Tables
504
676
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
505
- # Introduction to Clustered Tables
677
+ # Introduction to clustered tables
506
678
  # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
507
- # Creating and Using Clustered Tables
679
+ # Creating and using clustered tables
508
680
  #
509
681
  # @return [Array<String>, nil] The clustering fields, or `nil` if the
510
682
  # destination table will not be clustered.
@@ -535,8 +707,7 @@ module Google
535
707
 
536
708
  ensure_service!
537
709
  loop do
538
- query_results_gapi = service.job_query_results \
539
- job_id, location: location, max: 0
710
+ query_results_gapi = service.job_query_results job_id, location: location, max: 0
540
711
  if query_results_gapi.job_complete
541
712
  @destination_schema_gapi = query_results_gapi.schema
542
713
  break
@@ -566,27 +737,28 @@ module Google
566
737
  #
567
738
  # job.wait_until_done!
568
739
  # data = job.data
740
+ #
741
+ # # Iterate over the first page of results
569
742
  # data.each do |row|
570
743
  # puts row[:word]
571
744
  # end
745
+ # # Retrieve the next page of results
572
746
  # data = data.next if data.next?
573
747
  #
574
748
  def data token: nil, max: nil, start: nil
575
749
  return nil unless done?
576
- if dryrun?
577
- return Data.from_gapi_json({ rows: [] }, nil, @gapi, service)
578
- end
750
+ return Data.from_gapi_json({ rows: [] }, nil, @gapi, service) if dryrun?
579
751
  if ddl? || dml?
580
752
  data_hash = { totalRows: nil, rows: [] }
581
753
  return Data.from_gapi_json data_hash, nil, @gapi, service
582
754
  end
583
755
  ensure_schema!
584
756
 
585
- options = { token: token, max: max, start: start }
586
- data_hash = service.list_tabledata \
587
- destination_table_dataset_id,
588
- destination_table_table_id,
589
- options
757
+ data_hash = service.list_tabledata destination_table_dataset_id,
758
+ destination_table_table_id,
759
+ token: token,
760
+ max: max,
761
+ start: start
590
762
  Data.from_gapi_json data_hash, destination_table_gapi, @gapi, service
591
763
  end
592
764
  alias query_results data
@@ -597,12 +769,11 @@ module Google
597
769
  ##
598
770
  # @private Create an Updater object.
599
771
  def initialize service, gapi
772
+ super()
600
773
  @service = service
601
774
  @gapi = gapi
602
775
  end
603
776
 
604
- # rubocop:disable all
605
-
606
777
  ##
607
778
  # @private Create an Updater from an options hash.
608
779
  #
@@ -624,15 +795,16 @@ module Google
624
795
  )
625
796
 
626
797
  updater = QueryJob::Updater.new service, req
627
- updater.params = options[:params] if options[:params]
798
+ updater.set_params_and_types options[:params], options[:types] if options[:params]
628
799
  updater.create = options[:create]
800
+ updater.create_session = options[:create_session]
801
+ updater.session_id = options[:session_id] if options[:session_id]
629
802
  updater.write = options[:write]
630
803
  updater.table = options[:table]
631
804
  updater.dryrun = options[:dryrun]
632
805
  updater.maximum_bytes_billed = options[:maximum_bytes_billed]
633
806
  updater.labels = options[:labels] if options[:labels]
634
- updater.legacy_sql = Convert.resolve_legacy_sql(
635
- options[:standard_sql], options[:legacy_sql])
807
+ updater.legacy_sql = Convert.resolve_legacy_sql options[:standard_sql], options[:legacy_sql]
636
808
  updater.external = options[:external] if options[:external]
637
809
  updater.priority = options[:priority]
638
810
  updater.cache = options[:cache]
@@ -642,8 +814,6 @@ module Google
642
814
  updater
643
815
  end
644
816
 
645
- # rubocop:enable all
646
-
647
817
  ##
648
818
  # Sets the geographic location where the job should run. Required
649
819
  # except for US and EU.
@@ -731,41 +901,127 @@ module Google
731
901
  #
732
902
  # @!group Attributes
733
903
  def dataset= value
734
- @gapi.configuration.query.default_dataset =
735
- @service.dataset_ref_from value
904
+ @gapi.configuration.query.default_dataset = @service.dataset_ref_from value
736
905
  end
737
906
 
738
907
  ##
739
908
  # Sets the query parameters. Standard SQL only.
740
909
  #
741
- # @param [Array, Hash] params Used to pass query arguments when the
742
- # `query` string contains either positional (`?`) or named
743
- # (`@myparam`) query parameters. If value passed is an array
744
- # `["foo"]`, the query must use positional query parameters. If
745
- # value passed is a hash `{ myparam: "foo" }`, the query must use
746
- # named query parameters. When set, `legacy_sql` will automatically
747
- # be set to false and `standard_sql` to true.
910
+ # Use {set_params_and_types} to set both params and types.
911
+ #
912
+ # @param [Array, Hash] params Standard SQL only. Used to pass query arguments when the `query` string contains
913
+ # either positional (`?`) or named (`@myparam`) query parameters. If value passed is an array `["foo"]`, the
914
+ # query must use positional query parameters. If value passed is a hash `{ myparam: "foo" }`, the query must
915
+ # use named query parameters. When set, `legacy_sql` will automatically be set to false and `standard_sql`
916
+ # to true.
917
+ #
918
+ # BigQuery types are converted from Ruby types as follows:
919
+ #
920
+ # | BigQuery | Ruby | Notes |
921
+ # |--------------|--------------------------------------|--------------------------------------------------|
922
+ # | `BOOL` | `true`/`false` | |
923
+ # | `INT64` | `Integer` | |
924
+ # | `FLOAT64` | `Float` | |
925
+ # | `NUMERIC` | `BigDecimal` | `BigDecimal` values will be rounded to scale 9. |
926
+ # | `BIGNUMERIC` | `BigDecimal` | NOT AUTOMATIC: Must be mapped using `types`. |
927
+ # | `STRING` | `String` | |
928
+ # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
929
+ # | `DATE` | `Date` | |
930
+ # | `GEOGRAPHY` | `String` (WKT or GeoJSON) | NOT AUTOMATIC: Must be mapped using `types`. |
931
+ # | `TIMESTAMP` | `Time` | |
932
+ # | `TIME` | `Google::Cloud::BigQuery::Time` | |
933
+ # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
934
+ # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
935
+ # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
936
+ #
937
+ # See [Data Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types) for an overview
938
+ # of each BigQuery data type, including allowed values. For the `GEOGRAPHY` type, see [Working with BigQuery
939
+ # GIS data](https://cloud.google.com/bigquery/docs/gis-data).
748
940
  #
749
941
  # @!group Attributes
750
942
  def params= params
943
+ set_params_and_types params
944
+ end
945
+
946
+ ##
947
+ # Sets the query parameters. Standard SQL only.
948
+ #
949
+ # @param [Array, Hash] params Standard SQL only. Used to pass query arguments when the `query` string contains
950
+ # either positional (`?`) or named (`@myparam`) query parameters. If value passed is an array `["foo"]`, the
951
+ # query must use positional query parameters. If value passed is a hash `{ myparam: "foo" }`, the query must
952
+ # use named query parameters. When set, `legacy_sql` will automatically be set to false and `standard_sql`
953
+ # to true.
954
+ #
955
+ # BigQuery types are converted from Ruby types as follows:
956
+ #
957
+ # | BigQuery | Ruby | Notes |
958
+ # |--------------|--------------------------------------|--------------------------------------------------|
959
+ # | `BOOL` | `true`/`false` | |
960
+ # | `INT64` | `Integer` | |
961
+ # | `FLOAT64` | `Float` | |
962
+ # | `NUMERIC` | `BigDecimal` | `BigDecimal` values will be rounded to scale 9. |
963
+ # | `BIGNUMERIC` | `BigDecimal` | NOT AUTOMATIC: Must be mapped using `types`. |
964
+ # | `STRING` | `String` | |
965
+ # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
966
+ # | `DATE` | `Date` | |
967
+ # | `GEOGRAPHY` | `String` (WKT or GeoJSON) | NOT AUTOMATIC: Must be mapped using `types`. |
968
+ # | `TIMESTAMP` | `Time` | |
969
+ # | `TIME` | `Google::Cloud::BigQuery::Time` | |
970
+ # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
971
+ # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
972
+ # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
973
+ #
974
+ # See [Data Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types) for an overview
975
+ # of each BigQuery data type, including allowed values. For the `GEOGRAPHY` type, see [Working with BigQuery
976
+ # GIS data](https://cloud.google.com/bigquery/docs/gis-data).
977
+ # @param [Array, Hash] types Standard SQL only. Types of the SQL parameters in `params`. It is not always
978
+ # possible to infer the right SQL type from a value in `params`. In these cases, `types` must be used to
979
+ # specify the SQL type for these values.
980
+ #
981
+ # Arguments must match the value type passed to `params`. This must be an `Array` when the query uses
982
+ # positional query parameters. This must be a `Hash` when the query uses named query parameters. The values
983
+ # should be BigQuery type codes from the following list:
984
+ #
985
+ # * `:BOOL`
986
+ # * `:INT64`
987
+ # * `:FLOAT64`
988
+ # * `:NUMERIC`
989
+ # * `:BIGNUMERIC`
990
+ # * `:STRING`
991
+ # * `:DATETIME`
992
+ # * `:DATE`
993
+ # * `:GEOGRAPHY`
994
+ # * `:TIMESTAMP`
995
+ # * `:TIME`
996
+ # * `:BYTES`
997
+ # * `Array` - Lists are specified by providing the type code in an array. For example, an array of integers
998
+ # are specified as `[:INT64]`.
999
+ # * `Hash` - Types for STRUCT values (`Hash` objects) are specified using a `Hash` object, where the keys
1000
+ # match the `params` hash, and the values are the type codes that match the data.
1001
+ #
1002
+ # Types are optional.
1003
+ #
1004
+ # @!group Attributes
1005
+ def set_params_and_types params, types = nil
1006
+ types ||= params.class.new
1007
+ raise ArgumentError, "types must use the same format as params" if types.class != params.class
1008
+
751
1009
  case params
752
- when Array then
1010
+ when Array
753
1011
  @gapi.configuration.query.use_legacy_sql = false
754
1012
  @gapi.configuration.query.parameter_mode = "POSITIONAL"
755
- @gapi.configuration.query.query_parameters = params.map do |param|
756
- Convert.to_query_param param
1013
+ @gapi.configuration.query.query_parameters = params.zip(types).map do |param, type|
1014
+ Convert.to_query_param param, type
757
1015
  end
758
- when Hash then
1016
+ when Hash
759
1017
  @gapi.configuration.query.use_legacy_sql = false
760
1018
  @gapi.configuration.query.parameter_mode = "NAMED"
761
- @gapi.configuration.query.query_parameters =
762
- params.map do |name, param|
763
- Convert.to_query_param(param).tap do |named_param|
764
- named_param.name = String name
765
- end
766
- end
1019
+ @gapi.configuration.query.query_parameters = params.map do |name, param|
1020
+ type = types[name]
1021
+ Convert.to_query_param(param, type).tap { |named_param| named_param.name = String name }
1022
+ end
767
1023
  else
768
- raise "Query parameters must be an Array or a Hash."
1024
+ raise ArgumentError, "params must be an Array or a Hash"
769
1025
  end
770
1026
  end
771
1027
 
@@ -783,8 +1039,38 @@ module Google
783
1039
  #
784
1040
  # @!group Attributes
785
1041
  def create= value
786
- @gapi.configuration.query.create_disposition =
787
- Convert.create_disposition value
1042
+ @gapi.configuration.query.create_disposition = Convert.create_disposition value
1043
+ end
1044
+
1045
+ ##
1046
+ # Sets the create_session property. If true, creates a new session,
1047
+ # where session id will be a server generated random id. If false,
1048
+ # runs query with an existing {#session_id=}, otherwise runs query in
1049
+ # non-session mode. The default value is `false`.
1050
+ #
1051
+ # @param [Boolean] value The create_session property. The default
1052
+ # value is `false`.
1053
+ #
1054
+ # @!group Attributes
1055
+ def create_session= value
1056
+ @gapi.configuration.query.create_session = value
1057
+ end
1058
+
1059
+ ##
1060
+ # Sets the session ID for a query run in session mode. See {#create_session=}.
1061
+ #
1062
+ # @param [String] value The session ID. The default value is `nil`.
1063
+ #
1064
+ # @!group Attributes
1065
+ def session_id= value
1066
+ @gapi.configuration.query.connection_properties ||= []
1067
+ prop = @gapi.configuration.query.connection_properties.find { |cp| cp.key == "session_id" }
1068
+ if prop
1069
+ prop.value = value
1070
+ else
1071
+ prop = Google::Apis::BigqueryV2::ConnectionProperty.new key: "session_id", value: value
1072
+ @gapi.configuration.query.connection_properties << prop
1073
+ end
788
1074
  end
789
1075
 
790
1076
  ##
@@ -802,8 +1088,7 @@ module Google
802
1088
  #
803
1089
  # @!group Attributes
804
1090
  def write= value
805
- @gapi.configuration.query.write_disposition =
806
- Convert.write_disposition value
1091
+ @gapi.configuration.query.write_disposition = Convert.write_disposition value
807
1092
  end
808
1093
 
809
1094
  ##
@@ -849,12 +1134,21 @@ module Google
849
1134
  # Sets the labels to use for the job.
850
1135
  #
851
1136
  # @param [Hash] value A hash of user-provided labels associated with
852
- # the job. You can use these to organize and group your jobs. Label
853
- # keys and values can be no longer than 63 characters, can only
854
- # contain lowercase letters, numeric characters, underscores and
855
- # dashes. International characters are allowed. Label values are
856
- # optional. Label keys must start with a letter and each label in
857
- # the list must have a different key.
1137
+ # the job. You can use these to organize and group your jobs.
1138
+ #
1139
+ # The labels applied to a resource must meet the following requirements:
1140
+ #
1141
+ # * Each resource can have multiple labels, up to a maximum of 64.
1142
+ # * Each label must be a key-value pair.
1143
+ # * Keys have a minimum length of 1 character and a maximum length of
1144
+ # 63 characters, and cannot be empty. Values can be empty, and have
1145
+ # a maximum length of 63 characters.
1146
+ # * Keys and values can contain only lowercase letters, numeric characters,
1147
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
1148
+ # international characters are allowed.
1149
+ # * The key portion of a label must be unique. However, you can use the
1150
+ # same key with multiple resources.
1151
+ # * Keys must start with a lowercase letter or international character.
858
1152
  #
859
1153
  # @!group Attributes
860
1154
  #
@@ -905,10 +1199,8 @@ module Google
905
1199
  # @!group Attributes
906
1200
  #
907
1201
  def external= value
908
- external_table_pairs = value.map do |name, obj|
909
- [String(name), obj.to_gapi]
910
- end
911
- external_table_hash = Hash[external_table_pairs]
1202
+ external_table_pairs = value.map { |name, obj| [String(name), obj.to_gapi] }
1203
+ external_table_hash = external_table_pairs.to_h
912
1204
  @gapi.configuration.query.table_definitions = external_table_hash
913
1205
  end
914
1206
 
@@ -925,8 +1217,7 @@ module Google
925
1217
  #
926
1218
  # @!group Attributes
927
1219
  def udfs= value
928
- @gapi.configuration.query.user_defined_function_resources =
929
- udfs_gapi_from value
1220
+ @gapi.configuration.query.user_defined_function_resources = udfs_gapi_from value
930
1221
  end
931
1222
 
932
1223
  ##
@@ -950,21 +1241,180 @@ module Google
950
1241
  #
951
1242
  # @!group Attributes
952
1243
  def encryption= val
953
- @gapi.configuration.query.update!(
954
- destination_encryption_configuration: val.to_gapi
1244
+ @gapi.configuration.query.update! destination_encryption_configuration: val.to_gapi
1245
+ end
1246
+
1247
+ ##
1248
+ # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
1249
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1250
+ #
1251
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1252
+ #
1253
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1254
+ # partitioning on an existing table.
1255
+ #
1256
+ # @param [String] field The range partition field. the destination table is partitioned by this
1257
+ # field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
1258
+ # type is `INTEGER/INT64`.
1259
+ #
1260
+ # @example
1261
+ # require "google/cloud/bigquery"
1262
+ #
1263
+ # bigquery = Google::Cloud::Bigquery.new
1264
+ # dataset = bigquery.dataset "my_dataset"
1265
+ # destination_table = dataset.table "my_destination_table",
1266
+ # skip_lookup: true
1267
+ #
1268
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1269
+ # job.table = destination_table
1270
+ # job.range_partitioning_field = "num"
1271
+ # job.range_partitioning_start = 0
1272
+ # job.range_partitioning_interval = 10
1273
+ # job.range_partitioning_end = 100
1274
+ # end
1275
+ #
1276
+ # job.wait_until_done!
1277
+ # job.done? #=> true
1278
+ #
1279
+ # @!group Attributes
1280
+ #
1281
+ def range_partitioning_field= field
1282
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1283
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1284
+ )
1285
+ @gapi.configuration.query.range_partitioning.field = field
1286
+ end
1287
+
1288
+ ##
1289
+ # Sets the start of range partitioning, inclusive, for the destination table. See [Creating and using integer
1290
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1291
+ #
1292
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1293
+ # partitioning on an existing table.
1294
+ #
1295
+ # See {#range_partitioning_field=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1296
+ #
1297
+ # @param [Integer] range_start The start of range partitioning, inclusive.
1298
+ #
1299
+ # @example
1300
+ # require "google/cloud/bigquery"
1301
+ #
1302
+ # bigquery = Google::Cloud::Bigquery.new
1303
+ # dataset = bigquery.dataset "my_dataset"
1304
+ # destination_table = dataset.table "my_destination_table",
1305
+ # skip_lookup: true
1306
+ #
1307
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1308
+ # job.table = destination_table
1309
+ # job.range_partitioning_field = "num"
1310
+ # job.range_partitioning_start = 0
1311
+ # job.range_partitioning_interval = 10
1312
+ # job.range_partitioning_end = 100
1313
+ # end
1314
+ #
1315
+ # job.wait_until_done!
1316
+ # job.done? #=> true
1317
+ #
1318
+ # @!group Attributes
1319
+ #
1320
+ def range_partitioning_start= range_start
1321
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1322
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1323
+ )
1324
+ @gapi.configuration.query.range_partitioning.range.start = range_start
1325
+ end
1326
+
1327
+ ##
1328
+ # Sets width of each interval for data in range partitions. See [Creating and using integer range partitioned
1329
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1330
+ #
1331
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1332
+ # partitioning on an existing table.
1333
+ #
1334
+ # See {#range_partitioning_field=}, {#range_partitioning_start=} and {#range_partitioning_end=}.
1335
+ #
1336
+ # @param [Integer] range_interval The width of each interval, for data in partitions.
1337
+ #
1338
+ # @example
1339
+ # require "google/cloud/bigquery"
1340
+ #
1341
+ # bigquery = Google::Cloud::Bigquery.new
1342
+ # dataset = bigquery.dataset "my_dataset"
1343
+ # destination_table = dataset.table "my_destination_table",
1344
+ # skip_lookup: true
1345
+ #
1346
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1347
+ # job.table = destination_table
1348
+ # job.range_partitioning_field = "num"
1349
+ # job.range_partitioning_start = 0
1350
+ # job.range_partitioning_interval = 10
1351
+ # job.range_partitioning_end = 100
1352
+ # end
1353
+ #
1354
+ # job.wait_until_done!
1355
+ # job.done? #=> true
1356
+ #
1357
+ # @!group Attributes
1358
+ #
1359
+ def range_partitioning_interval= range_interval
1360
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1361
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
955
1362
  )
1363
+ @gapi.configuration.query.range_partitioning.range.interval = range_interval
1364
+ end
1365
+
1366
+ ##
1367
+ # Sets the end of range partitioning, exclusive, for the destination table. See [Creating and using integer
1368
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1369
+ #
1370
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1371
+ # partitioning on an existing table.
1372
+ #
1373
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_field=}.
1374
+ #
1375
+ # @param [Integer] range_end The end of range partitioning, exclusive.
1376
+ #
1377
+ # @example
1378
+ # require "google/cloud/bigquery"
1379
+ #
1380
+ # bigquery = Google::Cloud::Bigquery.new
1381
+ # dataset = bigquery.dataset "my_dataset"
1382
+ # destination_table = dataset.table "my_destination_table",
1383
+ # skip_lookup: true
1384
+ #
1385
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1386
+ # job.table = destination_table
1387
+ # job.range_partitioning_field = "num"
1388
+ # job.range_partitioning_start = 0
1389
+ # job.range_partitioning_interval = 10
1390
+ # job.range_partitioning_end = 100
1391
+ # end
1392
+ #
1393
+ # job.wait_until_done!
1394
+ # job.done? #=> true
1395
+ #
1396
+ # @!group Attributes
1397
+ #
1398
+ def range_partitioning_end= range_end
1399
+ @gapi.configuration.query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
1400
+ range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
1401
+ )
1402
+ @gapi.configuration.query.range_partitioning.range.end = range_end
956
1403
  end
957
1404
 
958
1405
  ##
959
1406
  # Sets the partitioning for the destination table. See [Partitioned
960
1407
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
1408
+ # The supported types are `DAY`, `HOUR`, `MONTH`, and `YEAR`, which will
1409
+ # generate one partition per day, hour, month, and year, respectively.
961
1410
  #
962
1411
  # You can only set the partitioning field while creating a table.
963
1412
  # BigQuery does not allow you to change partitioning on an existing
964
1413
  # table.
965
1414
  #
966
- # @param [String] type The partition type. Currently the only
967
- # supported value is "DAY".
1415
+ # @param [String] type The partition type. The supported types are `DAY`,
1416
+ # `HOUR`, `MONTH`, and `YEAR`, which will generate one partition per day,
1417
+ # hour, month, and year, respectively.
968
1418
  #
969
1419
  # @example
970
1420
  # require "google/cloud/bigquery"
@@ -989,8 +1439,7 @@ module Google
989
1439
  # @!group Attributes
990
1440
  #
991
1441
  def time_partitioning_type= type
992
- @gapi.configuration.query.time_partitioning ||= \
993
- Google::Apis::BigqueryV2::TimePartitioning.new
1442
+ @gapi.configuration.query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
994
1443
  @gapi.configuration.query.time_partitioning.update! type: type
995
1444
  end
996
1445
 
@@ -1036,8 +1485,7 @@ module Google
1036
1485
  # @!group Attributes
1037
1486
  #
1038
1487
  def time_partitioning_field= field
1039
- @gapi.configuration.query.time_partitioning ||= \
1040
- Google::Apis::BigqueryV2::TimePartitioning.new
1488
+ @gapi.configuration.query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
1041
1489
  @gapi.configuration.query.time_partitioning.update! field: field
1042
1490
  end
1043
1491
 
@@ -1076,10 +1524,8 @@ module Google
1076
1524
  # @!group Attributes
1077
1525
  #
1078
1526
  def time_partitioning_expiration= expiration
1079
- @gapi.configuration.query.time_partitioning ||= \
1080
- Google::Apis::BigqueryV2::TimePartitioning.new
1081
- @gapi.configuration.query.time_partitioning.update! \
1082
- expiration_ms: expiration * 1000
1527
+ @gapi.configuration.query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
1528
+ @gapi.configuration.query.time_partitioning.update! expiration_ms: expiration * 1000
1083
1529
  end
1084
1530
 
1085
1531
  ##
@@ -1094,30 +1540,28 @@ module Google
1094
1540
  # @!group Attributes
1095
1541
  #
1096
1542
  def time_partitioning_require_filter= val
1097
- @gapi.configuration.query.time_partitioning ||= \
1098
- Google::Apis::BigqueryV2::TimePartitioning.new
1099
- @gapi.configuration.query.time_partitioning.update! \
1100
- require_partition_filter: val
1543
+ @gapi.configuration.query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
1544
+ @gapi.configuration.query.time_partitioning.update! require_partition_filter: val
1101
1545
  end
1102
1546
 
1103
1547
  ##
1104
- # Sets one or more fields on which the destination table should be
1105
- # clustered. Must be specified with time-based partitioning, data in
1106
- # the table will be first partitioned and subsequently clustered.
1548
+ # Sets the list of fields on which data should be clustered.
1107
1549
  #
1108
1550
  # Only top-level, non-repeated, simple-type fields are supported. When
1109
1551
  # you cluster a table using multiple columns, the order of columns you
1110
1552
  # specify is important. The order of the specified columns determines
1111
1553
  # the sort order of the data.
1112
1554
  #
1113
- # See {QueryJob#clustering_fields}.
1555
+ # BigQuery supports clustering for both partitioned and non-partitioned
1556
+ # tables.
1557
+ #
1558
+ # See {QueryJob#clustering_fields}, {Table#clustering_fields} and
1559
+ # {Table#clustering_fields=}.
1114
1560
  #
1115
- # @see https://cloud.google.com/bigquery/docs/partitioned-tables
1116
- # Partitioned Tables
1117
1561
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
1118
- # Introduction to Clustered Tables
1562
+ # Introduction to clustered tables
1119
1563
  # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
1120
- # Creating and Using Clustered Tables
1564
+ # Creating and using clustered tables
1121
1565
  #
1122
1566
  # @param [Array<String>] fields The clustering fields. Only top-level,
1123
1567
  # non-repeated, simple-type fields are supported.
@@ -1143,11 +1587,27 @@ module Google
1143
1587
  # @!group Attributes
1144
1588
  #
1145
1589
  def clustering_fields= fields
1146
- @gapi.configuration.query.clustering ||= \
1147
- Google::Apis::BigqueryV2::Clustering.new
1590
+ @gapi.configuration.query.clustering ||= Google::Apis::BigqueryV2::Clustering.new
1148
1591
  @gapi.configuration.query.clustering.fields = fields
1149
1592
  end
1150
1593
 
1594
+ def cancel
1595
+ raise "not implemented in #{self.class}"
1596
+ end
1597
+
1598
+ def rerun!
1599
+ raise "not implemented in #{self.class}"
1600
+ end
1601
+
1602
+ def reload!
1603
+ raise "not implemented in #{self.class}"
1604
+ end
1605
+ alias refresh! reload!
1606
+
1607
+ def wait_until_done!
1608
+ raise "not implemented in #{self.class}"
1609
+ end
1610
+
1151
1611
  ##
1152
1612
  # @private Returns the Google API client library version of this job.
1153
1613
  #
@@ -1170,14 +1630,12 @@ module Google
1170
1630
  end
1171
1631
 
1172
1632
  def priority_value str
1173
- { "batch" => "BATCH",
1174
- "interactive" => "INTERACTIVE" }[str.to_s.downcase]
1633
+ { "batch" => "BATCH", "interactive" => "INTERACTIVE" }[str.to_s.downcase]
1175
1634
  end
1176
1635
 
1177
1636
  def udfs_gapi_from array_or_str
1178
1637
  Array(array_or_str).map do |uri_or_code|
1179
- resource =
1180
- Google::Apis::BigqueryV2::UserDefinedFunctionResource.new
1638
+ resource = Google::Apis::BigqueryV2::UserDefinedFunctionResource.new
1181
1639
  if uri_or_code.start_with? "gs://"
1182
1640
  resource.resource_uri = uri_or_code
1183
1641
  else
@@ -1237,17 +1695,26 @@ module Google
1237
1695
  # end
1238
1696
  #
1239
1697
  class Stage
1240
- attr_reader :compute_ratio_avg, :compute_ratio_max, :id, :name,
1241
- :read_ratio_avg, :read_ratio_max, :records_read,
1242
- :records_written, :status, :steps, :wait_ratio_avg,
1243
- :wait_ratio_max, :write_ratio_avg, :write_ratio_max
1698
+ attr_reader :compute_ratio_avg
1699
+ attr_reader :compute_ratio_max
1700
+ attr_reader :id
1701
+ attr_reader :name
1702
+ attr_reader :read_ratio_avg
1703
+ attr_reader :read_ratio_max
1704
+ attr_reader :records_read
1705
+ attr_reader :records_written
1706
+ attr_reader :status
1707
+ attr_reader :steps
1708
+ attr_reader :wait_ratio_avg
1709
+ attr_reader :wait_ratio_max
1710
+ attr_reader :write_ratio_avg
1711
+ attr_reader :write_ratio_max
1244
1712
 
1245
1713
  ##
1246
1714
  # @private Creates a new Stage instance.
1247
- def initialize compute_ratio_avg, compute_ratio_max, id, name,
1248
- read_ratio_avg, read_ratio_max, records_read,
1249
- records_written, status, steps, wait_ratio_avg,
1250
- wait_ratio_max, write_ratio_avg, write_ratio_max
1715
+ def initialize compute_ratio_avg, compute_ratio_max, id, name, read_ratio_avg, read_ratio_max, records_read,
1716
+ records_written, status, steps, wait_ratio_avg, wait_ratio_max, write_ratio_avg,
1717
+ write_ratio_max
1251
1718
  @compute_ratio_avg = compute_ratio_avg
1252
1719
  @compute_ratio_max = compute_ratio_max
1253
1720
  @id = id
@@ -1268,11 +1735,9 @@ module Google
1268
1735
  # @private New Stage from a statistics.query.queryPlan element.
1269
1736
  def self.from_gapi gapi
1270
1737
  steps = Array(gapi.steps).map { |g| Step.from_gapi g }
1271
- new gapi.compute_ratio_avg, gapi.compute_ratio_max, gapi.id,
1272
- gapi.name, gapi.read_ratio_avg, gapi.read_ratio_max,
1273
- gapi.records_read, gapi.records_written, gapi.status, steps,
1274
- gapi.wait_ratio_avg, gapi.wait_ratio_max, gapi.write_ratio_avg,
1275
- gapi.write_ratio_max
1738
+ new gapi.compute_ratio_avg, gapi.compute_ratio_max, gapi.id, gapi.name, gapi.read_ratio_avg,
1739
+ gapi.read_ratio_max, gapi.records_read, gapi.records_written, gapi.status, steps, gapi.wait_ratio_avg,
1740
+ gapi.wait_ratio_max, gapi.write_ratio_avg, gapi.write_ratio_max
1276
1741
  end
1277
1742
  end
1278
1743
 
@@ -1306,7 +1771,8 @@ module Google
1306
1771
  # end
1307
1772
  #
1308
1773
  class Step
1309
- attr_reader :kind, :substeps
1774
+ attr_reader :kind
1775
+ attr_reader :substeps
1310
1776
 
1311
1777
  ##
1312
1778
  # @private Creates a new Stage instance.
@@ -1327,8 +1793,7 @@ module Google
1327
1793
  def ensure_schema!
1328
1794
  return unless destination_schema.nil?
1329
1795
 
1330
- query_results_gapi = service.job_query_results \
1331
- job_id, location: location, max: 0
1796
+ query_results_gapi = service.job_query_results job_id, location: location, max: 0
1332
1797
  # raise "unable to retrieve schema" if query_results_gapi.schema.nil?
1333
1798
  @destination_schema_gapi = query_results_gapi.schema
1334
1799
  end
@@ -1346,9 +1811,10 @@ module Google
1346
1811
  end
1347
1812
 
1348
1813
  def destination_table_gapi
1349
- Google::Apis::BigqueryV2::Table.new \
1814
+ Google::Apis::BigqueryV2::Table.new(
1350
1815
  table_reference: @gapi.configuration.query.destination_table,
1351
1816
  schema: destination_schema
1817
+ )
1352
1818
  end
1353
1819
  end
1354
1820
  end