google-cloud-bigquery 1.12.0 → 1.38.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (50) hide show
  1. checksums.yaml +4 -4
  2. data/AUTHENTICATION.md +9 -28
  3. data/CHANGELOG.md +372 -1
  4. data/CONTRIBUTING.md +328 -116
  5. data/LOGGING.md +2 -2
  6. data/OVERVIEW.md +21 -20
  7. data/TROUBLESHOOTING.md +2 -8
  8. data/lib/google/cloud/bigquery/argument.rb +197 -0
  9. data/lib/google/cloud/bigquery/convert.rb +154 -170
  10. data/lib/google/cloud/bigquery/copy_job.rb +40 -23
  11. data/lib/google/cloud/bigquery/credentials.rb +5 -12
  12. data/lib/google/cloud/bigquery/data.rb +109 -18
  13. data/lib/google/cloud/bigquery/dataset/access.rb +322 -51
  14. data/lib/google/cloud/bigquery/dataset/list.rb +7 -13
  15. data/lib/google/cloud/bigquery/dataset.rb +960 -279
  16. data/lib/google/cloud/bigquery/external/avro_source.rb +107 -0
  17. data/lib/google/cloud/bigquery/external/bigtable_source/column.rb +404 -0
  18. data/lib/google/cloud/bigquery/external/bigtable_source/column_family.rb +945 -0
  19. data/lib/google/cloud/bigquery/external/bigtable_source.rb +230 -0
  20. data/lib/google/cloud/bigquery/external/csv_source.rb +481 -0
  21. data/lib/google/cloud/bigquery/external/data_source.rb +771 -0
  22. data/lib/google/cloud/bigquery/external/json_source.rb +170 -0
  23. data/lib/google/cloud/bigquery/external/parquet_source.rb +148 -0
  24. data/lib/google/cloud/bigquery/external/sheets_source.rb +166 -0
  25. data/lib/google/cloud/bigquery/external.rb +50 -2256
  26. data/lib/google/cloud/bigquery/extract_job.rb +217 -58
  27. data/lib/google/cloud/bigquery/insert_response.rb +1 -3
  28. data/lib/google/cloud/bigquery/job/list.rb +13 -20
  29. data/lib/google/cloud/bigquery/job.rb +286 -11
  30. data/lib/google/cloud/bigquery/load_job.rb +801 -133
  31. data/lib/google/cloud/bigquery/model/list.rb +5 -9
  32. data/lib/google/cloud/bigquery/model.rb +247 -16
  33. data/lib/google/cloud/bigquery/policy.rb +432 -0
  34. data/lib/google/cloud/bigquery/project/list.rb +6 -11
  35. data/lib/google/cloud/bigquery/project.rb +526 -243
  36. data/lib/google/cloud/bigquery/query_job.rb +584 -125
  37. data/lib/google/cloud/bigquery/routine/list.rb +165 -0
  38. data/lib/google/cloud/bigquery/routine.rb +1227 -0
  39. data/lib/google/cloud/bigquery/schema/field.rb +413 -63
  40. data/lib/google/cloud/bigquery/schema.rb +221 -48
  41. data/lib/google/cloud/bigquery/service.rb +186 -109
  42. data/lib/google/cloud/bigquery/standard_sql.rb +269 -53
  43. data/lib/google/cloud/bigquery/table/async_inserter.rb +86 -42
  44. data/lib/google/cloud/bigquery/table/list.rb +6 -11
  45. data/lib/google/cloud/bigquery/table.rb +1188 -326
  46. data/lib/google/cloud/bigquery/time.rb +6 -0
  47. data/lib/google/cloud/bigquery/version.rb +1 -1
  48. data/lib/google/cloud/bigquery.rb +18 -8
  49. data/lib/google-cloud-bigquery.rb +15 -13
  50. metadata +67 -40
@@ -48,6 +48,44 @@ module Google
48
48
  # puts job.data.first
49
49
  # end
50
50
  #
51
+ # @example With multiple statements and child jobs:
52
+ # require "google/cloud/bigquery"
53
+ #
54
+ # bigquery = Google::Cloud::Bigquery.new
55
+ #
56
+ # multi_statement_sql = <<~SQL
57
+ # -- Declare a variable to hold names as an array.
58
+ # DECLARE top_names ARRAY<STRING>;
59
+ # -- Build an array of the top 100 names from the year 2017.
60
+ # SET top_names = (
61
+ # SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
62
+ # FROM `bigquery-public-data.usa_names.usa_1910_current`
63
+ # WHERE year = 2017
64
+ # );
65
+ # -- Which names appear as words in Shakespeare's plays?
66
+ # SELECT
67
+ # name AS shakespeare_name
68
+ # FROM UNNEST(top_names) AS name
69
+ # WHERE name IN (
70
+ # SELECT word
71
+ # FROM `bigquery-public-data.samples.shakespeare`
72
+ # );
73
+ # SQL
74
+ #
75
+ # job = bigquery.query_job multi_statement_sql
76
+ #
77
+ # job.wait_until_done!
78
+ #
79
+ # child_jobs = bigquery.jobs parent_job: job
80
+ #
81
+ # child_jobs.each do |child_job|
82
+ # script_statistics = child_job.script_statistics
83
+ # puts script_statistics.evaluation_kind
84
+ # script_statistics.stack_frames.each do |stack_frame|
85
+ # puts stack_frame.text
86
+ # end
87
+ # end
88
+ #
51
89
  class QueryJob < Job
52
90
  ##
53
91
  # Checks if the priority for the query is `BATCH`.
@@ -56,8 +94,7 @@ module Google
56
94
  # otherwise.
57
95
  #
58
96
  def batch?
59
- val = @gapi.configuration.query.priority
60
- val == "BATCH"
97
+ @gapi.configuration.query.priority == "BATCH"
61
98
  end
62
99
 
63
100
  ##
@@ -205,17 +242,16 @@ module Google
205
242
  # end
206
243
  #
207
244
  def query_plan
208
- return nil unless @gapi.statistics.query &&
209
- @gapi.statistics.query.query_plan
210
- Array(@gapi.statistics.query.query_plan).map do |stage|
211
- Stage.from_gapi stage
212
- end
245
+ return nil unless @gapi&.statistics&.query&.query_plan
246
+ Array(@gapi.statistics.query.query_plan).map { |stage| Stage.from_gapi stage }
213
247
  end
214
248
 
215
249
  ##
216
250
  # The type of query statement, if valid. Possible values (new values
217
251
  # might be added in the future):
218
252
  #
253
+ # * "ALTER_TABLE": DDL statement, see [Using Data Definition Language
254
+ # Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
219
255
  # * "CREATE_MODEL": DDL statement, see [Using Data Definition Language
220
256
  # Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
221
257
  # * "CREATE_TABLE": DDL statement, see [Using Data Definition Language
@@ -261,8 +297,16 @@ module Google
261
297
  # query_job.ddl? #=> true
262
298
  #
263
299
  def ddl?
264
- %w[CREATE_MODEL CREATE_TABLE CREATE_TABLE_AS_SELECT CREATE_VIEW \
265
- DROP_MODEL DROP_TABLE DROP_VIEW].include? statement_type
300
+ [
301
+ "ALTER_TABLE",
302
+ "CREATE_MODEL",
303
+ "CREATE_TABLE",
304
+ "CREATE_TABLE_AS_SELECT",
305
+ "CREATE_VIEW",
306
+ "DROP_MODEL",
307
+ "DROP_TABLE",
308
+ "DROP_VIEW"
309
+ ].include? statement_type
266
310
  end
267
311
 
268
312
  ##
@@ -285,7 +329,12 @@ module Google
285
329
  # query_job.dml? #=> true
286
330
  #
287
331
  def dml?
288
- %w[INSERT UPDATE MERGE DELETE].include? statement_type
332
+ [
333
+ "INSERT",
334
+ "UPDATE",
335
+ "MERGE",
336
+ "DELETE"
337
+ ].include? statement_type
289
338
  end
290
339
 
291
340
  ##
@@ -308,6 +357,22 @@ module Google
308
357
  @gapi.statistics.query.ddl_operation_performed
309
358
  end
310
359
 
360
+ ##
361
+ # The DDL target routine, in reference state. (See {Routine#reference?}.)
362
+ # Present only for `CREATE/DROP FUNCTION/PROCEDURE` queries. (See
363
+ # {#statement_type}.)
364
+ #
365
+ # @return [Google::Cloud::Bigquery::Routine, nil] The DDL target routine, in
366
+ # reference state.
367
+ #
368
+ def ddl_target_routine
369
+ return nil unless @gapi.statistics.query
370
+ ensure_service!
371
+ routine = @gapi.statistics.query.ddl_target_routine
372
+ return nil unless routine
373
+ Google::Cloud::Bigquery::Routine.new_reference_from_gapi routine, service
374
+ end
375
+
311
376
  ##
312
377
  # The DDL target table, in reference state. (See {Table#reference?}.)
313
378
  # Present only for `CREATE/DROP TABLE/VIEW` queries. (See
@@ -336,6 +401,39 @@ module Google
336
401
  @gapi.statistics.query.num_dml_affected_rows
337
402
  end
338
403
 
404
+ ##
405
+ # The number of deleted rows. Present only for DML statements `DELETE`,
406
+ # `MERGE` and `TRUNCATE`. (See {#statement_type}.)
407
+ #
408
+ # @return [Integer, nil] The number of deleted rows, or `nil` if not
409
+ # applicable.
410
+ #
411
+ def deleted_row_count
412
+ @gapi.statistics.query&.dml_stats&.deleted_row_count
413
+ end
414
+
415
+ ##
416
+ # The number of inserted rows. Present only for DML statements `INSERT`
417
+ # and `MERGE`. (See {#statement_type}.)
418
+ #
419
+ # @return [Integer, nil] The number of inserted rows, or `nil` if not
420
+ # applicable.
421
+ #
422
+ def inserted_row_count
423
+ @gapi.statistics.query&.dml_stats&.inserted_row_count
424
+ end
425
+
426
+ ##
427
+ # The number of updated rows. Present only for DML statements `UPDATE`
428
+ # and `MERGE`. (See {#statement_type}.)
429
+ #
430
+ # @return [Integer, nil] The number of updated rows, or `nil` if not
431
+ # applicable.
432
+ #
433
+ def updated_row_count
434
+ @gapi.statistics.query&.dml_stats&.updated_row_count
435
+ end
436
+
339
437
  ##
340
438
  # The table in which the query results are stored.
341
439
  #
@@ -383,9 +481,7 @@ module Google
383
481
  def udfs
384
482
  udfs_gapi = @gapi.configuration.query.user_defined_function_resources
385
483
  return nil unless udfs_gapi
386
- Array(udfs_gapi).map do |udf|
387
- udf.inline_code || udf.resource_uri
388
- end
484
+ Array(udfs_gapi).map { |udf| udf.inline_code || udf.resource_uri }
389
485
  end
390
486
 
391
487
  ##
@@ -396,16 +492,77 @@ module Google
396
492
  #
397
493
  # @!group Attributes
398
494
  def encryption
399
- EncryptionConfiguration.from_gapi(
400
- @gapi.configuration.query.destination_encryption_configuration
401
- )
495
+ EncryptionConfiguration.from_gapi @gapi.configuration.query.destination_encryption_configuration
496
+ end
497
+
498
+ ###
499
+ # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
500
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
501
+ #
502
+ # @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
503
+ #
504
+ # @!group Attributes
505
+ #
506
+ def range_partitioning?
507
+ !@gapi.configuration.query.range_partitioning.nil?
508
+ end
509
+
510
+ ###
511
+ # The field on which the destination table will be range partitioned, if any. The field must be a
512
+ # top-level `NULLABLE/REQUIRED` field. The only supported type is `INTEGER/INT64`. See
513
+ # [Creating and using integer range partitioned
514
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
515
+ #
516
+ # @return [String, nil] The partition field, if a field was configured, or `nil` if not range partitioned.
517
+ #
518
+ # @!group Attributes
519
+ #
520
+ def range_partitioning_field
521
+ @gapi.configuration.query.range_partitioning.field if range_partitioning?
522
+ end
523
+
524
+ ###
525
+ # The start of range partitioning, inclusive. See [Creating and using integer range partitioned
526
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
527
+ #
528
+ # @return [Integer, nil] The start of range partitioning, inclusive, or `nil` if not range partitioned.
529
+ #
530
+ # @!group Attributes
531
+ #
532
+ def range_partitioning_start
533
+ @gapi.configuration.query.range_partitioning.range.start if range_partitioning?
534
+ end
535
+
536
+ ###
537
+ # The width of each interval. See [Creating and using integer range partitioned
538
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
539
+ #
540
+ # @return [Integer, nil] The width of each interval, for data in range partitions, or `nil` if not range
541
+ # partitioned.
542
+ #
543
+ # @!group Attributes
544
+ #
545
+ def range_partitioning_interval
546
+ @gapi.configuration.query.range_partitioning.range.interval if range_partitioning?
547
+ end
548
+
549
+ ###
550
+ # The end of range partitioning, exclusive. See [Creating and using integer range partitioned
551
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
552
+ #
553
+ # @return [Integer, nil] The end of range partitioning, exclusive, or `nil` if not range partitioned.
554
+ #
555
+ # @!group Attributes
556
+ #
557
+ def range_partitioning_end
558
+ @gapi.configuration.query.range_partitioning.range.end if range_partitioning?
402
559
  end
403
560
 
404
561
  ###
405
562
  # Checks if the destination table will be time-partitioned. See
406
563
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
407
564
  #
408
- # @return [Boolean, nil] `true` when the table will be time-partitioned,
565
+ # @return [Boolean] `true` when the table will be time-partitioned,
409
566
  # or `false` otherwise.
410
567
  #
411
568
  # @!group Attributes
@@ -418,8 +575,9 @@ module Google
418
575
  # The period for which the destination table will be partitioned, if
419
576
  # any. See [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
420
577
  #
421
- # @return [String, nil] The partition type. Currently the only supported
422
- # value is "DAY", or `nil` if not present.
578
+ # @return [String, nil] The partition type. The supported types are `DAY`,
579
+ # `HOUR`, `MONTH`, and `YEAR`, which will generate one partition per day,
580
+ # hour, month, and year, respectively; or `nil` if not present.
423
581
  #
424
582
  # @!group Attributes
425
583
  #
@@ -479,10 +637,15 @@ module Google
479
637
  ###
480
638
  # Checks if the destination table will be clustered.
481
639
  #
640
+ # See {QueryJob::Updater#clustering_fields=}, {Table#clustering_fields} and
641
+ # {Table#clustering_fields=}.
642
+ #
482
643
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
483
- # Introduction to Clustered Tables
644
+ # Introduction to clustered tables
645
+ # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
646
+ # Creating and using clustered tables
484
647
  #
485
- # @return [Boolean, nil] `true` when the table will be clustered,
648
+ # @return [Boolean] `true` when the table will be clustered,
486
649
  # or `false` otherwise.
487
650
  #
488
651
  # @!group Attributes
@@ -497,14 +660,16 @@ module Google
497
660
  # be first partitioned and subsequently clustered. The order of the
498
661
  # returned fields determines the sort order of the data.
499
662
  #
500
- # See {QueryJob::Updater#clustering_fields=}.
663
+ # BigQuery supports clustering for both partitioned and non-partitioned
664
+ # tables.
665
+ #
666
+ # See {QueryJob::Updater#clustering_fields=}, {Table#clustering_fields} and
667
+ # {Table#clustering_fields=}.
501
668
  #
502
- # @see https://cloud.google.com/bigquery/docs/partitioned-tables
503
- # Partitioned Tables
504
669
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
505
- # Introduction to Clustered Tables
670
+ # Introduction to clustered tables
506
671
  # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
507
- # Creating and Using Clustered Tables
672
+ # Creating and using clustered tables
508
673
  #
509
674
  # @return [Array<String>, nil] The clustering fields, or `nil` if the
510
675
  # destination table will not be clustered.
@@ -535,8 +700,7 @@ module Google
535
700
 
536
701
  ensure_service!
537
702
  loop do
538
- query_results_gapi = service.job_query_results \
539
- job_id, location: location, max: 0
703
+ query_results_gapi = service.job_query_results job_id, location: location, max: 0
540
704
  if query_results_gapi.job_complete
541
705
  @destination_schema_gapi = query_results_gapi.schema
542
706
  break
@@ -566,27 +730,28 @@ module Google
566
730
  #
567
731
  # job.wait_until_done!
568
732
  # data = job.data
733
+ #
734
+ # # Iterate over the first page of results
569
735
  # data.each do |row|
570
736
  # puts row[:word]
571
737
  # end
738
+ # # Retrieve the next page of results
572
739
  # data = data.next if data.next?
573
740
  #
574
741
  def data token: nil, max: nil, start: nil
575
742
  return nil unless done?
576
- if dryrun?
577
- return Data.from_gapi_json({ rows: [] }, nil, @gapi, service)
578
- end
743
+ return Data.from_gapi_json({ rows: [] }, nil, @gapi, service) if dryrun?
579
744
  if ddl? || dml?
580
745
  data_hash = { totalRows: nil, rows: [] }
581
746
  return Data.from_gapi_json data_hash, nil, @gapi, service
582
747
  end
583
748
  ensure_schema!
584
749
 
585
- options = { token: token, max: max, start: start }
586
- data_hash = service.list_tabledata \
587
- destination_table_dataset_id,
588
- destination_table_table_id,
589
- options
750
+ data_hash = service.list_tabledata destination_table_dataset_id,
751
+ destination_table_table_id,
752
+ token: token,
753
+ max: max,
754
+ start: start
590
755
  Data.from_gapi_json data_hash, destination_table_gapi, @gapi, service
591
756
  end
592
757
  alias query_results data
@@ -597,12 +762,11 @@ module Google
597
762
  ##
598
763
  # @private Create an Updater object.
599
764
  def initialize service, gapi
765
+ super()
600
766
  @service = service
601
767
  @gapi = gapi
602
768
  end
603
769
 
604
- # rubocop:disable all
605
-
606
770
  ##
607
771
  # @private Create an Updater from an options hash.
608
772
  #
@@ -624,15 +788,16 @@ module Google
624
788
  )
625
789
 
626
790
  updater = QueryJob::Updater.new service, req
627
- updater.params = options[:params] if options[:params]
791
+ updater.set_params_and_types options[:params], options[:types] if options[:params]
628
792
  updater.create = options[:create]
793
+ updater.create_session = options[:create_session]
794
+ updater.session_id = options[:session_id] if options[:session_id]
629
795
  updater.write = options[:write]
630
796
  updater.table = options[:table]
631
797
  updater.dryrun = options[:dryrun]
632
798
  updater.maximum_bytes_billed = options[:maximum_bytes_billed]
633
799
  updater.labels = options[:labels] if options[:labels]
634
- updater.legacy_sql = Convert.resolve_legacy_sql(
635
- options[:standard_sql], options[:legacy_sql])
800
+ updater.legacy_sql = Convert.resolve_legacy_sql options[:standard_sql], options[:legacy_sql]
636
801
  updater.external = options[:external] if options[:external]
637
802
  updater.priority = options[:priority]
638
803
  updater.cache = options[:cache]
@@ -642,8 +807,6 @@ module Google
642
807
  updater
643
808
  end
644
809
 
645
- # rubocop:enable all
646
-
647
810
  ##
648
811
  # Sets the geographic location where the job should run. Required
649
812
  # except for US and EU.
@@ -731,41 +894,127 @@ module Google
731
894
  #
732
895
  # @!group Attributes
733
896
  def dataset= value
734
- @gapi.configuration.query.default_dataset =
735
- @service.dataset_ref_from value
897
+ @gapi.configuration.query.default_dataset = @service.dataset_ref_from value
736
898
  end
737
899
 
738
900
  ##
739
901
  # Sets the query parameters. Standard SQL only.
740
902
  #
741
- # @param [Array, Hash] params Used to pass query arguments when the
742
- # `query` string contains either positional (`?`) or named
743
- # (`@myparam`) query parameters. If value passed is an array
744
- # `["foo"]`, the query must use positional query parameters. If
745
- # value passed is a hash `{ myparam: "foo" }`, the query must use
746
- # named query parameters. When set, `legacy_sql` will automatically
747
- # be set to false and `standard_sql` to true.
903
+ # Use {set_params_and_types} to set both params and types.
904
+ #
905
+ # @param [Array, Hash] params Standard SQL only. Used to pass query arguments when the `query` string contains
906
+ # either positional (`?`) or named (`@myparam`) query parameters. If value passed is an array `["foo"]`, the
907
+ # query must use positional query parameters. If value passed is a hash `{ myparam: "foo" }`, the query must
908
+ # use named query parameters. When set, `legacy_sql` will automatically be set to false and `standard_sql`
909
+ # to true.
910
+ #
911
+ # BigQuery types are converted from Ruby types as follows:
912
+ #
913
+ # | BigQuery | Ruby | Notes |
914
+ # |--------------|--------------------------------------|--------------------------------------------------|
915
+ # | `BOOL` | `true`/`false` | |
916
+ # | `INT64` | `Integer` | |
917
+ # | `FLOAT64` | `Float` | |
918
+ # | `NUMERIC` | `BigDecimal` | `BigDecimal` values will be rounded to scale 9. |
919
+ # | `BIGNUMERIC` | `BigDecimal` | NOT AUTOMATIC: Must be mapped using `types`. |
920
+ # | `STRING` | `String` | |
921
+ # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
922
+ # | `DATE` | `Date` | |
923
+ # | `GEOGRAPHY` | `String` (WKT or GeoJSON) | NOT AUTOMATIC: Must be mapped using `types`. |
924
+ # | `TIMESTAMP` | `Time` | |
925
+ # | `TIME` | `Google::Cloud::BigQuery::Time` | |
926
+ # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
927
+ # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
928
+ # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
929
+ #
930
+ # See [Data Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types) for an overview
931
+ # of each BigQuery data type, including allowed values. For the `GEOGRAPHY` type, see [Working with BigQuery
932
+ # GIS data](https://cloud.google.com/bigquery/docs/gis-data).
748
933
  #
749
934
  # @!group Attributes
750
935
  def params= params
936
+ set_params_and_types params
937
+ end
938
+
939
+ ##
940
+ # Sets the query parameters. Standard SQL only.
941
+ #
942
+ # @param [Array, Hash] params Standard SQL only. Used to pass query arguments when the `query` string contains
943
+ # either positional (`?`) or named (`@myparam`) query parameters. If value passed is an array `["foo"]`, the
944
+ # query must use positional query parameters. If value passed is a hash `{ myparam: "foo" }`, the query must
945
+ # use named query parameters. When set, `legacy_sql` will automatically be set to false and `standard_sql`
946
+ # to true.
947
+ #
948
+ # BigQuery types are converted from Ruby types as follows:
949
+ #
950
+ # | BigQuery | Ruby | Notes |
951
+ # |--------------|--------------------------------------|--------------------------------------------------|
952
+ # | `BOOL` | `true`/`false` | |
953
+ # | `INT64` | `Integer` | |
954
+ # | `FLOAT64` | `Float` | |
955
+ # | `NUMERIC` | `BigDecimal` | `BigDecimal` values will be rounded to scale 9. |
956
+ # | `BIGNUMERIC` | `BigDecimal` | NOT AUTOMATIC: Must be mapped using `types`. |
957
+ # | `STRING` | `String` | |
958
+ # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
959
+ # | `DATE` | `Date` | |
960
+ # | `GEOGRAPHY` | `String` (WKT or GeoJSON) | NOT AUTOMATIC: Must be mapped using `types`. |
961
+ # | `TIMESTAMP` | `Time` | |
962
+ # | `TIME` | `Google::Cloud::BigQuery::Time` | |
963
+ # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
964
+ # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
965
+ # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
966
+ #
967
+ # See [Data Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types) for an overview
968
+ # of each BigQuery data type, including allowed values. For the `GEOGRAPHY` type, see [Working with BigQuery
969
+ # GIS data](https://cloud.google.com/bigquery/docs/gis-data).
970
+ # @param [Array, Hash] types Standard SQL only. Types of the SQL parameters in `params`. It is not always
971
+ # possible to infer the right SQL type from a value in `params`. In these cases, `types` must be used to
972
+ # specify the SQL type for these values.
973
+ #
974
+ # Arguments must match the value type passed to `params`. This must be an `Array` when the query uses
975
+ # positional query parameters. This must be a `Hash` when the query uses named query parameters. The values
976
+ # should be BigQuery type codes from the following list:
977
+ #
978
+ # * `:BOOL`
979
+ # * `:INT64`
980
+ # * `:FLOAT64`
981
+ # * `:NUMERIC`
982
+ # * `:BIGNUMERIC`
983
+ # * `:STRING`
984
+ # * `:DATETIME`
985
+ # * `:DATE`
986
+ # * `:GEOGRAPHY`
987
+ # * `:TIMESTAMP`
988
+ # * `:TIME`
989
+ # * `:BYTES`
990
+ # * `Array` - Lists are specified by providing the type code in an array. For example, an array of integers
991
+ # are specified as `[:INT64]`.
992
+ # * `Hash` - Types for STRUCT values (`Hash` objects) are specified using a `Hash` object, where the keys
993
+ # match the `params` hash, and the values are the types value that matches the data.
994
+ #
995
+ # Types are optional.
996
+ #
997
+ # @!group Attributes
998
+ def set_params_and_types params, types = nil
999
+ types ||= params.class.new
1000
+ raise ArgumentError, "types must use the same format as params" if types.class != params.class
1001
+
751
1002
  case params
752
- when Array then
1003
+ when Array
753
1004
  @gapi.configuration.query.use_legacy_sql = false
754
1005
  @gapi.configuration.query.parameter_mode = "POSITIONAL"
755
- @gapi.configuration.query.query_parameters = params.map do |param|
756
- Convert.to_query_param param
1006
+ @gapi.configuration.query.query_parameters = params.zip(types).map do |param, type|
1007
+ Convert.to_query_param param, type
757
1008
  end
758
- when Hash then
1009
+ when Hash
759
1010
  @gapi.configuration.query.use_legacy_sql = false
760
1011
  @gapi.configuration.query.parameter_mode = "NAMED"
761
- @gapi.configuration.query.query_parameters =
762
- params.map do |name, param|
763
- Convert.to_query_param(param).tap do |named_param|
764
- named_param.name = String name
765
- end
766
- end
1012
+ @gapi.configuration.query.query_parameters = params.map do |name, param|
1013
+ type = types[name]
1014
+ Convert.to_query_param(param, type).tap { |named_param| named_param.name = String name }
1015
+ end
767
1016
  else
768
- raise "Query parameters must be an Array or a Hash."
1017
+ raise ArgumentError, "params must be an Array or a Hash"
769
1018
  end
770
1019
  end
771
1020
 
@@ -783,8 +1032,38 @@ module Google
783
1032
  #
784
1033
  # @!group Attributes
785
1034
  def create= value
786
- @gapi.configuration.query.create_disposition =
787
- Convert.create_disposition value
1035
+ @gapi.configuration.query.create_disposition = Convert.create_disposition value
1036
+ end
1037
+
1038
+ ##
1039
+ # Sets the create_session property. If true, creates a new session,
1040
+ # where session id will be a server generated random id. If false,
1041
+ # runs query with an existing {#session_id=}, otherwise runs query in
1042
+ # non-session mode. The default value is `false`.
1043
+ #
1044
+ # @param [Boolean] value The create_session property. The default
1045
+ # value is `false`.
1046
+ #
1047
+ # @!group Attributes
1048
+ def create_session= value
1049
+ @gapi.configuration.query.create_session = value
1050
+ end
1051
+
1052
+ ##
1053
+ # Sets the session ID for a query run in session mode. See {#create_session=}.
1054
+ #
1055
+ # @param [String] value The session ID. The default value is `nil`.
1056
+ #
1057
+ # @!group Attributes
1058
+ def session_id= value
1059
+ @gapi.configuration.query.connection_properties ||= []
1060
+ prop = @gapi.configuration.query.connection_properties.find { |cp| cp.key == "session_id" }
1061
+ if prop
1062
+ prop.value = value
1063
+ else
1064
+ prop = Google::Apis::BigqueryV2::ConnectionProperty.new key: "session_id", value: value
1065
+ @gapi.configuration.query.connection_properties << prop
1066
+ end
788
1067
  end
789
1068
 
790
1069
  ##
@@ -802,8 +1081,7 @@ module Google
802
1081
  #
803
1082
  # @!group Attributes
804
1083
  def write= value
805
- @gapi.configuration.query.write_disposition =
806
- Convert.write_disposition value
1084
+ @gapi.configuration.query.write_disposition = Convert.write_disposition value
807
1085
  end
808
1086
 
809
1087
  ##
@@ -849,12 +1127,21 @@ module Google
849
1127
  # Sets the labels to use for the job.
850
1128
  #
851
1129
  # @param [Hash] value A hash of user-provided labels associated with
852
- # the job. You can use these to organize and group your jobs. Label
853
- # keys and values can be no longer than 63 characters, can only
854
- # contain lowercase letters, numeric characters, underscores and
855
- # dashes. International characters are allowed. Label values are
856
- # optional. Label keys must start with a letter and each label in
857
- # the list must have a different key.
1130
+ # the job. You can use these to organize and group your jobs.
1131
+ #
1132
+ # The labels applied to a resource must meet the following requirements:
1133
+ #
1134
+ # * Each resource can have multiple labels, up to a maximum of 64.
1135
+ # * Each label must be a key-value pair.
1136
+ # * Keys have a minimum length of 1 character and a maximum length of
1137
+ # 63 characters, and cannot be empty. Values can be empty, and have
1138
+ # a maximum length of 63 characters.
1139
+ # * Keys and values can contain only lowercase letters, numeric characters,
1140
+ # underscores, and dashes. All characters must use UTF-8 encoding, and
1141
+ # international characters are allowed.
1142
+ # * The key portion of a label must be unique. However, you can use the
1143
+ # same key with multiple resources.
1144
+ # * Keys must start with a lowercase letter or international character.
858
1145
  #
859
1146
  # @!group Attributes
860
1147
  #
@@ -905,9 +1192,7 @@ module Google
905
1192
  # @!group Attributes
906
1193
  #
907
1194
  def external= value
908
- external_table_pairs = value.map do |name, obj|
909
- [String(name), obj.to_gapi]
910
- end
1195
+ external_table_pairs = value.map { |name, obj| [String(name), obj.to_gapi] }
911
1196
  external_table_hash = Hash[external_table_pairs]
912
1197
  @gapi.configuration.query.table_definitions = external_table_hash
913
1198
  end
@@ -925,8 +1210,7 @@ module Google
925
1210
  #
926
1211
  # @!group Attributes
927
1212
  def udfs= value
928
- @gapi.configuration.query.user_defined_function_resources =
929
- udfs_gapi_from value
1213
+ @gapi.configuration.query.user_defined_function_resources = udfs_gapi_from value
930
1214
  end
931
1215
 
932
1216
  ##
@@ -950,21 +1234,180 @@ module Google
950
1234
  #
951
1235
  # @!group Attributes
952
1236
##
# Sets the customer-managed encryption configuration for the query's
# destination table.
#
# @param [EncryptionConfiguration] val The encryption configuration;
#   its low-level form is stored on the job configuration.
#
# @!group Attributes
def encryption= val
  query_config = @gapi.configuration.query
  query_config.update! destination_encryption_configuration: val.to_gapi
end
1239
+
1240
+ ##
1241
+ # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
1242
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1243
+ #
1244
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1245
+ #
1246
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1247
+ # partitioning on an existing table.
1248
+ #
1249
+ # @param [String] field The range partition field. the destination table is partitioned by this
1250
+ # field. The field must be a top-level `NULLABLE/REQUIRED` field. The only supported
1251
+ # type is `INTEGER/INT64`.
1252
+ #
1253
+ # @example
1254
+ # require "google/cloud/bigquery"
1255
+ #
1256
+ # bigquery = Google::Cloud::Bigquery.new
1257
+ # dataset = bigquery.dataset "my_dataset"
1258
+ # destination_table = dataset.table "my_destination_table",
1259
+ # skip_lookup: true
1260
+ #
1261
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1262
+ # job.table = destination_table
1263
+ # job.range_partitioning_field = "num"
1264
+ # job.range_partitioning_start = 0
1265
+ # job.range_partitioning_interval = 10
1266
+ # job.range_partitioning_end = 100
1267
+ # end
1268
+ #
1269
+ # job.wait_until_done!
1270
+ # job.done? #=> true
1271
+ #
1272
+ # @!group Attributes
1273
+ #
1274
##
# Sets the field on which to range partition the destination table. See
# [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# See {#range_partitioning_start=}, {#range_partitioning_interval=} and
# {#range_partitioning_end=}. Range partitioning can only be set when
# creating a table.
#
# @param [String] field The range partition field. Must be a top-level
#   `NULLABLE/REQUIRED` field of type `INTEGER/INT64`.
#
# @!group Attributes
#
def range_partitioning_field= field
  query = @gapi.configuration.query
  # Lazily create the partitioning container so the individual
  # range_partitioning_* setters may be called in any order.
  query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query.range_partitioning.field = field
end
1280
+
1281
+ ##
1282
+ # Sets the start of range partitioning, inclusive, for the destination table. See [Creating and using integer
1283
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1284
+ #
1285
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1286
+ # partitioning on an existing table.
1287
+ #
1288
+ # See {#range_partitioning_field=}, {#range_partitioning_interval=} and {#range_partitioning_end=}.
1289
+ #
1290
+ # @param [Integer] range_start The start of range partitioning, inclusive.
1291
+ #
1292
+ # @example
1293
+ # require "google/cloud/bigquery"
1294
+ #
1295
+ # bigquery = Google::Cloud::Bigquery.new
1296
+ # dataset = bigquery.dataset "my_dataset"
1297
+ # destination_table = dataset.table "my_destination_table",
1298
+ # skip_lookup: true
1299
+ #
1300
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1301
+ # job.table = destination_table
1302
+ # job.range_partitioning_field = "num"
1303
+ # job.range_partitioning_start = 0
1304
+ # job.range_partitioning_interval = 10
1305
+ # job.range_partitioning_end = 100
1306
+ # end
1307
+ #
1308
+ # job.wait_until_done!
1309
+ # job.done? #=> true
1310
+ #
1311
+ # @!group Attributes
1312
+ #
1313
##
# Sets the start of range partitioning, inclusive, for the destination
# table. See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# See {#range_partitioning_field=}, {#range_partitioning_interval=} and
# {#range_partitioning_end=}. Range partitioning can only be set when
# creating a table.
#
# @param [Integer] range_start The start of range partitioning, inclusive.
#
# @!group Attributes
#
def range_partitioning_start= range_start
  query = @gapi.configuration.query
  # Lazily create the partitioning container so the individual
  # range_partitioning_* setters may be called in any order.
  query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query.range_partitioning.range.start = range_start
end
1319
+
1320
+ ##
1321
+ # Sets width of each interval for data in range partitions. See [Creating and using integer range partitioned
1322
+ # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1323
+ #
1324
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1325
+ # partitioning on an existing table.
1326
+ #
1327
+ # See {#range_partitioning_field=}, {#range_partitioning_start=} and {#range_partitioning_end=}.
1328
+ #
1329
+ # @param [Integer] range_interval The width of each interval, for data in partitions.
1330
+ #
1331
+ # @example
1332
+ # require "google/cloud/bigquery"
1333
+ #
1334
+ # bigquery = Google::Cloud::Bigquery.new
1335
+ # dataset = bigquery.dataset "my_dataset"
1336
+ # destination_table = dataset.table "my_destination_table",
1337
+ # skip_lookup: true
1338
+ #
1339
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1340
+ # job.table = destination_table
1341
+ # job.range_partitioning_field = "num"
1342
+ # job.range_partitioning_start = 0
1343
+ # job.range_partitioning_interval = 10
1344
+ # job.range_partitioning_end = 100
1345
+ # end
1346
+ #
1347
+ # job.wait_until_done!
1348
+ # job.done? #=> true
1349
+ #
1350
+ # @!group Attributes
1351
+ #
1352
##
# Sets the width of each interval for data in range partitions. See
# [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# See {#range_partitioning_field=}, {#range_partitioning_start=} and
# {#range_partitioning_end=}. Range partitioning can only be set when
# creating a table.
#
# @param [Integer] range_interval The width of each interval, for data
#   in partitions.
#
# @!group Attributes
#
def range_partitioning_interval= range_interval
  query = @gapi.configuration.query
  # Lazily create the partitioning container so the individual
  # range_partitioning_* setters may be called in any order.
  query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query.range_partitioning.range.interval = range_interval
end
1358
+
1359
+ ##
1360
+ # Sets the end of range partitioning, exclusive, for the destination table. See [Creating and using integer
1361
+ # range partitioned tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
1362
+ #
1363
+ # You can only set range partitioning when creating a table. BigQuery does not allow you to change
1364
+ # partitioning on an existing table.
1365
+ #
1366
+ # See {#range_partitioning_start=}, {#range_partitioning_interval=} and {#range_partitioning_field=}.
1367
+ #
1368
+ # @param [Integer] range_end The end of range partitioning, exclusive.
1369
+ #
1370
+ # @example
1371
+ # require "google/cloud/bigquery"
1372
+ #
1373
+ # bigquery = Google::Cloud::Bigquery.new
1374
+ # dataset = bigquery.dataset "my_dataset"
1375
+ # destination_table = dataset.table "my_destination_table",
1376
+ # skip_lookup: true
1377
+ #
1378
+ # job = bigquery.query_job "SELECT num FROM UNNEST(GENERATE_ARRAY(0, 99)) AS num" do |job|
1379
+ # job.table = destination_table
1380
+ # job.range_partitioning_field = "num"
1381
+ # job.range_partitioning_start = 0
1382
+ # job.range_partitioning_interval = 10
1383
+ # job.range_partitioning_end = 100
1384
+ # end
1385
+ #
1386
+ # job.wait_until_done!
1387
+ # job.done? #=> true
1388
+ #
1389
+ # @!group Attributes
1390
+ #
1391
##
# Sets the end of range partitioning, exclusive, for the destination
# table. See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# See {#range_partitioning_field=}, {#range_partitioning_start=} and
# {#range_partitioning_interval=}. Range partitioning can only be set
# when creating a table.
#
# @param [Integer] range_end The end of range partitioning, exclusive.
#
# @!group Attributes
#
def range_partitioning_end= range_end
  query = @gapi.configuration.query
  # Lazily create the partitioning container so the individual
  # range_partitioning_* setters may be called in any order.
  query.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query.range_partitioning.range.end = range_end
end
957
1397
 
958
1398
  ##
959
1399
  # Sets the partitioning for the destination table. See [Partitioned
960
1400
  # Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
1401
+ # The supported types are `DAY`, `HOUR`, `MONTH`, and `YEAR`, which will
1402
+ # generate one partition per day, hour, month, and year, respectively.
961
1403
  #
962
1404
  # You can only set the partitioning field while creating a table.
963
1405
  # BigQuery does not allow you to change partitioning on an existing
964
1406
  # table.
965
1407
  #
966
- # @param [String] type The partition type. Currently the only
967
- # supported value is "DAY".
1408
+ # @param [String] type The partition type. The supported types are `DAY`,
1409
+ # `HOUR`, `MONTH`, and `YEAR`, which will generate one partition per day,
1410
+ # hour, month, and year, respectively.
968
1411
  #
969
1412
  # @example
970
1413
  # require "google/cloud/bigquery"
@@ -989,8 +1432,7 @@ module Google
989
1432
  # @!group Attributes
990
1433
  #
991
1434
##
# Sets the time partitioning type for the destination table. See
# [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
# Partitioning can only be set when creating a table.
#
# @param [String] type The partition type. The supported types are
#   `DAY`, `HOUR`, `MONTH`, and `YEAR`, which will generate one
#   partition per day, hour, month, and year, respectively.
#
# @!group Attributes
#
def time_partitioning_type= type
  query = @gapi.configuration.query
  query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  query.time_partitioning.update! type: type
end
996
1438
 
@@ -1036,8 +1478,7 @@ module Google
1036
1478
  # @!group Attributes
1037
1479
  #
1038
1480
##
# Sets the field on which to time partition the destination table.
# Partitioning can only be set when creating a table.
#
# @param [String] field The time partitioning field; the table is
#   partitioned by this field.
#
# @!group Attributes
#
def time_partitioning_field= field
  query = @gapi.configuration.query
  query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  query.time_partitioning.update! field: field
end
  end
1043
1484
 
@@ -1076,10 +1517,8 @@ module Google
1076
1517
  # @!group Attributes
1077
1518
  #
1078
1519
##
# Sets the time partition expiration for the destination table.
# Partitioning can only be set when creating a table.
#
# @param [Integer] expiration The expiration time, in seconds, for data
#   in time partitions. (Stored as milliseconds on the API object.)
#
# @!group Attributes
#
def time_partitioning_expiration= expiration
  query = @gapi.configuration.query
  query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  # The API expects milliseconds; the public interface takes seconds.
  query.time_partitioning.update! expiration_ms: expiration * 1000
end
1084
1523
 
1085
1524
  ##
@@ -1094,30 +1533,28 @@ module Google
1094
1533
  # @!group Attributes
1095
1534
  #
1096
1535
  def time_partitioning_require_filter= val
1097
- @gapi.configuration.query.time_partitioning ||= \
1098
- Google::Apis::BigqueryV2::TimePartitioning.new
1099
- @gapi.configuration.query.time_partitioning.update! \
1100
- require_partition_filter: val
1536
+ @gapi.configuration.query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
1537
+ @gapi.configuration.query.time_partitioning.update! require_partition_filter: val
1101
1538
  end
1102
1539
 
1103
1540
  ##
1104
- # Sets one or more fields on which the destination table should be
1105
- # clustered. Must be specified with time-based partitioning, data in
1106
- # the table will be first partitioned and subsequently clustered.
1541
+ # Sets the list of fields on which data should be clustered.
1107
1542
  #
1108
1543
  # Only top-level, non-repeated, simple-type fields are supported. When
1109
1544
  # you cluster a table using multiple columns, the order of columns you
1110
1545
  # specify is important. The order of the specified columns determines
1111
1546
  # the sort order of the data.
1112
1547
  #
1113
- # See {QueryJob#clustering_fields}.
1548
+ # BigQuery supports clustering for both partitioned and non-partitioned
1549
+ # tables.
1550
+ #
1551
+ # See {QueryJob#clustering_fields}, {Table#clustering_fields} and
1552
+ # {Table#clustering_fields=}.
1114
1553
  #
1115
- # @see https://cloud.google.com/bigquery/docs/partitioned-tables
1116
- # Partitioned Tables
1117
1554
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
1118
- # Introduction to Clustered Tables
1555
+ # Introduction to clustered tables
1119
1556
  # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
1120
- # Creating and Using Clustered Tables
1557
+ # Creating and using clustered tables
1121
1558
  #
1122
1559
  # @param [Array<String>] fields The clustering fields. Only top-level,
1123
1560
  # non-repeated, simple-type fields are supported.
@@ -1143,11 +1580,27 @@ module Google
1143
1580
  # @!group Attributes
1144
1581
  #
1145
1582
  def clustering_fields= fields
1146
- @gapi.configuration.query.clustering ||= \
1147
- Google::Apis::BigqueryV2::Clustering.new
1583
+ @gapi.configuration.query.clustering ||= Google::Apis::BigqueryV2::Clustering.new
1148
1584
  @gapi.configuration.query.clustering.fields = fields
1149
1585
  end
1150
1586
 
1587
##
# @raise [RuntimeError] Always raises: this class does not support
#   cancelling. NOTE(review): presumably a stub overriding a Job method
#   on an updater/builder class — confirm against the enclosing class.
def cancel
  raise "not implemented in #{self.class}"
end
1590
+
1591
##
# @raise [RuntimeError] Always raises: this class does not support
#   re-running. NOTE(review): presumably a stub overriding a Job method
#   on an updater/builder class — confirm against the enclosing class.
def rerun!
  raise "not implemented in #{self.class}"
end
1594
+
1595
##
# @raise [RuntimeError] Always raises: this class does not support
#   reloading. NOTE(review): presumably a stub overriding a Job method
#   on an updater/builder class — confirm against the enclosing class.
def reload!
  raise "not implemented in #{self.class}"
end
# Keep the same alias surface as the real Job#reload!.
alias refresh! reload!
1599
+
1600
##
# @raise [RuntimeError] Always raises: this class does not support
#   waiting for completion. NOTE(review): presumably a stub overriding a
#   Job method on an updater/builder class — confirm against the
#   enclosing class.
def wait_until_done!
  raise "not implemented in #{self.class}"
end
1603
+
1151
1604
  ##
1152
1605
  # @private Returns the Google API client library version of this job.
1153
1606
  #
@@ -1170,14 +1623,12 @@ module Google
1170
1623
  end
1171
1624
 
1172
1625
##
# @private Normalizes a user-supplied priority (String or Symbol, any
# case) to the API's enum value, or nil when unrecognized.
def priority_value str
  case str.to_s.downcase
  when "batch"       then "BATCH"
  when "interactive" then "INTERACTIVE"
  end
end
1176
1628
 
1177
1629
  def udfs_gapi_from array_or_str
1178
1630
  Array(array_or_str).map do |uri_or_code|
1179
- resource =
1180
- Google::Apis::BigqueryV2::UserDefinedFunctionResource.new
1631
+ resource = Google::Apis::BigqueryV2::UserDefinedFunctionResource.new
1181
1632
  if uri_or_code.start_with? "gs://"
1182
1633
  resource.resource_uri = uri_or_code
1183
1634
  else
@@ -1237,17 +1688,26 @@ module Google
1237
1688
  # end
1238
1689
  #
1239
1690
  class Stage
1240
- attr_reader :compute_ratio_avg, :compute_ratio_max, :id, :name,
1241
- :read_ratio_avg, :read_ratio_max, :records_read,
1242
- :records_written, :status, :steps, :wait_ratio_avg,
1243
- :wait_ratio_max, :write_ratio_avg, :write_ratio_max
1691
+ attr_reader :compute_ratio_avg
1692
+ attr_reader :compute_ratio_max
1693
+ attr_reader :id
1694
+ attr_reader :name
1695
+ attr_reader :read_ratio_avg
1696
+ attr_reader :read_ratio_max
1697
+ attr_reader :records_read
1698
+ attr_reader :records_written
1699
+ attr_reader :status
1700
+ attr_reader :steps
1701
+ attr_reader :wait_ratio_avg
1702
+ attr_reader :wait_ratio_max
1703
+ attr_reader :write_ratio_avg
1704
+ attr_reader :write_ratio_max
1244
1705
 
1245
1706
  ##
1246
1707
  # @private Creates a new Stage instance.
1247
- def initialize compute_ratio_avg, compute_ratio_max, id, name,
1248
- read_ratio_avg, read_ratio_max, records_read,
1249
- records_written, status, steps, wait_ratio_avg,
1250
- wait_ratio_max, write_ratio_avg, write_ratio_max
1708
+ def initialize compute_ratio_avg, compute_ratio_max, id, name, read_ratio_avg, read_ratio_max, records_read,
1709
+ records_written, status, steps, wait_ratio_avg, wait_ratio_max, write_ratio_avg,
1710
+ write_ratio_max
1251
1711
  @compute_ratio_avg = compute_ratio_avg
1252
1712
  @compute_ratio_max = compute_ratio_max
1253
1713
  @id = id
@@ -1268,11 +1728,9 @@ module Google
1268
1728
  # @private New Stage from a statistics.query.queryPlan element.
1269
1729
  def self.from_gapi gapi
1270
1730
  steps = Array(gapi.steps).map { |g| Step.from_gapi g }
1271
- new gapi.compute_ratio_avg, gapi.compute_ratio_max, gapi.id,
1272
- gapi.name, gapi.read_ratio_avg, gapi.read_ratio_max,
1273
- gapi.records_read, gapi.records_written, gapi.status, steps,
1274
- gapi.wait_ratio_avg, gapi.wait_ratio_max, gapi.write_ratio_avg,
1275
- gapi.write_ratio_max
1731
+ new gapi.compute_ratio_avg, gapi.compute_ratio_max, gapi.id, gapi.name, gapi.read_ratio_avg,
1732
+ gapi.read_ratio_max, gapi.records_read, gapi.records_written, gapi.status, steps, gapi.wait_ratio_avg,
1733
+ gapi.wait_ratio_max, gapi.write_ratio_avg, gapi.write_ratio_max
1276
1734
  end
1277
1735
  end
1278
1736
 
@@ -1306,7 +1764,8 @@ module Google
1306
1764
  # end
1307
1765
  #
1308
1766
  class Step
1309
- attr_reader :kind, :substeps
1767
+ attr_reader :kind
1768
+ attr_reader :substeps
1310
1769
 
1311
1770
  ##
1312
1771
  # @private Creates a new Stage instance.
@@ -1327,8 +1786,7 @@ module Google
1327
1786
  def ensure_schema!
1328
1787
  return unless destination_schema.nil?
1329
1788
 
1330
- query_results_gapi = service.job_query_results \
1331
- job_id, location: location, max: 0
1789
+ query_results_gapi = service.job_query_results job_id, location: location, max: 0
1332
1790
  # raise "unable to retrieve schema" if query_results_gapi.schema.nil?
1333
1791
  @destination_schema_gapi = query_results_gapi.schema
1334
1792
  end
@@ -1346,9 +1804,10 @@ module Google
1346
1804
  end
1347
1805
 
1348
1806
##
# @private Builds a low-level Table object describing the query's
# destination table, pairing its reference with the (lazily fetched)
# destination schema.
def destination_table_gapi
  table_ref = @gapi.configuration.query.destination_table
  Google::Apis::BigqueryV2::Table.new table_reference: table_ref,
                                      schema: destination_schema
end
1353
1812
  end
1354
1813
  end