google-cloud-bigquery 1.27.0 → 1.32.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +58 -0
  3. data/CONTRIBUTING.md +3 -4
  4. data/LOGGING.md +1 -1
  5. data/OVERVIEW.md +15 -14
  6. data/lib/google/cloud/bigquery/convert.rb +72 -76
  7. data/lib/google/cloud/bigquery/copy_job.rb +1 -0
  8. data/lib/google/cloud/bigquery/data.rb +2 -2
  9. data/lib/google/cloud/bigquery/dataset.rb +181 -62
  10. data/lib/google/cloud/bigquery/dataset/access.rb +3 -3
  11. data/lib/google/cloud/bigquery/dataset/list.rb +2 -2
  12. data/lib/google/cloud/bigquery/external.rb +9 -2619
  13. data/lib/google/cloud/bigquery/external/bigtable_source.rb +230 -0
  14. data/lib/google/cloud/bigquery/external/bigtable_source/column.rb +404 -0
  15. data/lib/google/cloud/bigquery/external/bigtable_source/column_family.rb +945 -0
  16. data/lib/google/cloud/bigquery/external/csv_source.rb +481 -0
  17. data/lib/google/cloud/bigquery/external/data_source.rb +771 -0
  18. data/lib/google/cloud/bigquery/external/json_source.rb +170 -0
  19. data/lib/google/cloud/bigquery/external/parquet_source.rb +148 -0
  20. data/lib/google/cloud/bigquery/external/sheets_source.rb +166 -0
  21. data/lib/google/cloud/bigquery/extract_job.rb +4 -2
  22. data/lib/google/cloud/bigquery/job.rb +9 -3
  23. data/lib/google/cloud/bigquery/job/list.rb +4 -4
  24. data/lib/google/cloud/bigquery/load_job.rb +178 -19
  25. data/lib/google/cloud/bigquery/model/list.rb +2 -2
  26. data/lib/google/cloud/bigquery/policy.rb +2 -1
  27. data/lib/google/cloud/bigquery/project.rb +47 -43
  28. data/lib/google/cloud/bigquery/project/list.rb +2 -2
  29. data/lib/google/cloud/bigquery/query_job.rb +84 -62
  30. data/lib/google/cloud/bigquery/routine.rb +1 -4
  31. data/lib/google/cloud/bigquery/routine/list.rb +2 -2
  32. data/lib/google/cloud/bigquery/schema.rb +39 -3
  33. data/lib/google/cloud/bigquery/schema/field.rb +63 -13
  34. data/lib/google/cloud/bigquery/service.rb +11 -13
  35. data/lib/google/cloud/bigquery/standard_sql.rb +15 -3
  36. data/lib/google/cloud/bigquery/table.rb +312 -69
  37. data/lib/google/cloud/bigquery/table/async_inserter.rb +44 -17
  38. data/lib/google/cloud/bigquery/table/list.rb +2 -2
  39. data/lib/google/cloud/bigquery/version.rb +1 -1
  40. metadata +28 -14
@@ -246,6 +246,7 @@ module Google
  ##
  # @private Create an Updater object.
  def initialize gapi
+ super()
  @gapi = gapi
  end
 
@@ -263,9 +264,10 @@ module Google
  extract_config = Google::Apis::BigqueryV2::JobConfigurationExtract.new(
  destination_uris: Array(storage_urls)
  )
- if source.is_a? Google::Apis::BigqueryV2::TableReference
+ case source
+ when Google::Apis::BigqueryV2::TableReference
  extract_config.source_table = source
- elsif source.is_a? Google::Apis::BigqueryV2::ModelReference
+ when Google::Apis::BigqueryV2::ModelReference
  extract_config.source_model = source
  end
  job = Google::Apis::BigqueryV2::Job.new(
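With this change the extract configuration dispatches on the source type, so table and model extracts flow through the same method (the surrounding code appears to be the `Project#extract_job` plumbing). A hedged usage sketch of the two paths, with illustrative resource names:

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    # Extract a table to newline-delimited JSON files in Cloud Storage.
    table_job = bigquery.extract_job dataset.table("my_table"),
                                     "gs://my-bucket/my_table-*.json",
                                     format: "json"

    # Extract a trained model's artifacts to a Cloud Storage prefix.
    model_job = bigquery.extract_job dataset.model("my_model"),
                                     "gs://my-bucket/my_model"

    [table_job, model_job].each(&:wait_until_done!)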
@@ -507,7 +507,8 @@ module Google
  # @attr_reader [Fixnum] slot_ms The slot-milliseconds the job spent in the given reservation.
  #
  class ReservationUsage
- attr_reader :name, :slot_ms
+ attr_reader :name
+ attr_reader :slot_ms
 
  ##
  # @private Creates a new ReservationUsage instance.
@@ -571,7 +572,8 @@ module Google
  # end
  #
  class ScriptStatistics
- attr_reader :evaluation_kind, :stack_frames
+ attr_reader :evaluation_kind
+ attr_reader :stack_frames
 
  ##
  # @private Creates a new ScriptStatistics instance.
@@ -636,7 +638,11 @@ module Google
  # end
  #
  class ScriptStackFrame
- attr_reader :start_line, :start_column, :end_line, :end_column, :text
+ attr_reader :start_line
+ attr_reader :start_column
+ attr_reader :end_line
+ attr_reader :end_column
+ attr_reader :text
 
  ##
  # @private Creates a new ScriptStackFrame instance.
@@ -72,8 +72,8 @@ module Google
  return nil unless next?
  ensure_service!
  next_kwargs = @kwargs.merge token: token
- next_gapi = @service.list_jobs next_kwargs
- self.class.from_gapi next_gapi, @service, next_kwargs
+ next_gapi = @service.list_jobs(**next_kwargs)
+ self.class.from_gapi next_gapi, @service, **next_kwargs
  end
 
  ##
@@ -121,12 +121,12 @@ module Google
  # puts job.state
  # end
  #
- def all request_limit: nil
+ def all request_limit: nil, &block
  request_limit = request_limit.to_i if request_limit
  return enum_for :all, request_limit: request_limit unless block_given?
  results = self
  loop do
- results.each { |r| yield r }
+ results.each(&block)
  if request_limit
  request_limit -= 1
  break if request_limit.negative?
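Capturing the block as `&block` replaces the explicit `yield` without changing the documented behavior of `all`. A short sketch of the iteration it implements, using the job list returned by `Project#jobs`:

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new

    # Block form: pages are fetched lazily, capped at three list requests.
    bigquery.jobs.all(request_limit: 3) do |job|
      puts "#{job.job_id}: #{job.state}"
    end

    # Without a block, `all` returns an Enumerator over every job.
    failed_jobs = bigquery.jobs.all.select(&:failed?)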
@@ -416,6 +416,49 @@ module Google
  @gapi.configuration.load.hive_partitioning_options.source_uri_prefix if hive_partitioning?
  end
 
+ ###
+ # Checks if Parquet options are set.
+ #
+ # @see https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-parquet Loading Parquet data from Cloud
+ # Storage
+ #
+ # @return [Boolean] `true` when Parquet options are set, or `false` otherwise.
+ #
+ # @!group Attributes
+ #
+ def parquet_options?
+ !@gapi.configuration.load.parquet_options.nil?
+ end
+
+ ###
+ # Indicates whether to use schema inference specifically for Parquet `LIST` logical type.
+ #
+ # @see https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-parquet Loading Parquet data from Cloud
+ # Storage
+ #
+ # @return [Boolean, nil] The `enable_list_inference` value in Parquet options, or `nil` if Parquet options are
+ # not set.
+ #
+ # @!group Attributes
+ #
+ def parquet_enable_list_inference?
+ @gapi.configuration.load.parquet_options.enable_list_inference if parquet_options?
+ end
+
+ ###
+ # Indicates whether to infer Parquet `ENUM` logical type as `STRING` instead of `BYTES` by default.
+ #
+ # @see https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-parquet Loading Parquet data from Cloud
+ # Storage
+ #
+ # @return [Boolean, nil] The `enum_as_string` value in Parquet options, or `nil` if Parquet options are not set.
+ #
+ # @!group Attributes
+ #
+ def parquet_enum_as_string?
+ @gapi.configuration.load.parquet_options.enum_as_string if parquet_options?
+ end
+
  ###
  # Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
  # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
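The three new predicates only read values stored on the load configuration, so they return `nil` until Parquet options are set through the writers added later in this diff. A round-trip sketch using those writers:

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    load_job = dataset.load_job "my_new_table", "gs://mybucket/data/*.parquet" do |job|
      job.format = :parquet
      job.parquet_enable_list_inference = true
      job.parquet_enum_as_string = true
    end

    load_job.parquet_options?                #=> true
    load_job.parquet_enable_list_inference?  #=> true
    load_job.parquet_enum_as_string?         #=> true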
@@ -484,7 +527,7 @@ module Google
  # Checks if the destination table will be time partitioned. See
  # [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
  #
- # @return [Boolean, nil] `true` when the table will be time-partitioned,
+ # @return [Boolean] `true` when the table will be time-partitioned,
  # or `false` otherwise.
  #
  # @!group Attributes
@@ -560,10 +603,15 @@ module Google
  ###
  # Checks if the destination table will be clustered.
  #
+ # See {LoadJob::Updater#clustering_fields=}, {Table#clustering_fields} and
+ # {Table#clustering_fields=}.
+ #
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
- # Introduction to Clustered Tables
+ # Introduction to clustered tables
+ # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
+ # Creating and using clustered tables
  #
- # @return [Boolean, nil] `true` when the table will be clustered,
+ # @return [Boolean] `true` when the table will be clustered,
  # or `false` otherwise.
  #
  # @!group Attributes
@@ -578,14 +626,16 @@ module Google
  # be first partitioned and subsequently clustered. The order of the
  # returned fields determines the sort order of the data.
  #
- # See {LoadJob::Updater#clustering_fields=}.
+ # BigQuery supports clustering for both partitioned and non-partitioned
+ # tables.
+ #
+ # See {LoadJob::Updater#clustering_fields=}, {Table#clustering_fields} and
+ # {Table#clustering_fields=}.
  #
- # @see https://cloud.google.com/bigquery/docs/partitioned-tables
- # Partitioned Tables
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
- # Introduction to Clustered Tables
+ # Introduction to clustered tables
  # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
- # Creating and Using Clustered Tables
+ # Creating and using clustered tables
  #
  # @return [Array<String>, nil] The clustering fields, or `nil` if the
  # destination table will not be clustered.
@@ -606,6 +656,7 @@ module Google
  ##
  # @private Create an Updater object.
  def initialize gapi
+ super()
  @updates = []
  @gapi = gapi
  @schema = nil
@@ -772,9 +823,18 @@ module Google
  end
 
  ##
- # Adds a numeric number field to the schema. Numeric is a
- # fixed-precision numeric type with 38 decimal digits, 9 that follow
- # the decimal point.
+ # Adds a numeric number field to the schema. `NUMERIC` is a decimal
+ # type with fixed precision and scale. Precision is the number of
+ # digits that the number contains. Scale is how many of these
+ # digits appear after the decimal point. It supports:
+ #
+ # Precision: 38
+ # Scale: 9
+ # Min: -9.9999999999999999999999999999999999999E+28
+ # Max: 9.9999999999999999999999999999999999999E+28
+ #
+ # This type can represent decimal fractions exactly, and is suitable
+ # for financial calculations.
  #
  # See {Schema#numeric}
  #
@@ -801,6 +861,45 @@ module Google
  schema.numeric name, description: description, mode: mode
  end
 
+ ##
+ # Adds a bignumeric number field to the schema. `BIGNUMERIC` is a
+ # decimal type with fixed precision and scale. Precision is the
+ # number of digits that the number contains. Scale is how many of
+ # these digits appear after the decimal point. It supports:
+ #
+ # Precision: 76.76 (the 77th digit is partial)
+ # Scale: 38
+ # Min: -5.7896044618658097711785492504343953926634992332820282019728792003956564819968E+38
+ # Max: 5.7896044618658097711785492504343953926634992332820282019728792003956564819967E+38
+ #
+ # This type can represent decimal fractions exactly, and is suitable
+ # for financial calculations.
+ #
+ # See {Schema#bignumeric}
+ #
+ # @param [String] name The field name. The name must contain only
+ # letters (a-z, A-Z), numbers (0-9), or underscores (_), and must
+ # start with a letter or underscore. The maximum length is 128
+ # characters.
+ # @param [String] description A description of the field.
+ # @param [Symbol] mode The field's mode. The possible values are
+ # `:nullable`, `:required`, and `:repeated`. The default value is
+ # `:nullable`.
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ # dataset = bigquery.dataset "my_dataset"
+ # job = dataset.load_job "my_table", "gs://abc/file" do |schema|
+ # schema.bignumeric "total_cost", mode: :required
+ # end
+ #
+ # @!group Schema
+ def bignumeric name, description: nil, mode: :nullable
+ schema.bignumeric name, description: description, mode: mode
+ end
+
  ##
  # Adds a boolean field to the schema.
  #
@@ -1478,6 +1577,66 @@ module Google
  @gapi.configuration.load.hive_partitioning_options.source_uri_prefix = source_uri_prefix
  end
 
+ ##
+ # Sets whether to use schema inference specifically for Parquet `LIST` logical type.
+ #
+ # @see https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-parquet Loading Parquet data from
+ # Cloud Storage
+ #
+ # @param [Boolean] enable_list_inference The `enable_list_inference` value to use in Parquet options.
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ # dataset = bigquery.dataset "my_dataset"
+ #
+ # gcs_uris = ["gs://mybucket/00/*.parquet", "gs://mybucket/01/*.parquet"]
+ # load_job = dataset.load_job "my_new_table", gcs_uris do |job|
+ # job.format = :parquet
+ # job.parquet_enable_list_inference = true
+ # end
+ #
+ # load_job.wait_until_done!
+ # load_job.done? #=> true
+ #
+ # @!group Attributes
+ #
+ def parquet_enable_list_inference= enable_list_inference
+ @gapi.configuration.load.parquet_options ||= Google::Apis::BigqueryV2::ParquetOptions.new
+ @gapi.configuration.load.parquet_options.enable_list_inference = enable_list_inference
+ end
+
+ ##
+ # Sets whether to infer Parquet `ENUM` logical type as `STRING` instead of `BYTES` by default.
+ #
+ # @see https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-parquet Loading Parquet data from
+ # Cloud Storage
+ #
+ # @param [Boolean] enum_as_string The `enum_as_string` value to use in Parquet options.
+ #
+ # @example
+ # require "google/cloud/bigquery"
+ #
+ # bigquery = Google::Cloud::Bigquery.new
+ # dataset = bigquery.dataset "my_dataset"
+ #
+ # gcs_uris = ["gs://mybucket/00/*.parquet", "gs://mybucket/01/*.parquet"]
+ # load_job = dataset.load_job "my_new_table", gcs_uris do |job|
+ # job.format = :parquet
+ # job.parquet_enum_as_string = true
+ # end
+ #
+ # load_job.wait_until_done!
+ # load_job.done? #=> true
+ #
+ # @!group Attributes
+ #
+ def parquet_enum_as_string= enum_as_string
+ @gapi.configuration.load.parquet_options ||= Google::Apis::BigqueryV2::ParquetOptions.new
+ @gapi.configuration.load.parquet_options.enum_as_string = enum_as_string
+ end
+
  ##
  # Sets the field on which to range partition the table. See [Creating and using integer range partitioned
  # tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
@@ -1770,23 +1929,23 @@ module Google
  end
 
  ##
- # Sets one or more fields on which the destination table should be
- # clustered. Must be specified with time-based partitioning, data in
- # the table will be first partitioned and subsequently clustered.
+ # Sets the list of fields on which data should be clustered.
  #
  # Only top-level, non-repeated, simple-type fields are supported. When
  # you cluster a table using multiple columns, the order of columns you
  # specify is important. The order of the specified columns determines
  # the sort order of the data.
  #
- # See {LoadJob#clustering_fields}.
+ # BigQuery supports clustering for both partitioned and non-partitioned
+ # tables.
+ #
+ # See {LoadJob#clustering_fields}, {Table#clustering_fields} and
+ # {Table#clustering_fields=}.
  #
- # @see https://cloud.google.com/bigquery/docs/partitioned-tables
- # Partitioned Tables
  # @see https://cloud.google.com/bigquery/docs/clustered-tables
- # Introduction to Clustered Tables
+ # Introduction to clustered tables
  # @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
- # Creating and Using Clustered Tables
+ # Creating and using clustered tables
  #
  # @param [Array<String>] fields The clustering fields. Only top-level,
  # non-repeated, simple-type fields are supported.
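The reworded documentation drops the old requirement that clustering be combined with time-based partitioning. A hedged sketch of clustering a load destination with no partitioning configured:

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new
    dataset  = bigquery.dataset "my_dataset"

    load_job = dataset.load_job "events", "gs://mybucket/events/*.csv" do |job|
      # No time partitioning is set; clustering on its own is supported.
      job.clustering_fields = ["customer_id", "event_type"]
    end

    load_job.wait_until_done!
    load_job.clustering_fields #=> ["customer_id", "event_type"]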
@@ -124,12 +124,12 @@ module Google
  # puts model.model_id
  # end
  #
- def all request_limit: nil
+ def all request_limit: nil, &block
  request_limit = request_limit.to_i if request_limit
  return enum_for :all, request_limit: request_limit unless block_given?
  results = self
  loop do
- results.each { |r| yield r }
+ results.each(&block)
  if request_limit
  request_limit -= 1
  break if request_limit.negative?
@@ -96,7 +96,8 @@ module Google
  # end
  #
  class Policy
- attr_reader :etag, :bindings
+ attr_reader :etag
+ attr_reader :bindings
 
  # @private
  def initialize etag, bindings
@@ -56,7 +56,8 @@ module Google
  # @private The Service object.
  attr_accessor :service
 
- attr_reader :name, :numeric_id
+ attr_reader :name
+ attr_reader :numeric_id
 
  ##
  # Creates a new Service instance.
@@ -292,35 +293,37 @@ module Google
  #
  # Ruby types are mapped to BigQuery types as follows:
  #
- # | BigQuery | Ruby | Notes |
- # |-------------|--------------------------------------|------------------------------------------------|
- # | `BOOL` | `true`/`false` | |
- # | `INT64` | `Integer` | |
- # | `FLOAT64` | `Float` | |
- # | `NUMERIC` | `BigDecimal` | Will be rounded to 9 decimal places |
- # | `STRING` | `String` | |
- # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
- # | `DATE` | `Date` | |
- # | `TIMESTAMP` | `Time` | |
- # | `TIME` | `Google::Cloud::BigQuery::Time` | |
- # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
- # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
- # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
+ # | BigQuery | Ruby | Notes |
+ # |--------------|--------------------------------------|----------------------------------------------------|
+ # | `BOOL` | `true`/`false` | |
+ # | `INT64` | `Integer` | |
+ # | `FLOAT64` | `Float` | |
+ # | `NUMERIC` | `BigDecimal` | `BigDecimal` values will be rounded to scale 9. |
+ # | `BIGNUMERIC` | | Query param values must be mapped in `types`. |
+ # | `STRING` | `String` | |
+ # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
+ # | `DATE` | `Date` | |
+ # | `TIMESTAMP` | `Time` | |
+ # | `TIME` | `Google::Cloud::BigQuery::Time` | |
+ # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
+ # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
+ # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
  #
  # See [Data Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types) for an overview
  # of each BigQuery data type, including allowed values.
- # @param [Array, Hash] types Standard SQL only. Types of the SQL parameters in `params`. It is not always to
- # infer the right SQL type from a value in `params`. In these cases, `types` must be used to specify the SQL
- # type for these values.
+ # @param [Array, Hash] types Standard SQL only. Types of the SQL parameters in `params`. It is not always
+ # possible to infer the right SQL type from a value in `params`. In these cases, `types` must be used to
+ # specify the SQL type for these values.
  #
- # Must match the value type passed to `params`. This must be an `Array` when the query uses positional query
- # parameters. This must be an `Hash` when the query uses named query parameters. The values should be BigQuery
- # type codes from the following list:
+ # Arguments must match the value type passed to `params`. This must be an `Array` when the query uses
+ # positional query parameters. This must be an `Hash` when the query uses named query parameters. The values
+ # should be BigQuery type codes from the following list:
  #
  # * `:BOOL`
  # * `:INT64`
  # * `:FLOAT64`
  # * `:NUMERIC`
+ # * `:BIGNUMERIC`
  # * `:STRING`
  # * `:DATETIME`
  # * `:DATE`
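Because no Ruby value maps unambiguously to `BIGNUMERIC`, the updated table notes that such parameters must be declared through `types`. A hedged sketch using the documented `params`/`types` keywords of `Project#query`:

    require "google/cloud/bigquery"
    require "bigdecimal"

    bigquery = Google::Cloud::Bigquery.new

    # Without the :BIGNUMERIC mapping, a BigDecimal param is sent as NUMERIC
    # and rounded to scale 9.
    amount = BigDecimal("123456789.12345678901234567890123456789")

    data = bigquery.query "SELECT @amount AS amount",
                          params: { amount: amount },
                          types:  { amount: :BIGNUMERIC }

    data.each { |row| puts row[:amount] }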
@@ -637,35 +640,37 @@ module Google
  #
  # Ruby types are mapped to BigQuery types as follows:
  #
- # | BigQuery | Ruby | Notes |
- # |-------------|--------------------------------------|------------------------------------------------|
- # | `BOOL` | `true`/`false` | |
- # | `INT64` | `Integer` | |
- # | `FLOAT64` | `Float` | |
- # | `NUMERIC` | `BigDecimal` | Will be rounded to 9 decimal places |
- # | `STRING` | `String` | |
- # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
- # | `DATE` | `Date` | |
- # | `TIMESTAMP` | `Time` | |
- # | `TIME` | `Google::Cloud::BigQuery::Time` | |
- # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
- # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
- # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
+ # | BigQuery | Ruby | Notes |
+ # |--------------|--------------------------------------|----------------------------------------------------|
+ # | `BOOL` | `true`/`false` | |
+ # | `INT64` | `Integer` | |
+ # | `FLOAT64` | `Float` | |
+ # | `NUMERIC` | `BigDecimal` | `BigDecimal` values will be rounded to scale 9. |
+ # | `BIGNUMERIC` | | Query param values must be mapped in `types`. |
+ # | `STRING` | `String` | |
+ # | `DATETIME` | `DateTime` | `DATETIME` does not support time zone. |
+ # | `DATE` | `Date` | |
+ # | `TIMESTAMP` | `Time` | |
+ # | `TIME` | `Google::Cloud::BigQuery::Time` | |
+ # | `BYTES` | `File`, `IO`, `StringIO`, or similar | |
+ # | `ARRAY` | `Array` | Nested arrays, `nil` values are not supported. |
+ # | `STRUCT` | `Hash` | Hash keys may be strings or symbols. |
  #
  # See [Data Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types) for an overview
  # of each BigQuery data type, including allowed values.
- # @param [Array, Hash] types Standard SQL only. Types of the SQL parameters in `params`. It is not always to
- # infer the right SQL type from a value in `params`. In these cases, `types` must be used to specify the SQL
- # type for these values.
+ # @param [Array, Hash] types Standard SQL only. Types of the SQL parameters in `params`. It is not always
+ # possible to infer the right SQL type from a value in `params`. In these cases, `types` must be used to
+ # specify the SQL type for these values.
  #
- # Must match the value type passed to `params`. This must be an `Array` when the query uses positional query
- # parameters. This must be an `Hash` when the query uses named query parameters. The values should be BigQuery
- # type codes from the following list:
+ # Arguments must match the value type passed to `params`. This must be an `Array` when the query uses
+ # positional query parameters. This must be an `Hash` when the query uses named query parameters. The values
+ # should be BigQuery type codes from the following list:
  #
  # * `:BOOL`
  # * `:INT64`
  # * `:FLOAT64`
  # * `:NUMERIC`
+ # * `:BIGNUMERIC`
  # * `:STRING`
  # * `:DATETIME`
  # * `:DATE`
@@ -981,8 +986,7 @@ module Google
  # @param [String] description A user-friendly description of the
  # dataset.
  # @param [Integer] expiration The default lifetime of all tables in the
- # dataset, in milliseconds. The minimum value is 3600000 milliseconds
- # (one hour).
+ # dataset, in milliseconds. The minimum value is `3_600_000` (one hour).
  # @param [String] location The geographic location where the dataset
  # should reside. Possible values include `EU` and `US`. The default
  # value is `US`.
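Assuming the surrounding parameter list belongs to `Project#create_dataset` (whose documented keywords include `description`, `expiration`, and `location`), a minimal sketch of setting the one-hour minimum default table lifetime:

    require "google/cloud/bigquery"

    bigquery = Google::Cloud::Bigquery.new

    dataset = bigquery.create_dataset "my_dataset",
                                      description: "Scratch tables",
                                      expiration:  3_600_000, # one hour, the documented minimum
                                      location:    "EU"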