google-cloud-bigquery-storage-v1 0.11.1 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +5 -5
- data/lib/google/cloud/bigquery/storage/v1/avro_pb.rb +4 -0
- data/lib/google/cloud/bigquery/storage/v1/big_query_read/client.rb +10 -1
- data/lib/google/cloud/bigquery/storage/v1/storage_pb.rb +2 -0
- data/lib/google/cloud/bigquery/storage/v1/stream_pb.rb +1 -0
- data/lib/google/cloud/bigquery/storage/v1/version.rb +1 -1
- data/proto_docs/google/cloud/bigquery/storage/v1/avro.rb +18 -0
- data/proto_docs/google/cloud/bigquery/storage/v1/storage.rb +17 -0
- data/proto_docs/google/cloud/bigquery/storage/v1/stream.rb +50 -4
- metadata +11 -11
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6eb514d8a87f950d7333d8a24f08ba2e8e5710fdbe055f890bea49f3b742b0a4
+  data.tar.gz: cd8f0057d5ad2da143516f146c04c208e6f219f29fef22271d26be21d7e08b2e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c521cfb6d963a905aa31e3f213220c1c5dd939ac8c42d78a630f0f3f6c41656c7e17f3c4446e6ea1c8bec8356390697288670ffd1c637fc9144607ef199df9a2
+  data.tar.gz: adae465e4f76681dacefd2fd7057c0a972beb7053b236a3bfec484b48371477469e700b2c030b8434bc6e030716ce3962143cc6965929f67f4b2d433402c8d52
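The digests above cover the metadata.gz and data.tar.gz entries packed inside the .gem archive. A minimal verification sketch, assuming the 0.14.0 gem has been fetched to the current directory (the filename below is the standard `gem fetch` output, not something stated in this diff):

  require "digest"
  require "rubygems/package"

  # A .gem file is a tar archive whose entries include metadata.gz and
  # data.tar.gz; hash each entry and compare against checksums.yaml above.
  File.open("google-cloud-bigquery-storage-v1-0.14.0.gem", "rb") do |io|
    Gem::Package::TarReader.new(io) do |tar|
      tar.each do |entry|
        next unless ["metadata.gz", "data.tar.gz"].include?(entry.full_name)
        puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
      end
    end
  end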
data/README.md CHANGED
@@ -76,14 +76,14 @@ To browse ready to use code samples check [Google Cloud Samples](https://cloud.g
 
 ## Supported Ruby Versions
 
-This library is supported on Ruby 2.
+This library is supported on Ruby 2.6+.
 
 Google provides official support for Ruby versions that are actively supported
 by Ruby Core—that is, Ruby versions that are either in normal maintenance or
-in security maintenance, and not end of life.
-
-
-
+in security maintenance, and not end of life. Older versions of Ruby _may_
+still work, but are unsupported and not recommended. See
+https://www.ruby-lang.org/en/downloads/branches/ for details about the Ruby
+support schedule.
 
 ## Which client should I use?
 
data/lib/google/cloud/bigquery/storage/v1/avro_pb.rb CHANGED
@@ -12,6 +12,9 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :serialized_binary_rows, :bytes, 1
       optional :row_count, :int64, 2
     end
+    add_message "google.cloud.bigquery.storage.v1.AvroSerializationOptions" do
+      optional :enable_display_name_attribute, :bool, 1
+    end
   end
 end
 
@@ -22,6 +25,7 @@ module Google
         module V1
           AvroSchema = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSchema").msgclass
           AvroRows = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroRows").msgclass
+          AvroSerializationOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSerializationOptions").msgclass
         end
       end
     end
data/lib/google/cloud/bigquery/storage/v1/big_query_read/client.rb CHANGED
@@ -193,7 +193,7 @@ module Google
             #   @param options [::Gapic::CallOptions, ::Hash]
             #     Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
             #
-            # @overload create_read_session(parent: nil, read_session: nil, max_stream_count: nil)
+            # @overload create_read_session(parent: nil, read_session: nil, max_stream_count: nil, preferred_min_stream_count: nil)
             #   Pass arguments to `create_read_session` via keyword arguments. Note that at
             #   least one keyword argument is required. To specify no parameters, or to keep all
             #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -214,6 +214,15 @@ module Google
             #     Typically, clients should either leave this unset to let the system to
             #     determine an upper bound OR set this a size for the maximum "units of work"
             #     it can gracefully handle.
+            #   @param preferred_min_stream_count [::Integer]
+            #     The minimum preferred stream count. This parameter can be used to inform
+            #     the service that there is a desired lower bound on the number of streams.
+            #     This is typically a target parallelism of the client (e.g. a Spark
+            #     cluster with N-workers would set this to a low multiple of N to ensure
+            #     good cluster utilization).
+            #
+            #     The system will make a best effort to provide at least this number of
+            #     streams, but in some cases might provide less.
             #
             # @yield [response, operation] Access the result along with the RPC operation
             # @yieldparam response [::Google::Cloud::Bigquery::Storage::V1::ReadSession]
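The new keyword argument slots in alongside max_stream_count. A minimal sketch, assuming placeholder project/dataset/table names and default application credentials; only the overload signature shown above is taken from the diff:

  require "google/cloud/bigquery/storage/v1"

  client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

  session = client.create_read_session(
    parent: "projects/my-project",                                        # placeholder
    read_session: {
      table: "projects/my-project/datasets/my_dataset/tables/my_table",  # placeholder
      data_format: :AVRO
    },
    max_stream_count: 16,           # upper bound on streams
    preferred_min_stream_count: 4   # e.g. a reader pool of 4 workers
  )

  puts "streams allocated: #{session.streams.size}"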
data/lib/google/cloud/bigquery/storage/v1/storage_pb.rb CHANGED
@@ -22,6 +22,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :parent, :string, 1
       optional :read_session, :message, 2, "google.cloud.bigquery.storage.v1.ReadSession"
       optional :max_stream_count, :int32, 3
+      optional :preferred_min_stream_count, :int32, 4
     end
     add_message "google.cloud.bigquery.storage.v1.ReadRowsRequest" do
       optional :read_stream, :string, 1
@@ -77,6 +78,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
     add_message "google.cloud.bigquery.storage.v1.AppendRowsResponse" do
       optional :updated_schema, :message, 3, "google.cloud.bigquery.storage.v1.TableSchema"
       repeated :row_errors, :message, 4, "google.cloud.bigquery.storage.v1.RowError"
+      optional :write_stream, :string, 5
       oneof :response do
         optional :append_result, :message, 1, "google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult"
         optional :error, :message, 2, "google.rpc.Status"
data/lib/google/cloud/bigquery/storage/v1/stream_pb.rb CHANGED
@@ -35,6 +35,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :row_restriction, :string, 2
       oneof :output_format_serialization_options do
         optional :arrow_serialization_options, :message, 3, "google.cloud.bigquery.storage.v1.ArrowSerializationOptions"
+        optional :avro_serialization_options, :message, 4, "google.cloud.bigquery.storage.v1.AvroSerializationOptions"
       end
     end
     add_message "google.cloud.bigquery.storage.v1.ReadStream" do
data/proto_docs/google/cloud/bigquery/storage/v1/avro.rb CHANGED
@@ -44,6 +44,24 @@ module Google
            include ::Google::Protobuf::MessageExts
            extend ::Google::Protobuf::MessageExts::ClassMethods
          end
+
+         # Contains options specific to Avro Serialization.
+         # @!attribute [rw] enable_display_name_attribute
+         #   @return [::Boolean]
+         #     Enable displayName attribute in Avro schema.
+         #
+         #     The Avro specification requires field names to be alphanumeric. By
+         #     default, in cases when column names do not conform to these requirements
+         #     (e.g. non-ascii unicode codepoints) and Avro is requested as an output
+         #     format, the CreateReadSession call will fail.
+         #
+         #     Setting this field to true, populates avro field names with a placeholder
+         #     value and populates a "displayName" attribute for every avro field with the
+         #     original column name.
+         class AvroSerializationOptions
+           include ::Google::Protobuf::MessageExts
+           extend ::Google::Protobuf::MessageExts::ClassMethods
+         end
       end
     end
   end
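A minimal sketch of attaching the new message to a read session's read options; the resource name is a placeholder, and passing a hash for the nested read_options message is ordinary protobuf-ruby behavior rather than something specified in this diff:

  require "google/cloud/bigquery/storage/v1"

  avro_options = Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions.new(
    enable_display_name_attribute: true   # keep original column names as a "displayName" attribute
  )

  read_session = Google::Cloud::Bigquery::Storage::V1::ReadSession.new(
    table: "projects/my-project/datasets/my_dataset/tables/my_table",  # placeholder
    data_format: :AVRO,
    read_options: { avro_serialization_options: avro_options }
  )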
data/proto_docs/google/cloud/bigquery/storage/v1/storage.rb CHANGED
@@ -42,6 +42,16 @@ module Google
         #     Typically, clients should either leave this unset to let the system to
         #     determine an upper bound OR set this a size for the maximum "units of work"
         #     it can gracefully handle.
+        # @!attribute [rw] preferred_min_stream_count
+        #   @return [::Integer]
+        #     The minimum preferred stream count. This parameter can be used to inform
+        #     the service that there is a desired lower bound on the number of streams.
+        #     This is typically a target parallelism of the client (e.g. a Spark
+        #     cluster with N-workers would set this to a low multiple of N to ensure
+        #     good cluster utilization).
+        #
+        #     The system will make a best effort to provide at least this number of
+        #     streams, but in some cases might provide less.
         class CreateReadSessionRequest
           include ::Google::Protobuf::MessageExts
           extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -183,6 +193,9 @@ module Google
         # Due to the nature of AppendRows being a bidirectional streaming RPC, certain
         # parts of the AppendRowsRequest need only be specified for the first request
         # sent each time the gRPC network connection is opened/reopened.
+        #
+        # The size of a single AppendRowsRequest must be less than 10 MB in size.
+        # Requests larger than this return an error, typically `INVALID_ARGUMENT`.
         # @!attribute [rw] write_stream
         #   @return [::String]
         #     Required. The write_stream identifies the target of the append operation, and only
@@ -269,6 +282,10 @@ module Google
         #     If a request failed due to corrupted rows, no rows in the batch will be
         #     appended. The API will return row level error info, so that the caller can
         #     remove the bad rows and retry the request.
+        # @!attribute [rw] write_stream
+        #   @return [::String]
+        #     The target of the append operation. Matches the write_stream in the
+        #     corresponding request.
         class AppendRowsResponse
           include ::Google::Protobuf::MessageExts
           extend ::Google::Protobuf::MessageExts::ClassMethods
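A sketch of how a caller might act on the two AppendRows notes above: check the serialized request size before sending, then read the echoed write_stream off each response. The stream name is a placeholder, and the request below carries no row data; building a real writer_schema/rows payload is outside the scope of this diff:

  require "google/cloud/bigquery/storage/v1"

  MAX_APPEND_REQUEST_BYTES = 10 * 1024 * 1024  # documented per-request limit

  # Placeholder default write stream name.
  stream_name = "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default"

  request = Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.new(
    write_stream: stream_name
  )

  # Oversized requests are rejected, typically with INVALID_ARGUMENT, so check first.
  encoded = Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.encode(request)
  raise "AppendRowsRequest exceeds 10 MB" if encoded.bytesize >= MAX_APPEND_REQUEST_BYTES

  client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new
  client.append_rows([request]).each do |response|
    # The response now identifies the stream the append applied to.
    puts response.write_stream
  end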
data/proto_docs/google/cloud/bigquery/storage/v1/stream.rb CHANGED
@@ -88,10 +88,53 @@ module Google
           # Options dictating how we read a table.
           # @!attribute [rw] selected_fields
           #   @return [::Array<::String>]
-          #
-          #
-          #
-          #
+          #     Optional. The names of the fields in the table to be returned. If no
+          #     field names are specified, then all fields in the table are returned.
+          #
+          #     Nested fields -- the child elements of a STRUCT field -- can be selected
+          #     individually using their fully-qualified names, and will be returned as
+          #     record fields containing only the selected nested fields. If a STRUCT
+          #     field is specified in the selected fields list, all of the child elements
+          #     will be returned.
+          #
+          #     As an example, consider a table with the following schema:
+          #
+          #       {
+          #           "name": "struct_field",
+          #           "type": "RECORD",
+          #           "mode": "NULLABLE",
+          #           "fields": [
+          #               {
+          #                   "name": "string_field1",
+          #                   "type": "STRING",
+          # .                 "mode": "NULLABLE"
+          #               },
+          #               {
+          #                   "name": "string_field2",
+          #                   "type": "STRING",
+          #                   "mode": "NULLABLE"
+          #               }
+          #           ]
+          #       }
+          #
+          #     Specifying "struct_field" in the selected fields list will result in a
+          #     read session schema with the following logical structure:
+          #
+          #       struct_field {
+          #           string_field1
+          #           string_field2
+          #       }
+          #
+          #     Specifying "struct_field.string_field1" in the selected fields list will
+          #     result in a read session schema with the following logical structure:
+          #
+          #       struct_field {
+          #           string_field1
+          #       }
+          #
+          #     The order of the fields in the read session schema is derived from the
+          #     table schema and does not correspond to the order in which the fields are
+          #     specified in this list.
           # @!attribute [rw] row_restriction
           #   @return [::String]
           #     SQL text filtering statement, similar to a WHERE clause in a query.
@@ -107,6 +150,9 @@ module Google
           # @!attribute [rw] arrow_serialization_options
           #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
           #     Optional. Options specific to the Apache Arrow output format.
+          # @!attribute [rw] avro_serialization_options
+          #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
+          #     Optional. Options specific to the Apache Avro output format
           class TableReadOptions
             include ::Google::Protobuf::MessageExts
             extend ::Google::Protobuf::MessageExts::ClassMethods
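A short sketch tying selected_fields and row_restriction together, reusing the struct_field example from the documentation above; the table name and the extra scalar column are placeholders:

  require "google/cloud/bigquery/storage/v1"

  read_options = Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions.new(
    # Select only one child of the STRUCT column, plus a hypothetical scalar column.
    selected_fields: ["struct_field.string_field1", "other_column"],
    row_restriction: "other_column IS NOT NULL"
  )

  read_session = Google::Cloud::Bigquery::Storage::V1::ReadSession.new(
    table: "projects/my-project/datasets/my_dataset/tables/my_table",  # placeholder
    data_format: :AVRO,
    read_options: read_options
  )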
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-bigquery-storage-v1
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.14.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-
+date: 2022-08-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '0.
+        version: '0.10'
     - - "<"
       - !ruby/object:Gem::Version
         version: 2.a
@@ -26,7 +26,7 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '0.
+        version: '0.10'
     - - "<"
       - !ruby/object:Gem::Version
         version: 2.a
@@ -50,28 +50,28 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 1.
+        version: 1.26.1
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 1.
+        version: 1.26.1
 - !ruby/object:Gem::Dependency
   name: minitest
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '5.
+        version: '5.16'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '5.
+        version: '5.16'
 - !ruby/object:Gem::Dependency
   name: minitest-focus
   requirement: !ruby/object:Gem::Requirement
@@ -106,14 +106,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '13.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '13.0'
 - !ruby/object:Gem::Dependency
   name: redcarpet
   requirement: !ruby/object:Gem::Requirement
@@ -212,7 +212,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '2.
+      version: '2.6'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="