google-cloud-bigquery-storage-v1 0.12.0 → 0.15.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 97e012ed12b5d17636615b23153be73fd4006ccc47ba0955a722139b09c2f53d
-  data.tar.gz: c2dd5d3b6a99d21fe96e99c2348e578307fbe67c5cd21c829385fdfa89e372df
+  metadata.gz: 0abc8b859be6c644255db082774e841e2489e7b24ed7035cb7471cf9f163fd9c
+  data.tar.gz: f9ee0a03278e1d077608a5ccca9d76083b7fa3ef994e8dd2c0618b6d636105ed
 SHA512:
-  metadata.gz: 40865f1fe1a363798ce1a66aa55f10b450ce09e9aaa6768b0d7c5723901face80a5cec1a4fd09e742e2b8a51f2638fd94bcdb25f650e65758f46612e14ea9fcc
-  data.tar.gz: 9d151f5e6c122112eafca91af8c33d21c01ce03d10895fddca917318300df57c00f2a9c86f9260a10ad5fb37ae02542c3e48d5e73681f1435b59b97b403c247e
+  metadata.gz: ebab73bb11c6015e4fc2912926dd9bb23e746e63eaaa5452db60d5b0a2301ab7437db54e15e2578f35d3b18ac73cac6cde1569e74352bab072e2b8c2e6d1db21
+  data.tar.gz: 7ad7a13d18b4385c8676414e5172fb90a8b8d4d6908bbfdbc48b9a6e4befb0bd2b1c6d8a5d7e8b4fee0d1e39efd8b48f09e92ed6698bf61f4a8664064a5fddd4
data/AUTHENTICATION.md CHANGED
@@ -114,7 +114,7 @@ credentials are discovered.
 To configure your system for this, simply:
 
 1. [Download and install the Cloud SDK](https://cloud.google.com/sdk)
-2. Authenticate using OAuth 2.0 `$ gcloud auth login`
+2. Authenticate using OAuth 2.0 `$ gcloud auth application-default login`
 3. Write code as if already authenticated.
 
 **NOTE:** This is _not_ recommended for running in production. The Cloud SDK
data/lib/google/cloud/bigquery/storage/v1/avro_pb.rb CHANGED
@@ -12,6 +12,9 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :serialized_binary_rows, :bytes, 1
       optional :row_count, :int64, 2
     end
+    add_message "google.cloud.bigquery.storage.v1.AvroSerializationOptions" do
+      optional :enable_display_name_attribute, :bool, 1
+    end
   end
 end
 
@@ -22,6 +25,7 @@ module Google
         module V1
           AvroSchema = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSchema").msgclass
           AvroRows = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroRows").msgclass
+          AvroSerializationOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.AvroSerializationOptions").msgclass
         end
       end
     end
data/lib/google/cloud/bigquery/storage/v1/big_query_read/client.rb CHANGED
@@ -193,7 +193,7 @@ module Google
             # @param options [::Gapic::CallOptions, ::Hash]
             #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
             #
-            # @overload create_read_session(parent: nil, read_session: nil, max_stream_count: nil)
+            # @overload create_read_session(parent: nil, read_session: nil, max_stream_count: nil, preferred_min_stream_count: nil)
             #   Pass arguments to `create_read_session` via keyword arguments. Note that at
             #   least one keyword argument is required. To specify no parameters, or to keep all
             #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -214,6 +214,15 @@ module Google
             #     Typically, clients should either leave this unset to let the system to
             #     determine an upper bound OR set this a size for the maximum "units of work"
             #     it can gracefully handle.
+            #   @param preferred_min_stream_count [::Integer]
+            #     The minimum preferred stream count. This parameter can be used to inform
+            #     the service that there is a desired lower bound on the number of streams.
+            #     This is typically a target parallelism of the client (e.g. a Spark
+            #     cluster with N-workers would set this to a low multiple of N to ensure
+            #     good cluster utilization).
+            #
+            #     The system will make a best effort to provide at least this number of
+            #     streams, but in some cases might provide less.
             #
             # @yield [response, operation] Access the result along with the RPC operation
             # @yieldparam response [::Google::Cloud::Bigquery::Storage::V1::ReadSession]
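A minimal sketch of the new parameter in use, assuming a hypothetical project and table (the identifiers are placeholders, not from the diff):

```ruby
require "google/cloud/bigquery/storage/v1"

client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

session = client.create_read_session(
  parent: "projects/my-project",  # placeholder project
  read_session: {
    table: "projects/my-project/datasets/my_dataset/tables/my_table",
    data_format: :AVRO
  },
  max_stream_count: 16,
  # New in this release: hint a desired lower bound on parallelism.
  # The service makes a best effort and may still return fewer streams.
  preferred_min_stream_count: 4
)

puts "received #{session.streams.count} streams"
```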
data/lib/google/cloud/bigquery/storage/v1/big_query_write/client.rb CHANGED
@@ -401,7 +401,7 @@ module Google
             # @param options [::Gapic::CallOptions, ::Hash]
             #   Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
             #
-            # @overload get_write_stream(name: nil)
+            # @overload get_write_stream(name: nil, view: nil)
             #   Pass arguments to `get_write_stream` via keyword arguments. Note that at
             #   least one keyword argument is required. To specify no parameters, or to keep all
             #   the default parameter values, pass an empty Hash as a request object (see above).
@@ -409,6 +409,9 @@ module Google
             #   @param name [::String]
             #     Required. Name of the stream to get, in the form of
             #     `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+            #   @param view [::Google::Cloud::Bigquery::Storage::V1::WriteStreamView]
+            #     Indicates whether to get full or partial view of the WriteStream. If
+            #     not set, view returned will be basic.
             #
             # @yield [response, operation] Access the result along with the RPC operation
             # @yieldparam response [::Google::Cloud::Bigquery::Storage::V1::WriteStream]
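A sketch of the new `view` argument, assuming a hypothetical table whose default write stream is inspected:

```ruby
require "google/cloud/bigquery/storage/v1"

write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new

# Placeholder stream name; "_default" is a table's default write stream.
name = "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default"

# Without view: the BASIC projection is returned, which omits the schema.
stream = write_client.get_write_stream name: name, view: :FULL

puts stream.location      # `location` is also new in this release
puts stream.table_schema  # populated only in the FULL view
```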
data/lib/google/cloud/bigquery/storage/v1/storage_pb.rb CHANGED
@@ -22,6 +22,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :parent, :string, 1
       optional :read_session, :message, 2, "google.cloud.bigquery.storage.v1.ReadSession"
       optional :max_stream_count, :int32, 3
+      optional :preferred_min_stream_count, :int32, 4
     end
     add_message "google.cloud.bigquery.storage.v1.ReadRowsRequest" do
       optional :read_stream, :string, 1
@@ -77,6 +78,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
     add_message "google.cloud.bigquery.storage.v1.AppendRowsResponse" do
       optional :updated_schema, :message, 3, "google.cloud.bigquery.storage.v1.TableSchema"
       repeated :row_errors, :message, 4, "google.cloud.bigquery.storage.v1.RowError"
+      optional :write_stream, :string, 5
       oneof :response do
         optional :append_result, :message, 1, "google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult"
         optional :error, :message, 2, "google.rpc.Status"
@@ -87,6 +89,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
     end
     add_message "google.cloud.bigquery.storage.v1.GetWriteStreamRequest" do
       optional :name, :string, 1
+      optional :view, :enum, 3, "google.cloud.bigquery.storage.v1.WriteStreamView"
     end
     add_message "google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest" do
       optional :parent, :string, 1
data/lib/google/cloud/bigquery/storage/v1/stream_pb.rb CHANGED
@@ -35,6 +35,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :row_restriction, :string, 2
       oneof :output_format_serialization_options do
         optional :arrow_serialization_options, :message, 3, "google.cloud.bigquery.storage.v1.ArrowSerializationOptions"
+        optional :avro_serialization_options, :message, 4, "google.cloud.bigquery.storage.v1.AvroSerializationOptions"
       end
     end
     add_message "google.cloud.bigquery.storage.v1.ReadStream" do
@@ -47,6 +48,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       optional :commit_time, :message, 4, "google.protobuf.Timestamp"
       optional :table_schema, :message, 5, "google.cloud.bigquery.storage.v1.TableSchema"
       optional :write_mode, :enum, 7, "google.cloud.bigquery.storage.v1.WriteStream.WriteMode"
+      optional :location, :string, 8
     end
     add_enum "google.cloud.bigquery.storage.v1.WriteStream.Type" do
       value :TYPE_UNSPECIFIED, 0
@@ -63,6 +65,11 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
       value :AVRO, 1
       value :ARROW, 2
     end
+    add_enum "google.cloud.bigquery.storage.v1.WriteStreamView" do
+      value :WRITE_STREAM_VIEW_UNSPECIFIED, 0
+      value :BASIC, 1
+      value :FULL, 2
+    end
   end
 end
 
@@ -79,6 +86,7 @@ module Google
           WriteStream::Type = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.WriteStream.Type").enummodule
           WriteStream::WriteMode = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.WriteStream.WriteMode").enummodule
           DataFormat = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.DataFormat").enummodule
+          WriteStreamView = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.WriteStreamView").enummodule
         end
       end
     end
data/lib/google/cloud/bigquery/storage/v1/version.rb CHANGED
@@ -22,7 +22,7 @@ module Google
     module Bigquery
       module Storage
         module V1
-          VERSION = "0.12.0"
+          VERSION = "0.15.0"
         end
       end
     end
data/proto_docs/google/cloud/bigquery/storage/v1/avro.rb CHANGED
@@ -44,6 +44,24 @@ module Google
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
+
+       # Contains options specific to Avro Serialization.
+       # @!attribute [rw] enable_display_name_attribute
+       #   @return [::Boolean]
+       #     Enable displayName attribute in Avro schema.
+       #
+       #     The Avro specification requires field names to be alphanumeric. By
+       #     default, in cases when column names do not conform to these requirements
+       #     (e.g. non-ascii unicode codepoints) and Avro is requested as an output
+       #     format, the CreateReadSession call will fail.
+       #
+       #     Setting this field to true, populates avro field names with a placeholder
+       #     value and populates a "displayName" attribute for every avro field with the
+       #     original column name.
+       class AvroSerializationOptions
+         include ::Google::Protobuf::MessageExts
+         extend ::Google::Protobuf::MessageExts::ClassMethods
+       end
      end
    end
  end
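A sketch of enabling the option when creating a read session, reusing the `client` from the earlier sketch (identifiers remain placeholders):

```ruby
# Columns whose names are not valid Avro identifiers would normally make
# CreateReadSession fail when Avro output is requested; with
# enable_display_name_attribute the Avro fields get placeholder names and the
# original column name travels in a "displayName" attribute instead.
session = client.create_read_session(
  parent: "projects/my-project",
  read_session: {
    table: "projects/my-project/datasets/my_dataset/tables/my_table",
    data_format: :AVRO,
    read_options: {
      avro_serialization_options: { enable_display_name_attribute: true }
    }
  }
)
```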
data/proto_docs/google/cloud/bigquery/storage/v1/storage.rb CHANGED
@@ -42,6 +42,16 @@ module Google
        #     Typically, clients should either leave this unset to let the system to
        #     determine an upper bound OR set this a size for the maximum "units of work"
        #     it can gracefully handle.
+       # @!attribute [rw] preferred_min_stream_count
+       #   @return [::Integer]
+       #     The minimum preferred stream count. This parameter can be used to inform
+       #     the service that there is a desired lower bound on the number of streams.
+       #     This is typically a target parallelism of the client (e.g. a Spark
+       #     cluster with N-workers would set this to a low multiple of N to ensure
+       #     good cluster utilization).
+       #
+       #     The system will make a best effort to provide at least this number of
+       #     streams, but in some cases might provide less.
        class CreateReadSessionRequest
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -183,6 +193,9 @@ module Google
        # Due to the nature of AppendRows being a bidirectional streaming RPC, certain
        # parts of the AppendRowsRequest need only be specified for the first request
        # sent each time the gRPC network connection is opened/reopened.
+       #
+       # The size of a single AppendRowsRequest must be less than 10 MB in size.
+       # Requests larger than this return an error, typically `INVALID_ARGUMENT`.
        # @!attribute [rw] write_stream
        #   @return [::String]
        #     Required. The write_stream identifies the target of the append operation, and only
@@ -269,6 +282,10 @@ module Google
        #     If a request failed due to corrupted rows, no rows in the batch will be
        #     appended. The API will return row level error info, so that the caller can
        #     remove the bad rows and retry the request.
+       # @!attribute [rw] write_stream
+       #   @return [::String]
+       #     The target of the append operation. Matches the write_stream in the
+       #     corresponding request.
        class AppendRowsResponse
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
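A sketch of consuming the echoed field on the response side, assuming `requests` is an Enumerable of prepared `AppendRowsRequest` messages (each under the 10 MB limit noted above):

```ruby
write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new

# append_rows is a bidirectional streaming call: it takes an enumerable of
# requests and yields a stream of AppendRowsResponse messages.
write_client.append_rows(requests).each do |response|
  # New in this release: the response identifies its target write stream.
  puts "append targeted #{response.write_stream}"

  # Row-level errors let the caller drop the bad rows and retry the rest.
  response.row_errors.each do |row_error|
    warn "row #{row_error.index}: #{row_error.message}"
  end
end
```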
@@ -289,6 +306,10 @@ module Google
        #   @return [::String]
        #     Required. Name of the stream to get, in the form of
        #     `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
+       # @!attribute [rw] view
+       #   @return [::Google::Cloud::Bigquery::Storage::V1::WriteStreamView]
+       #     Indicates whether to get full or partial view of the WriteStream. If
+       #     not set, view returned will be basic.
        class GetWriteStreamRequest
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
data/proto_docs/google/cloud/bigquery/storage/v1/stream.rb CHANGED
@@ -88,10 +88,53 @@ module Google
        # Options dictating how we read a table.
        # @!attribute [rw] selected_fields
        #   @return [::Array<::String>]
-       #     Names of the fields in the table that should be read. If empty, all
-       #     fields will be read. If the specified field is a nested field, all
-       #     the sub-fields in the field will be selected. The output field order is
-       #     unrelated to the order of fields in selected_fields.
+       #     Optional. The names of the fields in the table to be returned. If no
+       #     field names are specified, then all fields in the table are returned.
+       #
+       #     Nested fields -- the child elements of a STRUCT field -- can be selected
+       #     individually using their fully-qualified names, and will be returned as
+       #     record fields containing only the selected nested fields. If a STRUCT
+       #     field is specified in the selected fields list, all of the child elements
+       #     will be returned.
+       #
+       #     As an example, consider a table with the following schema:
+       #
+       #     {
+       #         "name": "struct_field",
+       #         "type": "RECORD",
+       #         "mode": "NULLABLE",
+       #         "fields": [
+       #             {
+       #                 "name": "string_field1",
+       #                 "type": "STRING",
+       #                 "mode": "NULLABLE"
+       #             },
+       #             {
+       #                 "name": "string_field2",
+       #                 "type": "STRING",
+       #                 "mode": "NULLABLE"
+       #             }
+       #         ]
+       #     }
+       #
+       #     Specifying "struct_field" in the selected fields list will result in a
+       #     read session schema with the following logical structure:
+       #
+       #     struct_field {
+       #         string_field1
+       #         string_field2
+       #     }
+       #
+       #     Specifying "struct_field.string_field1" in the selected fields list will
+       #     result in a read session schema with the following logical structure:
+       #
+       #     struct_field {
+       #         string_field1
+       #     }
+       #
+       #     The order of the fields in the read session schema is derived from the
+       #     table schema and does not correspond to the order in which the fields are
+       #     specified in this list.
        # @!attribute [rw] row_restriction
        #   @return [::String]
        #     SQL text filtering statement, similar to a WHERE clause in a query.
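A sketch of nested-field selection against the example schema above, reusing the read `client` from earlier (the table path is a placeholder):

```ruby
# Request only struct_field.string_field1; per the documentation above, the
# read session schema will contain struct_field with just string_field1.
session = client.create_read_session(
  parent: "projects/my-project",
  read_session: {
    table: "projects/my-project/datasets/my_dataset/tables/my_table",
    data_format: :AVRO,
    read_options: { selected_fields: ["struct_field.string_field1"] }
  }
)
```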
@@ -107,6 +150,9 @@ module Google
        # @!attribute [rw] arrow_serialization_options
        #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
        #     Optional. Options specific to the Apache Arrow output format.
+       # @!attribute [rw] avro_serialization_options
+       #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
+       #     Optional. Options specific to the Apache Avro output format.
        class TableReadOptions
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -152,6 +198,11 @@ module Google
        # @!attribute [rw] write_mode
        #   @return [::Google::Cloud::Bigquery::Storage::V1::WriteStream::WriteMode]
        #     Immutable. Mode of the stream.
+       # @!attribute [rw] location
+       #   @return [::String]
+       #     Immutable. The geographic location where the stream's dataset resides. See
+       #     https://cloud.google.com/bigquery/docs/locations for supported
+       #     locations.
        class WriteStream
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -196,6 +247,23 @@ module Google
            # See https://arrow.apache.org/ for more details.
            ARROW = 2
          end
+
+         # WriteStreamView is a view enum that controls what details about a write
+         # stream should be returned.
+         module WriteStreamView
+           # The default / unset value.
+           WRITE_STREAM_VIEW_UNSPECIFIED = 0
+
+           # The BASIC projection returns basic metadata about a write stream. The
+           # basic view does not include schema information. This is the default view
+           # returned by GetWriteStream.
+           BASIC = 1
+
+           # The FULL projection returns all available write stream metadata, including
+           # the schema. CreateWriteStream returns the full projection of write stream
+           # metadata.
+           FULL = 2
+         end
        end
      end
    end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-bigquery-storage-v1
 version: !ruby/object:Gem::Version
-  version: 0.12.0
+  version: 0.15.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-07-01 00:00:00.000000000 Z
+date: 2022-09-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: gapic-common