google-cloud-bigquery-storage-v1 0.20.0 → 0.21.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c3798a072676c62908039d13ce88cbc15c3a12f46c4d488837d2d4bac64091fa
- data.tar.gz: 7dbccdae836e087be63b5a4291c94b45af21ef026893b6ecd5935df2ee5c7577
+ metadata.gz: 703332e4ce288caa3087714d2396c597a59be06c8419d4d4ebb181128eb7e699
+ data.tar.gz: 52241ca1467778c2ebbe3bd278093a5bc0421cf540aa1f3efdc57b48b9b7073a
  SHA512:
- metadata.gz: 6f8775594ee0f4a9990d97b3437107e7392326e0230cea5695bdc3837f4c422d227659a3f6ae999c43e23e635faf8189195fc42930cf331918618e7d65924ee7
- data.tar.gz: e1c115f3b2758f26e80dfe4179d32bc77211f8666058816275a4d8b91ed64c4089335a4224a52025ca3b292b3ca59a27701b982429b4bb33faca0e3857f5e1c4
+ metadata.gz: eff8e669467b02d0cc0e1bf2871728890becb60548b7ce3e8c055130a58e703c412578f5a8d1fa769f0995ed00efa4fd25a4c2e8baa99ac0fbaf77b425360436
+ data.tar.gz: 1acc21d67a2ee9cb1bbc481c0ed85768a5ec42eec2bb31587373089e102f88df914a5e0d7cd15b010ebc79208de2ac596e65bbd556c02a064c1b1f33469df4c9
@@ -82,22 +82,22 @@ module Google
 
  default_config.rpcs.get_write_stream.timeout = 600.0
  default_config.rpcs.get_write_stream.retry_policy = {
- initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+ initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
  }
 
  default_config.rpcs.finalize_write_stream.timeout = 600.0
  default_config.rpcs.finalize_write_stream.retry_policy = {
- initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+ initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
  }
 
  default_config.rpcs.batch_commit_write_streams.timeout = 600.0
  default_config.rpcs.batch_commit_write_streams.retry_policy = {
- initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+ initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
  }
 
  default_config.rpcs.flush_rows.timeout = 600.0
  default_config.rpcs.flush_rows.retry_policy = {
- initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+ initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
  }
 
  default_config
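
This change adds gRPC code 8 (RESOURCE_EXHAUSTED) to the retryable codes, alongside 4 (DEADLINE_EXCEEDED) and 14 (UNAVAILABLE), for the write-stream RPCs above. As a rough sketch of where these defaults take effect (the block-form constructor is the standard gapic-common configuration pattern; the override values here are purely illustrative), a caller could adjust the policy per RPC when building the client:

```ruby
require "google/cloud/bigquery/storage/v1"

# Illustrative per-RPC override; 4 = DEADLINE_EXCEEDED, 14 = UNAVAILABLE,
# 8 = RESOURCE_EXHAUSTED. The values below simply mirror the new defaults.
client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new do |config|
  config.rpcs.flush_rows.retry_policy = {
    initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
  }
end
```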
@@ -22,7 +22,7 @@ module Google
  module Bigquery
  module Storage
  module V1
- VERSION = "0.20.0"
+ VERSION = "0.21.0"
  end
  end
  end
@@ -190,18 +190,23 @@ module Google
 
  # Request message for `AppendRows`.
  #
- # Due to the nature of AppendRows being a bidirectional streaming RPC, certain
- # parts of the AppendRowsRequest need only be specified for the first request
- # sent each time the gRPC network connection is opened/reopened.
+ # Because AppendRows is a bidirectional streaming RPC, certain parts of the
+ # AppendRowsRequest need only be specified for the first request before
+ # switching table destinations. You can also switch table destinations within
+ # the same connection for the default stream.
  #
  # The size of a single AppendRowsRequest must be less than 10 MB in size.
  # Requests larger than this return an error, typically `INVALID_ARGUMENT`.
  # @!attribute [rw] write_stream
  # @return [::String]
- # Required. The write_stream identifies the target of the append operation,
- # and only needs to be specified as part of the first request on the gRPC
- # connection. If provided for subsequent requests, it must match the value of
- # the first request.
+ # Required. The write_stream identifies the append operation. It must be
+ # provided in the following scenarios:
+ #
+ # * In the first request to an AppendRows connection.
+ #
+ # * In all subsequent requests to an AppendRows connection, if you use the
+ # same connection to write to multiple tables or change the input schema for
+ # default streams.
  #
  # For explicitly created write streams, the format is:
  #
@@ -210,6 +215,22 @@ module Google
  # For the special default stream, the format is:
  #
  # * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+ #
+ # An example of a possible sequence of requests with write_stream fields
+ # within a single connection:
+ #
+ # * r1: \\{write_stream: stream_name_1}
+ #
+ # * r2: \\{write_stream: /*omit*/}
+ #
+ # * r3: \\{write_stream: /*omit*/}
+ #
+ # * r4: \\{write_stream: stream_name_2}
+ #
+ # * r5: \\{write_stream: stream_name_2}
+ #
+ # The destination changed in request_4, so the write_stream field must be
+ # populated in all subsequent requests in this stream.
  # @!attribute [rw] offset
  # @return [::Google::Protobuf::Int64Value]
  # If present, the write is only performed if the next append offset is same
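
A rough Ruby sketch of that r1–r5 sequence (table names and row payloads are placeholders, and the writer_schema that the first request to each destination also needs is elided here; append_rows takes an enumerable of requests and returns an enumerable of responses):

```ruby
require "google/cloud/bigquery/storage/v1"

write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new

# Hypothetical destinations: the default streams of two different tables.
stream_name_1 = "projects/my-project/datasets/my_dataset/tables/table_1/streams/_default"
stream_name_2 = "projects/my-project/datasets/my_dataset/tables/table_2/streams/_default"

# Placeholder payload builder: real requests carry proto_rows with serialized
# row bytes (plus a writer_schema on the first request to each destination).
batch = ->(bytes) { { rows: { serialized_rows: bytes } } }

requests = [
  { write_stream: stream_name_1, proto_rows: batch.call(["<row bytes 1>"]) }, # r1: names the destination
  { proto_rows: batch.call(["<row bytes 2>"]) },                              # r2: write_stream omitted
  { proto_rows: batch.call(["<row bytes 3>"]) },                              # r3: write_stream omitted
  { write_stream: stream_name_2, proto_rows: batch.call(["<row bytes 4>"]) }, # r4: destination changed
  { write_stream: stream_name_2, proto_rows: batch.call(["<row bytes 5>"]) }  # r5: must stay populated
]

write_client.append_rows(requests).each do |response|
  puts response.append_result&.offset&.value
end
```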
@@ -251,9 +272,14 @@ module Google
  # requests.
  # @!attribute [rw] writer_schema
  # @return [::Google::Cloud::Bigquery::Storage::V1::ProtoSchema]
- # Proto schema used to serialize the data. This value only needs to be
- # provided as part of the first request on a gRPC network connection,
- # and will be ignored for subsequent requests on the connection.
+ # The protocol buffer schema used to serialize the data. Provide this value
+ # whenever:
+ #
+ # * You send the first request of an RPC connection.
+ #
+ # * You change the input schema.
+ #
+ # * You specify a new destination table.
  # @!attribute [rw] rows
  # @return [::Google::Cloud::Bigquery::Storage::V1::ProtoRows]
  # Serialized row data in protobuf message format.
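
A hedged sketch of the writer_schema rule above (the descriptor is a placeholder built by hand; ProtoSchema wraps a protobuf DescriptorProto describing a single row message, and in practice it is usually derived from your compiled row message class):

```ruby
require "google/cloud/bigquery/storage/v1"

# Placeholder descriptor for a one-field row message.
row_descriptor = Google::Protobuf::DescriptorProto.new(
  name: "RowMessage",
  field: [
    Google::Protobuf::FieldDescriptorProto.new(
      name: "name", number: 1, type: :TYPE_STRING, label: :LABEL_OPTIONAL
    )
  ]
)

first_request = {
  write_stream: "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default",
  proto_rows: {
    writer_schema: { proto_descriptor: row_descriptor },         # first request, schema change,
    rows: { serialized_rows: ["<serialized RowMessage bytes>"] } # or new destination table
  }
}
```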
@@ -274,10 +300,9 @@ module Google
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end
 
- # An enum to indicate how to interpret missing values. Missing values are
- # fields present in user schema but missing in rows. A missing value can
- # represent a NULL or a column default value defined in BigQuery table
- # schema.
+ # An enum to indicate how to interpret missing values of fields that are
+ # present in user schema but missing in rows. A missing value can represent a
+ # NULL or a column default value defined in BigQuery table schema.
  module MissingValueInterpretation
  # Invalid missing value interpretation. Requests with this value will be
  # rejected.
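
A small sketch of how this enum could be applied per column (column names are placeholders, and the missing_value_interpretations map on AppendRowsRequest is assumed here as the field that carries these values):

```ruby
# NULL_VALUE treats a missing field as NULL; DEFAULT_VALUE falls back to the
# column default defined in the BigQuery table schema.
request = {
  write_stream: "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default",
  missing_value_interpretations: {
    "created_at" => :DEFAULT_VALUE,
    "comment"    => :NULL_VALUE
  }
}
```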
@@ -69,10 +69,10 @@ module Google
  # metadata from the table which might be incomplete or stale.
  # @!attribute [r] estimated_total_physical_file_size
  # @return [::Integer]
- # Output only. A pre-projected estimate of the total physical size (in bytes)
- # of files this session will scan when all streams are completely consumed.
- # This estimate does not depend on the selected columns and can be based on
- # metadata from the table which might be incomplete or stale. Only set for
+ # Output only. A pre-projected estimate of the total physical size of files
+ # (in bytes) that this session will scan when all streams are consumed. This
+ # estimate is independent of the selected columns and can be based on
+ # incomplete or stale metadata from the table. This field is only set for
  # BigLake tables.
  # @!attribute [r] estimated_row_count
  # @return [::Integer]
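
For context, a hedged sketch of where this estimate surfaces (project, dataset, and table IDs are placeholders): it is a read-only field on the ReadSession returned by the BigQueryRead client's create_read_session call, and it is only populated for BigLake tables:

```ruby
require "google/cloud/bigquery/storage/v1"

read_client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

# Placeholder project, dataset, and table identifiers.
session = read_client.create_read_session(
  parent: "projects/my-project",
  read_session: {
    table: "projects/my-project/datasets/my_dataset/tables/my_biglake_table",
    data_format: :AVRO
  },
  max_stream_count: 1
)

# Zero unless the table is a BigLake table with usable file metadata.
puts session.estimated_total_physical_file_size
```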
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-bigquery-storage-v1
  version: !ruby/object:Gem::Version
- version: 0.20.0
+ version: 0.21.0
  platform: ruby
  authors:
  - Google LLC
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-06-23 00:00:00.000000000 Z
+ date: 2023-07-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: gapic-common