google-cloud-bigquery-storage-v1 0.19.1 → 0.21.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d7f29fc9be5605c0f9372b71f0790c3c8989dbc828e06c53ce60cb49db3fa4cc
-  data.tar.gz: d002f50749a976e66776e10b46d98f18949e8d743a6c3e4c6e4d6d495240442b
+  metadata.gz: 703332e4ce288caa3087714d2396c597a59be06c8419d4d4ebb181128eb7e699
+  data.tar.gz: 52241ca1467778c2ebbe3bd278093a5bc0421cf540aa1f3efdc57b48b9b7073a
 SHA512:
-  metadata.gz: 14703258c674b429f46e5205d7ad2e911e8ca383db23a29ac4f8c2a331c87daf1244c38c36340db9463dad4b4c99cb0127f86f97d8fc5a488c0a85bc92ce5a60
-  data.tar.gz: c66d3952c0d03f65719c65a06f547268a2696a74a16f9f5c13e9d6c0d992fd5dc322d452ccf915ee3ef6d8c915c048ff6c1181711cf02cf0889ca6e1167e524c
+  metadata.gz: eff8e669467b02d0cc0e1bf2871728890becb60548b7ce3e8c055130a58e703c412578f5a8d1fa769f0995ed00efa4fd25a4c2e8baa99ac0fbaf77b425360436
+  data.tar.gz: 1acc21d67a2ee9cb1bbc481c0ed85768a5ec42eec2bb31587373089e102f88df914a5e0d7cd15b010ebc79208de2ac596e65bbd556c02a064c1b1f33469df4c9
@@ -82,22 +82,22 @@ module Google
 
 default_config.rpcs.get_write_stream.timeout = 600.0
 default_config.rpcs.get_write_stream.retry_policy = {
-  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
 }
 
 default_config.rpcs.finalize_write_stream.timeout = 600.0
 default_config.rpcs.finalize_write_stream.retry_policy = {
-  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
 }
 
 default_config.rpcs.batch_commit_write_streams.timeout = 600.0
 default_config.rpcs.batch_commit_write_streams.retry_policy = {
-  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
 }
 
 default_config.rpcs.flush_rows.timeout = 600.0
 default_config.rpcs.flush_rows.retry_policy = {
-  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14]
+  initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [4, 14, 8]
 }
 
 default_config
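
The only functional change in this hunk is that gRPC status code 8 (`RESOURCE_EXHAUSTED`) joins 4 (`DEADLINE_EXCEEDED`) and 14 (`UNAVAILABLE`) in the default retryable codes for these write-stream RPCs. Callers who need different behavior can override the policy through the generated client's configuration block; a minimal sketch (the delay and multiplier values below are illustrative, not the library defaults):

```ruby
require "google/cloud/bigquery/storage/v1"

# Per-client override of the flush_rows retry policy.
# Codes: 4 = DEADLINE_EXCEEDED, 14 = UNAVAILABLE, 8 = RESOURCE_EXHAUSTED.
client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new do |config|
  config.rpcs.flush_rows.retry_policy = {
    initial_delay: 0.5,  # seconds before the first retry (illustrative)
    max_delay:     30.0, # cap on the exponential backoff (illustrative)
    multiplier:    2.0,  # backoff growth factor (illustrative)
    retry_codes:   [4, 14, 8]
  }
end
```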
@@ -12,7 +12,7 @@ require 'google/cloud/bigquery/storage/v1/table_pb'
 require 'google/protobuf/timestamp_pb'
 
 
-descriptor_data = "\n-google/cloud/bigquery/storage/v1/stream.proto\x12 google.cloud.bigquery.storage.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a,google/cloud/bigquery/storage/v1/arrow.proto\x1a+google/cloud/bigquery/storage/v1/avro.proto\x1a,google/cloud/bigquery/storage/v1/table.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xff\t\n\x0bReadSession\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x34\n\x0b\x65xpire_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x46\n\x0b\x64\x61ta_format\x18\x03 \x01(\x0e\x32,.google.cloud.bigquery.storage.v1.DataFormatB\x03\xe0\x41\x05\x12H\n\x0b\x61vro_schema\x18\x04 \x01(\x0b\x32,.google.cloud.bigquery.storage.v1.AvroSchemaB\x03\xe0\x41\x03H\x00\x12J\n\x0c\x61rrow_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.ArrowSchemaB\x03\xe0\x41\x03H\x00\x12\x34\n\x05table\x18\x06 \x01(\tB%\xe0\x41\x05\xfa\x41\x1f\n\x1d\x62igquery.googleapis.com/Table\x12Z\n\x0ftable_modifiers\x18\x07 \x01(\x0b\x32<.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersB\x03\xe0\x41\x01\x12Y\n\x0cread_options\x18\x08 \x01(\x0b\x32>.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsB\x03\xe0\x41\x01\x12\x42\n\x07streams\x18\n \x03(\x0b\x32,.google.cloud.bigquery.storage.v1.ReadStreamB\x03\xe0\x41\x03\x12*\n\x1d\x65stimated_total_bytes_scanned\x18\x0c \x01(\x03\x42\x03\xe0\x41\x03\x12 \n\x13\x65stimated_row_count\x18\x0e \x01(\x03\x42\x03\xe0\x41\x03\x12\x15\n\x08trace_id\x18\r \x01(\tB\x03\xe0\x41\x01\x1a\x43\n\x0eTableModifiers\x12\x31\n\rsnapshot_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xf6\x02\n\x10TableReadOptions\x12\x17\n\x0fselected_fields\x18\x01 \x03(\t\x12\x17\n\x0frow_restriction\x18\x02 \x01(\t\x12g\n\x1b\x61rrow_serialization_options\x18\x03 \x01(\x0b\x32;.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsB\x03\xe0\x41\x01H\x00\x12\x65\n\x1a\x61vro_serialization_options\x18\x04 \x01(\x0b\x32:.google.cloud.bigquery.storage.v1.AvroSerializationOptionsB\x03\xe0\x41\x01H\x00\x12#\n\x11sample_percentage\x18\x05 \x01(\x01\x42\x03\xe0\x41\x01H\x01\x88\x01\x01\x42%\n#output_format_serialization_optionsB\x14\n\x12_sample_percentage:k\xea\x41h\n*bigquerystorage.googleapis.com/ReadSession\x12:projects/{project}/locations/{location}/sessions/{session}B\x08\n\x06schema\"\x9c\x01\n\nReadStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03:{\xea\x41x\n)bigquerystorage.googleapis.com/ReadStream\x12Kprojects/{project}/locations/{location}/sessions/{session}/streams/{stream}\"\xfb\x04\n\x0bWriteStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x04type\x18\x02 \x01(\x0e\x32\x32.google.cloud.bigquery.storage.v1.WriteStream.TypeB\x03\xe0\x41\x05\x12\x34\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0b\x63ommit_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x0ctable_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.TableSchemaB\x03\xe0\x41\x03\x12P\n\nwrite_mode\x18\x07 \x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.WriteStream.WriteModeB\x03\xe0\x41\x05\x12\x15\n\x08location\x18\x08 \x01(\tB\x03\xe0\x41\x05\"F\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\r\n\tCOMMITTED\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x0c\n\x08\x42UFFERED\x10\x03\"3\n\tWriteMode\x12\x1a\n\x16WRITE_MODE_UNSPECIFIED\x10\x00\x12\n\n\x06INSERT\x10\x01:v\xea\x41s\n*bigquerystorage.googleapis.com/WriteStream\x12\x45projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}*>\n\nDataFormat\x12\x1b\n\x17\x44\x41TA_FORMAT_UNSPECIFIED\x10\x00\x12\x08\n\x04\x41VRO\x10\x01\x12\t\n\x05\x41RROW\x10\x02*I\n\x0fWriteStreamView\x12!\n\x1dWRITE_STREAM_VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02\x42\xbb\x01\n$com.google.cloud.bigquery.storage.v1B\x0bStreamProtoP\x01Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\xaa\x02 Google.Cloud.BigQuery.Storage.V1\xca\x02 Google\\Cloud\\BigQuery\\Storage\\V1b\x06proto3"
+descriptor_data = "\n-google/cloud/bigquery/storage/v1/stream.proto\x12 google.cloud.bigquery.storage.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a,google/cloud/bigquery/storage/v1/arrow.proto\x1a+google/cloud/bigquery/storage/v1/avro.proto\x1a,google/cloud/bigquery/storage/v1/table.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb0\n\n\x0bReadSession\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x34\n\x0b\x65xpire_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x46\n\x0b\x64\x61ta_format\x18\x03 \x01(\x0e\x32,.google.cloud.bigquery.storage.v1.DataFormatB\x03\xe0\x41\x05\x12H\n\x0b\x61vro_schema\x18\x04 \x01(\x0b\x32,.google.cloud.bigquery.storage.v1.AvroSchemaB\x03\xe0\x41\x03H\x00\x12J\n\x0c\x61rrow_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.ArrowSchemaB\x03\xe0\x41\x03H\x00\x12\x34\n\x05table\x18\x06 \x01(\tB%\xe0\x41\x05\xfa\x41\x1f\n\x1d\x62igquery.googleapis.com/Table\x12Z\n\x0ftable_modifiers\x18\x07 \x01(\x0b\x32<.google.cloud.bigquery.storage.v1.ReadSession.TableModifiersB\x03\xe0\x41\x01\x12Y\n\x0cread_options\x18\x08 \x01(\x0b\x32>.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptionsB\x03\xe0\x41\x01\x12\x42\n\x07streams\x18\n \x03(\x0b\x32,.google.cloud.bigquery.storage.v1.ReadStreamB\x03\xe0\x41\x03\x12*\n\x1d\x65stimated_total_bytes_scanned\x18\x0c \x01(\x03\x42\x03\xe0\x41\x03\x12/\n\"estimated_total_physical_file_size\x18\x0f \x01(\x03\x42\x03\xe0\x41\x03\x12 \n\x13\x65stimated_row_count\x18\x0e \x01(\x03\x42\x03\xe0\x41\x03\x12\x15\n\x08trace_id\x18\r \x01(\tB\x03\xe0\x41\x01\x1a\x43\n\x0eTableModifiers\x12\x31\n\rsnapshot_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xf6\x02\n\x10TableReadOptions\x12\x17\n\x0fselected_fields\x18\x01 \x03(\t\x12\x17\n\x0frow_restriction\x18\x02 \x01(\t\x12g\n\x1b\x61rrow_serialization_options\x18\x03 \x01(\x0b\x32;.google.cloud.bigquery.storage.v1.ArrowSerializationOptionsB\x03\xe0\x41\x01H\x00\x12\x65\n\x1a\x61vro_serialization_options\x18\x04 \x01(\x0b\x32:.google.cloud.bigquery.storage.v1.AvroSerializationOptionsB\x03\xe0\x41\x01H\x00\x12#\n\x11sample_percentage\x18\x05 \x01(\x01\x42\x03\xe0\x41\x01H\x01\x88\x01\x01\x42%\n#output_format_serialization_optionsB\x14\n\x12_sample_percentage:k\xea\x41h\n*bigquerystorage.googleapis.com/ReadSession\x12:projects/{project}/locations/{location}/sessions/{session}B\x08\n\x06schema\"\x9c\x01\n\nReadStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03:{\xea\x41x\n)bigquerystorage.googleapis.com/ReadStream\x12Kprojects/{project}/locations/{location}/sessions/{session}/streams/{stream}\"\xfb\x04\n\x0bWriteStream\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x04type\x18\x02 \x01(\x0e\x32\x32.google.cloud.bigquery.storage.v1.WriteStream.TypeB\x03\xe0\x41\x05\x12\x34\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0b\x63ommit_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x0ctable_schema\x18\x05 \x01(\x0b\x32-.google.cloud.bigquery.storage.v1.TableSchemaB\x03\xe0\x41\x03\x12P\n\nwrite_mode\x18\x07 \x01(\x0e\x32\x37.google.cloud.bigquery.storage.v1.WriteStream.WriteModeB\x03\xe0\x41\x05\x12\x15\n\x08location\x18\x08 \x01(\tB\x03\xe0\x41\x05\"F\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\r\n\tCOMMITTED\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x0c\n\x08\x42UFFERED\x10\x03\"3\n\tWriteMode\x12\x1a\n\x16WRITE_MODE_UNSPECIFIED\x10\x00\x12\n\n\x06INSERT\x10\x01:v\xea\x41s\n*bigquerystorage.googleapis.com/WriteStream\x12\x45projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}*>\n\nDataFormat\x12\x1b\n\x17\x44\x41TA_FORMAT_UNSPECIFIED\x10\x00\x12\x08\n\x04\x41VRO\x10\x01\x12\t\n\x05\x41RROW\x10\x02*I\n\x0fWriteStreamView\x12!\n\x1dWRITE_STREAM_VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02\x42\xbb\x01\n$com.google.cloud.bigquery.storage.v1B\x0bStreamProtoP\x01Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\xaa\x02 Google.Cloud.BigQuery.Storage.V1\xca\x02 Google\\Cloud\\BigQuery\\Storage\\V1b\x06proto3"
 
 pool = Google::Protobuf::DescriptorPool.generated_pool
 
@@ -22,7 +22,7 @@ module Google
   module Bigquery
     module Storage
       module V1
-        VERSION = "0.19.1"
+        VERSION = "0.21.0"
       end
     end
   end
@@ -190,18 +190,23 @@ module Google
 
 # Request message for `AppendRows`.
 #
-# Due to the nature of AppendRows being a bidirectional streaming RPC, certain
-# parts of the AppendRowsRequest need only be specified for the first request
-# sent each time the gRPC network connection is opened/reopened.
+# Because AppendRows is a bidirectional streaming RPC, certain parts of the
+# AppendRowsRequest need only be specified for the first request before
+# switching table destinations. You can also switch table destinations within
+# the same connection for the default stream.
 #
 # The size of a single AppendRowsRequest must be less than 10 MB in size.
 # Requests larger than this return an error, typically `INVALID_ARGUMENT`.
 # @!attribute [rw] write_stream
 #   @return [::String]
-#     Required. The write_stream identifies the target of the append operation,
-#     and only needs to be specified as part of the first request on the gRPC
-#     connection. If provided for subsequent requests, it must match the value of
-#     the first request.
+#     Required. The write_stream identifies the append operation. It must be
+#     provided in the following scenarios:
+#
+#     * In the first request to an AppendRows connection.
+#
+#     * In all subsequent requests to an AppendRows connection, if you use the
+#     same connection to write to multiple tables or change the input schema for
+#     default streams.
 #
 # For explicitly created write streams, the format is:
 #
@@ -210,6 +215,22 @@ module Google
 # For the special default stream, the format is:
 #
 # * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+#
+# An example of a possible sequence of requests with write_stream fields
+# within a single connection:
+#
+# * r1: \\{write_stream: stream_name_1}
+#
+# * r2: \\{write_stream: /*omit*/}
+#
+# * r3: \\{write_stream: /*omit*/}
+#
+# * r4: \\{write_stream: stream_name_2}
+#
+# * r5: \\{write_stream: stream_name_2}
+#
+# The destination changed in request_4, so the write_stream field must be
+# populated in all subsequent requests in this stream.
 # @!attribute [rw] offset
 #   @return [::Google::Protobuf::Int64Value]
 #     If present, the write is only performed if the next append offset is same
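
Mapped onto the Ruby client, that request sequence looks roughly like the sketch below. `append_rows` is a bidirectional streaming call that accepts an enumerable of requests; `schema_a`, `schema_b`, `rows_a`, and `rows_b` are hypothetical placeholders for the `ProtoSchema` and `ProtoRows` values you would build for your own tables.

```ruby
require "google/cloud/bigquery/storage/v1"

write = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new

# Hypothetical default streams; substitute your own project, dataset, tables.
table_a = "projects/my-project/datasets/my_dataset/tables/table_a/streams/_default"
table_b = "projects/my-project/datasets/my_dataset/tables/table_b/streams/_default"

requests = [
  # r1: first request on the connection -- write_stream and schema required.
  { write_stream: table_a, proto_data: { writer_schema: schema_a, rows: rows_a } },
  # r2, r3: same destination and schema -- write_stream may be omitted.
  { proto_data: { rows: rows_a } },
  { proto_data: { rows: rows_a } },
  # r4: destination changes -- write_stream (plus the new table's schema) must
  # be set here and in every later request on this connection.
  { write_stream: table_b, proto_data: { writer_schema: schema_b, rows: rows_b } },
  # r5: same destination as r4, but write_stream stays populated.
  { write_stream: table_b, proto_data: { rows: rows_b } }
]

write.append_rows(requests).each do |response|
  puts response.append_result&.offset&.value
end
```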
@@ -251,9 +272,14 @@ module Google
 # requests.
 # @!attribute [rw] writer_schema
 #   @return [::Google::Cloud::Bigquery::Storage::V1::ProtoSchema]
-#     Proto schema used to serialize the data. This value only needs to be
-#     provided as part of the first request on a gRPC network connection,
-#     and will be ignored for subsequent requests on the connection.
+#     The protocol buffer schema used to serialize the data. Provide this value
+#     whenever:
+#
+#     * You send the first request of an RPC connection.
+#
+#     * You change the input schema.
+#
+#     * You specify a new destination table.
 # @!attribute [rw] rows
 #   @return [::Google::Cloud::Bigquery::Storage::V1::ProtoRows]
 #     Serialized row data in protobuf message format.
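
For reference, a `writer_schema` is a `ProtoSchema` wrapping a `google.protobuf.DescriptorProto` that describes the serialized row messages. A minimal hand-built sketch (the `my_row` message and its fields are purely illustrative and must match the destination table's columns):

```ruby
require "google/cloud/bigquery/storage/v1"
require "google/protobuf/descriptor_pb"

# Describe a two-field row message; field numbers must be unique and positive.
row_descriptor = Google::Protobuf::DescriptorProto.new(
  name: "my_row",
  field: [
    Google::Protobuf::FieldDescriptorProto.new(
      name: "id", number: 1, type: :TYPE_INT64, label: :LABEL_OPTIONAL
    ),
    Google::Protobuf::FieldDescriptorProto.new(
      name: "payload", number: 2, type: :TYPE_STRING, label: :LABEL_OPTIONAL
    )
  ]
)

writer_schema = Google::Cloud::Bigquery::Storage::V1::ProtoSchema.new(
  proto_descriptor: row_descriptor
)
```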
@@ -274,10 +300,9 @@ module Google
 extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
-# An enum to indicate how to interpret missing values. Missing values are
-# fields present in user schema but missing in rows. A missing value can
-# represent a NULL or a column default value defined in BigQuery table
-# schema.
+# An enum to indicate how to interpret missing values of fields that are
+# present in user schema but missing in rows. A missing value can represent a
+# NULL or a column default value defined in BigQuery table schema.
 module MissingValueInterpretation
 # Invalid missing value interpretation. Requests with this value will be
 # rejected.
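
These interpretations are applied per column through the request's `missing_value_interpretations` map. A hedged sketch extending the earlier request hashes (column names are hypothetical, and `table_a`/`rows_a` are still placeholders):

```ruby
request = {
  write_stream: table_a,            # stream name from the earlier sketch
  proto_data:   { rows: rows_a },   # rows_a is a placeholder ProtoRows
  missing_value_interpretations: {
    "status"     => :NULL_VALUE,    # treat a missing status as NULL
    "created_at" => :DEFAULT_VALUE  # fall back to the column default
  }
}
```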
@@ -67,6 +67,13 @@ module Google
 # Output only. An estimate on the number of bytes this session will scan when
 # all streams are completely consumed. This estimate is based on
 # metadata from the table which might be incomplete or stale.
+# @!attribute [r] estimated_total_physical_file_size
+#   @return [::Integer]
+#     Output only. A pre-projected estimate of the total physical size of files
+#     (in bytes) that this session will scan when all streams are consumed. This
+#     estimate is independent of the selected columns and can be based on
+#     incomplete or stale metadata from the table. This field is only set for
+#     BigLake tables.
 # @!attribute [r] estimated_row_count
 #   @return [::Integer]
 #     Output only. An estimate on the number of rows present in this session's
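
The new `estimated_total_physical_file_size` field sits alongside the existing estimates on the `ReadSession` returned by `create_read_session`. A short sketch of reading all three (project, dataset, and table names are placeholders):

```ruby
require "google/cloud/bigquery/storage/v1"

read = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

session = read.create_read_session(
  parent: "projects/my-project",
  read_session: {
    table: "projects/my-project/datasets/my_dataset/tables/my_table",
    data_format: :ARROW
  },
  max_stream_count: 4
)

puts session.estimated_row_count
puts session.estimated_total_bytes_scanned
puts session.estimated_total_physical_file_size # set only for BigLake tables
```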
@@ -164,11 +171,11 @@ module Google
 # @!attribute [rw] sample_percentage
 #   @return [::Float]
 #     Optional. Specifies a table sampling percentage. Specifically, the query
-#     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-#     samples at the file-level. It will randomly choose for each file whether
-#     to include that file in the sample returned. Note, that if the table only
-#     has one file, then TABLESAMPLE SYSTEM will select that file and return
-#     all returnable rows contained within.
+#     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+#     sampling percentage is applied at the data block granularity. It will
+#     randomly choose for each data block whether to read the rows in that data
+#     block. For more details, see
+#     https://cloud.google.com/bigquery/docs/table-sampling
 class TableReadOptions
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
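
The sampling percentage itself is set through the session's `read_options`; for example, sampling roughly 10 percent of data blocks (reusing the placeholder names from the sketch above):

```ruby
session = read.create_read_session(
  parent: "projects/my-project",
  read_session: {
    table: "projects/my-project/datasets/my_dataset/tables/my_table",
    data_format: :AVRO,
    # Equivalent to TABLESAMPLE SYSTEM (10 PERCENT): each data block is
    # independently included with roughly 10% probability.
    read_options: { sample_percentage: 10.0 }
  }
)
```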
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-bigquery-storage-v1
 version: !ruby/object:Gem::Version
-  version: 0.19.1
+  version: 0.21.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-06-06 00:00:00.000000000 Z
+date: 2023-07-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: gapic-common