google-cloud-bigquery-storage-v1 0.9.2 → 0.11.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: dd6247e85a5ffc569f96771fba5c9a3662c25fbd53e6f8e2e9adb7aee9480376
- data.tar.gz: 0b501022ceac5a5bf209143d5639eb229adaa604d2aeb7b591910642c20e3dd5
+ metadata.gz: 9041851c373718175111413b2e1125660d22bcc2ca78bb0d46bf97ad75866661
+ data.tar.gz: 771714ee33b98b3750ff14a38295b1156c59011ce18fc46664aa54152e798a64
  SHA512:
- metadata.gz: b0018a10cb61fd33add4559fc6cd7c2323d79dd70244ffdaa0ee45d9ab79ab4139f0563739868c8fcf8be9ee5810a3ecb1378955e992f024a784ca8a0ef7f64c
- data.tar.gz: 6f45f00046cb5b0723f7bfb214b5e7918ef5ecb151e0f3cc7768a2eeeb9fc1e0f1a4753cd89eb38208dc10552dbeb34bcaae3f49c774b1115c5ad91146935010
+ metadata.gz: 89db88a25bb273beee8ba6dcedae7231c2ee4906a240c1ec2505fbd81e6196bb737879f37150a235cd7e378ef07b4b2a759b317af9a8e365d776d27025a8cfbb
+ data.tar.gz: 5c3f7a8acc54d45b82d2d5786dc08431d7b090f41ed7681f795899d3a747e3a16332aec2f8fcc2090fa1c060cdbbe003fc5c8e78246dd38c18b452c4358ec8b6
data/README.md CHANGED
@@ -69,6 +69,11 @@ module GRPC
  end
  ```

+
+ ## Google Cloud Samples
+
+ To browse ready-to-use code samples, check [Google Cloud Samples](https://cloud.google.com/docs/samples).
+
  ## Supported Ruby Versions

  This library is supported on Ruby 2.5+.
@@ -207,11 +207,13 @@ module Google
  # Max initial number of streams. If unset or zero, the server will
  # provide a value of streams so as to produce reasonable throughput. Must be
  # non-negative. The number of streams may be lower than the requested number,
- # depending on the amount parallelism that is reasonable for the table. Error
- # will be returned if the max count is greater than the current system
- # max limit of 1,000.
+ # depending on the amount of parallelism that is reasonable for the table.
+ # There is a default system max limit of 1,000.
  #
- # Streams must be read starting from offset 0.
+ # This must be greater than or equal to preferred_min_stream_count.
+ # Typically, clients should either leave this unset to let the system
+ # determine an upper bound OR set this to a size for the maximum "units of work"
+ # it can gracefully handle.
  #
  # @yield [response, operation] Access the result along with the RPC operation
  # @yieldparam response [::Google::Cloud::Bigquery::Storage::V1::ReadSession]
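For orientation, the documentation above describes the `max_stream_count` argument of `BigQueryRead::Client#create_read_session`. A minimal sketch of how a caller might use it (the project, dataset, and table names below are placeholders, not values taken from this diff):

```ruby
require "google/cloud/bigquery/storage/v1"

client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

read_session = Google::Cloud::Bigquery::Storage::V1::ReadSession.new(
  table:       "projects/my-project/datasets/my_dataset/tables/my_table",
  data_format: :AVRO
)

# max_stream_count caps how many streams the server may create;
# leaving it unset (or zero) lets the server choose a reasonable value.
session = client.create_read_session(
  parent:           "projects/my-project",
  read_session:     read_session,
  max_stream_count: 4
)

puts session.streams.count
```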
@@ -306,6 +306,13 @@ module Google
  # finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
  # committed via the `BatchCommitWriteStreams` rpc.
  #
+ # Note: For users coding against the gRPC API directly, it may be
+ # necessary to supply the x-goog-request-params system parameter
+ # with `write_stream=<full_write_stream_name>`.
+ #
+ # More information about system parameters:
+ # https://cloud.google.com/apis/docs/system-parameters
+ #
  # @param request [::Gapic::StreamInput, ::Enumerable<::Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest, ::Hash>]
  # An enumerable of {::Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest} instances.
  # @param options [::Gapic::CallOptions, ::Hash]
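For callers going through the generated Ruby client, the routing header mentioned above could, if needed, be attached via call metadata. A hedged sketch (the stream name is a placeholder, and the generated client normally derives routing headers itself, so explicit metadata mainly matters for raw gRPC callers):

```ruby
require "google/cloud/bigquery/storage/v1"

write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new

# Placeholder stream name.
stream_name = "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default"

options = Gapic::CallOptions.new(
  metadata: { "x-goog-request-params" => "write_stream=#{stream_name}" }
)

requests = [
  Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.new(write_stream: stream_name)
  # writer schema and serialized rows omitted for brevity
]

responses = write_client.append_rows requests, options
responses.each { |response| p response }
```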
@@ -1,9 +1,10 @@
  # Generated by the protocol buffer compiler. DO NOT EDIT!
  # source: google/cloud/bigquery/storage/v1/protobuf.proto

- require 'google/protobuf/descriptor_pb'
  require 'google/protobuf'

+ require 'google/protobuf/descriptor_pb'
+
  Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/cloud/bigquery/storage/v1/protobuf.proto", :syntax => :proto3) do
  add_message "google.cloud.bigquery.storage.v1.ProtoSchema" do
@@ -1,6 +1,8 @@
  # Generated by the protocol buffer compiler. DO NOT EDIT!
  # source: google/cloud/bigquery/storage/v1/storage.proto

+ require 'google/protobuf'
+
  require 'google/api/annotations_pb'
  require 'google/api/client_pb'
  require 'google/api/field_behavior_pb'
@@ -13,7 +15,6 @@ require 'google/cloud/bigquery/storage/v1/table_pb'
  require 'google/protobuf/timestamp_pb'
  require 'google/protobuf/wrappers_pb'
  require 'google/rpc/status_pb'
- require 'google/protobuf'

  Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/cloud/bigquery/storage/v1/storage.proto", :syntax => :proto3) do
@@ -75,6 +76,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  end
  add_message "google.cloud.bigquery.storage.v1.AppendRowsResponse" do
  optional :updated_schema, :message, 3, "google.cloud.bigquery.storage.v1.TableSchema"
+ repeated :row_errors, :message, 4, "google.cloud.bigquery.storage.v1.RowError"
  oneof :response do
  optional :append_result, :message, 1, "google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult"
  optional :error, :message, 2, "google.rpc.Status"
@@ -121,6 +123,17 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  value :INVALID_STREAM_STATE, 5
  value :STREAM_FINALIZED, 6
  value :SCHEMA_MISMATCH_EXTRA_FIELDS, 7
+ value :OFFSET_ALREADY_EXISTS, 8
+ value :OFFSET_OUT_OF_RANGE, 9
+ end
+ add_message "google.cloud.bigquery.storage.v1.RowError" do
+ optional :index, :int64, 1
+ optional :code, :enum, 2, "google.cloud.bigquery.storage.v1.RowError.RowErrorCode"
+ optional :message, :string, 3
+ end
+ add_enum "google.cloud.bigquery.storage.v1.RowError.RowErrorCode" do
+ value :ROW_ERROR_CODE_UNSPECIFIED, 0
+ value :FIELDS_ERROR, 1
  end
  end
  end
@@ -152,6 +165,8 @@ module Google
  FlushRowsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.FlushRowsResponse").msgclass
  StorageError = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.StorageError").msgclass
  StorageError::StorageErrorCode = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode").enummodule
+ RowError = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.RowError").msgclass
+ RowError::RowErrorCode = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.bigquery.storage.v1.RowError.RowErrorCode").enummodule
  end
  end
  end
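These lookups expose the new `RowError` message and its `RowErrorCode` enum as generated Ruby classes. An illustrative sketch of constructing and inspecting one (the values are made up; in practice these messages arrive inside `AppendRowsResponse#row_errors` rather than being built by hand):

```ruby
require "google/cloud/bigquery/storage/v1"

row_error = Google::Cloud::Bigquery::Storage::V1::RowError.new(
  index:   2,
  code:    :FIELDS_ERROR,
  message: "Field 'age' could not be converted to INT64"
)

puts row_error.index    # => 2
puts row_error.code     # => :FIELDS_ERROR
puts row_error.message
```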
@@ -134,6 +134,13 @@ module Google
  # * For PENDING streams, data is not made visible until the stream itself is
  # finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
  # committed via the `BatchCommitWriteStreams` rpc.
+ #
+ # Note: For users coding against the gRPC API directly, it may be
+ # necessary to supply the x-goog-request-params system parameter
+ # with `write_stream=<full_write_stream_name>`.
+ #
+ # More information about system parameters:
+ # https://cloud.google.com/apis/docs/system-parameters
  rpc :AppendRows, stream(::Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest), stream(::Google::Cloud::Bigquery::Storage::V1::AppendRowsResponse)
  # Gets information about a write stream.
  rpc :GetWriteStream, ::Google::Cloud::Bigquery::Storage::V1::GetWriteStreamRequest, ::Google::Cloud::Bigquery::Storage::V1::WriteStream
@@ -1,13 +1,14 @@
  # Generated by the protocol buffer compiler. DO NOT EDIT!
  # source: google/cloud/bigquery/storage/v1/stream.proto

+ require 'google/protobuf'
+
  require 'google/api/field_behavior_pb'
  require 'google/api/resource_pb'
  require 'google/cloud/bigquery/storage/v1/arrow_pb'
  require 'google/cloud/bigquery/storage/v1/avro_pb'
  require 'google/cloud/bigquery/storage/v1/table_pb'
  require 'google/protobuf/timestamp_pb'
- require 'google/protobuf'

  Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/cloud/bigquery/storage/v1/stream.proto", :syntax => :proto3) do
@@ -20,6 +21,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  optional :read_options, :message, 8, "google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions"
  repeated :streams, :message, 10, "google.cloud.bigquery.storage.v1.ReadStream"
  optional :estimated_total_bytes_scanned, :int64, 12
+ optional :trace_id, :string, 13
  oneof :schema do
  optional :avro_schema, :message, 4, "google.cloud.bigquery.storage.v1.AvroSchema"
  optional :arrow_schema, :message, 5, "google.cloud.bigquery.storage.v1.ArrowSchema"
@@ -1,9 +1,10 @@
  # Generated by the protocol buffer compiler. DO NOT EDIT!
  # source: google/cloud/bigquery/storage/v1/table.proto

- require 'google/api/field_behavior_pb'
  require 'google/protobuf'

+ require 'google/api/field_behavior_pb'
+
  Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/cloud/bigquery/storage/v1/table.proto", :syntax => :proto3) do
  add_message "google.cloud.bigquery.storage.v1.TableSchema" do
@@ -22,7 +22,7 @@ module Google
  module Bigquery
  module Storage
  module V1
- VERSION = "0.9.2"
+ VERSION = "0.11.0"
  end
  end
  end
@@ -27,6 +27,8 @@ module Google
  ##
  # To load this package, including all its services, and instantiate a client:
  #
+ # @example
+ #
  # require "google/cloud/bigquery/storage/v1"
  # client = ::Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new
  #
@@ -42,7 +42,8 @@ module Google
  # IPC-serialized Arrow RecordBatch.
  # @!attribute [rw] row_count
  # @return [::Integer]
- # The count of rows in `serialized_record_batch`.
+ # [Deprecated] The count of rows in `serialized_record_batch`.
+ # Please use the format-independent ReadRowsResponse.row_count instead.
  class ArrowRecordBatch
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -38,7 +38,8 @@ module Google
  # Binary serialized rows in a block.
  # @!attribute [rw] row_count
  # @return [::Integer]
- # The count of rows in the returning block.
+ # [Deprecated] The count of rows in the returning block.
+ # Please use the format-independent ReadRowsResponse.row_count instead.
  class AvroRows
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
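Both deprecation notes point readers at the format-independent `ReadRowsResponse.row_count`. A hedged sketch of counting rows that way (the stream name is a placeholder for one obtained from a previously created read session, e.g. `session.streams.first.name`):

```ruby
require "google/cloud/bigquery/storage/v1"

client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

# Placeholder read stream name.
stream_name = "projects/my-project/locations/us/sessions/SESSION_ID/streams/STREAM_ID"

total_rows = 0
client.read_rows(read_stream: stream_name).each do |response|
  # Use the format-independent counter rather than
  # response.avro_rows.row_count or response.arrow_record_batch.row_count.
  total_rows += response.row_count
end

puts total_rows
```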
@@ -35,11 +35,13 @@ module Google
  # Max initial number of streams. If unset or zero, the server will
  # provide a value of streams so as to produce reasonable throughput. Must be
  # non-negative. The number of streams may be lower than the requested number,
- # depending on the amount parallelism that is reasonable for the table. Error
- # will be returned if the max count is greater than the current system
- # max limit of 1,000.
+ # depending on the amount of parallelism that is reasonable for the table.
+ # There is a default system max limit of 1,000.
  #
- # Streams must be read starting from offset 0.
+ # This must be greater than or equal to preferred_min_stream_count.
+ # Typically, clients should either leave this unset to let the system
+ # determine an upper bound OR set this to a size for the maximum "units of work"
+ # it can gracefully handle.
  class CreateReadSessionRequest
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -189,10 +191,12 @@ module Google
  # request.
  #
  # For explicitly created write streams, the format is:
- # `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
+ #
+ # * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
  #
  # For the special default stream, the format is:
- # `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
+ #
+ # * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
  # @!attribute [rw] offset
  # @return [::Google::Protobuf::Int64Value]
  # If present, the write is only performed if the next append offset is same
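The two name formats above can be illustrated by creating an explicit write stream versus addressing the built-in default stream. A sketch with placeholder resource names:

```ruby
require "google/cloud/bigquery/storage/v1"

write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new
table_path   = "projects/my-project/datasets/my_dataset/tables/my_table"

# Explicitly created stream: the server returns a name ending in /streams/{id}.
pending_stream = write_client.create_write_stream(
  parent:       table_path,
  write_stream: Google::Cloud::Bigquery::Storage::V1::WriteStream.new(type: :PENDING)
)
puts pending_stream.name

# Built-in default stream: fixed name ending in /streams/_default.
default_stream_name = "#{table_path}/streams/_default"
puts default_stream_name
```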
@@ -260,6 +264,11 @@ module Google
  # If backend detects a schema update, pass it to user so that user can
  # use it to input new type of message. It will be empty when no schema
  # updates have occurred.
+ # @!attribute [rw] row_errors
+ # @return [::Array<::Google::Cloud::Bigquery::Storage::V1::RowError>]
+ # If a request failed due to corrupted rows, no rows in the batch will be
+ # appended. The API will return row level error info, so that the caller can
+ # remove the bad rows and retry the request.
  class AppendRowsResponse
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
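A hedged sketch of how a caller might inspect `row_errors` on each response before retrying (the request body is abbreviated, and all resource names are placeholders):

```ruby
require "google/cloud/bigquery/storage/v1"

write_client = Google::Cloud::Bigquery::Storage::V1::BigQueryWrite::Client.new
stream_name  = "projects/my-project/datasets/my_dataset/tables/my_table/streams/_default"

requests = [
  Google::Cloud::Bigquery::Storage::V1::AppendRowsRequest.new(write_stream: stream_name)
  # writer schema and serialized rows omitted for brevity
]

write_client.append_rows(requests).each do |response|
  next if response.row_errors.empty?

  # When corrupted rows are reported, nothing from that batch was appended;
  # log the offending indexes so they can be dropped before retrying.
  response.row_errors.each do |row_error|
    warn "row #{row_error.index}: #{row_error.code} - #{row_error.message}"
  end
end
```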
@@ -403,6 +412,36 @@ module Google
  # There is a schema mismatch and it is caused by user schema has extra
  # field than bigquery schema.
  SCHEMA_MISMATCH_EXTRA_FIELDS = 7
+
+ # Offset already exists.
+ OFFSET_ALREADY_EXISTS = 8
+
+ # Offset out of range.
+ OFFSET_OUT_OF_RANGE = 9
+ end
+ end
+
+ # The message that presents row level error info in a request.
+ # @!attribute [rw] index
+ # @return [::Integer]
+ # Index of the malformed row in the request.
+ # @!attribute [rw] code
+ # @return [::Google::Cloud::Bigquery::Storage::V1::RowError::RowErrorCode]
+ # Structured error reason for a row error.
+ # @!attribute [rw] message
+ # @return [::String]
+ # Description of the issue encountered when processing the row.
+ class RowError
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Error code for `RowError`.
+ module RowErrorCode
+ # Default error.
+ ROW_ERROR_CODE_UNSPECIFIED = 0
+
+ # One or more fields in the row have errors.
+ FIELDS_ERROR = 1
  end
  end
  end
@@ -34,7 +34,7 @@ module Google
  # automatically assigned and currently cannot be specified or updated.
  # @!attribute [rw] data_format
  # @return [::Google::Cloud::Bigquery::Storage::V1::DataFormat]
- # Immutable. Data format of the output data.
+ # Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not supported.
  # @!attribute [r] avro_schema
  # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSchema]
  # Output only. Avro schema.
@@ -64,6 +64,14 @@ module Google
  # Output only. An estimate on the number of bytes this session will scan when
  # all streams are completely consumed. This estimate is based on
  # metadata from the table which might be incomplete or stale.
+ # @!attribute [rw] trace_id
+ # @return [::String]
+ # Optional. ID set by client to annotate a session identity. This does not need
+ # to be strictly unique, but instead the same ID should be used to group
+ # logically connected sessions (e.g. using the same ID for all sessions
+ # needed to complete a Spark SQL query is reasonable).
+ #
+ # Maximum length is 256 bytes.
  class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
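A hedged sketch of grouping related read sessions under one `trace_id` (resource names and the ID value are placeholders):

```ruby
require "google/cloud/bigquery/storage/v1"

client = Google::Cloud::Bigquery::Storage::V1::BigQueryRead::Client.new

read_session = Google::Cloud::Bigquery::Storage::V1::ReadSession.new(
  table:       "projects/my-project/datasets/my_dataset/tables/my_table",
  data_format: :ARROW,
  # Shared, non-unique label that groups all sessions for one logical job.
  trace_id:    "nightly-export-2022-06-22"
)

session = client.create_read_session(
  parent:       "projects/my-project",
  read_session: read_session
)
puts session.trace_id
```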
@@ -177,6 +185,7 @@ module Google

  # Data format for input or output data.
  module DataFormat
+ # Data format is unspecified.
  DATA_FORMAT_UNSPECIFIED = 0

  # Avro is a standard open source row based file format.
@@ -22,7 +22,9 @@ module Google
  module Bigquery
  module Storage
  module V1
- # Schema of a table.
+ # Schema of a table. This schema is a subset of
+ # google.cloud.bigquery.v2.TableSchema containing information necessary to
+ # generate a valid message to write to BigQuery.
  # @!attribute [rw] fields
  # @return [::Array<::Google::Cloud::Bigquery::Storage::V1::TableFieldSchema>]
  # Describes the fields in a table.
@@ -44,7 +44,7 @@ module Google
  # foo = any.unpack(Foo.class);
  # }
  #
- # Example 3: Pack and unpack a message in Python.
+ # Example 3: Pack and unpack a message in Python.
  #
  # foo = Foo(...)
  # any = Any()
@@ -54,7 +54,7 @@ module Google
  # any.Unpack(foo)
  # ...
  #
- # Example 4: Pack and unpack a message in Go
+ # Example 4: Pack and unpack a message in Go
  #
  # foo := &pb.Foo{...}
  # any, err := anypb.New(foo)
@@ -75,7 +75,7 @@ module Google
  #
  #
  # JSON
- # ====
+ #
  # The JSON representation of an `Any` value uses the regular
  # representation of the deserialized, embedded message, with an
  # additional field `@type` which contains the type URL. Example:
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-bigquery-storage-v1
  version: !ruby/object:Gem::Version
- version: 0.9.2
+ version: 0.11.0
  platform: ruby
  authors:
  - Google LLC
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-02-18 00:00:00.000000000 Z
+ date: 2022-06-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: gapic-common
@@ -219,7 +219,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.3.5
+ rubygems_version: 3.3.14
  signing_key:
  specification_version: 4
  summary: API Client library for the BigQuery Storage V1 API