google-cloud-bigtable-v2 0.7.1 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 4b488a55ed1e225be5451adb18dfc8738445870547b085792e064e8174e6071b
- data.tar.gz: 6b48ff8142c4ee5e7ed9180597c16a4800a663514ce8946c626ac9539c65a2a6
+ metadata.gz: 4cfc6e05ad85852561ba6aa5d53d5fe73c273f2e0dde7703f6c63802af275ff9
+ data.tar.gz: a5b32e4d889b81242c3622f883fb117e81d26a911064c6ebd64b6f03f0287c3c
  SHA512:
- metadata.gz: 2522db32282c52ff18ed77baaa0e7612f4495b0adf964449703067494633f0a6df785931db8f3eddeb37ac7900bebf0de4c4fee80cdc3589630859b94f2c32f6
- data.tar.gz: a30680ad0592b8e1dde4822ef8bafe539b5ba7d2c181fb6bc10df48dd7259cd33675da77a6c6a4dfbe0f32b13996ee840210e0cc21ea8be71e9c1417e13da2f8
+ metadata.gz: 73c03468e20fd6ea4e9306cf304018635bc55d1caf8e3e878be7298155a5d01bbefd7a0535012a89ee19cc1b896cb94bfd7c26892276d1bbb7d9b9fc01891432
+ data.tar.gz: 3c1710b480304d76b52a0e7552851f59ab0edc03dfa23c069bae7bf3a698292dc91dee244c69c26b52af25cd10933a99c3d958fb477b5a537fa2ed1001561ea0
data/README.md CHANGED
@@ -46,7 +46,7 @@ for general usage information.
  ## Enabling Logging

  To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
- The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/stdlib/libdoc/logger/rdoc/Logger.html) as shown below,
+ The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/current/stdlibs/logger/Logger.html) as shown below,
  or a [`Google::Cloud::Logging::Logger`](https://googleapis.dev/ruby/google-cloud-logging/latest)
  that will write logs to [Cloud Logging](https://cloud.google.com/logging/). See [grpc/logconfig.rb](https://github.com/grpc/grpc/blob/master/src/ruby/lib/grpc/logconfig.rb)
  and the gRPC [spec_helper.rb](https://github.com/grpc/grpc/blob/master/src/ruby/spec/spec_helper.rb) for additional information.
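For reference, the wiring that paragraph describes looks roughly like this (a minimal sketch; the `MyLogger` module name is illustrative):

```ruby
require "logger"

# Any module exposing a `logger` method will do; the name is arbitrary.
module MyLogger
  LOGGER = Logger.new $stderr, level: Logger::WARN
  def logger
    LOGGER
  end
end

# gRPC looks up its logger on the GRPC module, so extend it before use.
module GRPC
  extend MyLogger
end
```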
@@ -10,6 +10,8 @@ require 'google/api/resource_pb'
  require 'google/api/routing_pb'
  require 'google/bigtable/v2/data_pb'
  require 'google/bigtable/v2/request_stats_pb'
+ require 'google/protobuf/duration_pb'
+ require 'google/protobuf/timestamp_pb'
  require 'google/protobuf/wrappers_pb'
  require 'google/rpc/status_pb'

@@ -104,6 +106,65 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  add_message "google.bigtable.v2.ReadModifyWriteRowResponse" do
    optional :row, :message, 1, "google.bigtable.v2.Row"
  end
+ add_message "google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest" do
+   optional :table_name, :string, 1
+   optional :app_profile_id, :string, 2
+ end
+ add_message "google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse" do
+   optional :partition, :message, 1, "google.bigtable.v2.StreamPartition"
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamRequest" do
+   optional :table_name, :string, 1
+   optional :app_profile_id, :string, 2
+   optional :partition, :message, 3, "google.bigtable.v2.StreamPartition"
+   optional :end_time, :message, 5, "google.protobuf.Timestamp"
+   optional :heartbeat_duration, :message, 7, "google.protobuf.Duration"
+   oneof :start_from do
+     optional :start_time, :message, 4, "google.protobuf.Timestamp"
+     optional :continuation_tokens, :message, 6, "google.bigtable.v2.StreamContinuationTokens"
+   end
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse" do
+   oneof :stream_record do
+     optional :data_change, :message, 1, "google.bigtable.v2.ReadChangeStreamResponse.DataChange"
+     optional :heartbeat, :message, 2, "google.bigtable.v2.ReadChangeStreamResponse.Heartbeat"
+     optional :close_stream, :message, 3, "google.bigtable.v2.ReadChangeStreamResponse.CloseStream"
+   end
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk" do
+   optional :chunk_info, :message, 1, "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo"
+   optional :mutation, :message, 2, "google.bigtable.v2.Mutation"
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo" do
+   optional :chunked_value_size, :int32, 1
+   optional :chunked_value_offset, :int32, 2
+   optional :last_chunk, :bool, 3
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.DataChange" do
+   optional :type, :enum, 1, "google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type"
+   optional :source_cluster_id, :string, 2
+   optional :row_key, :bytes, 3
+   optional :commit_timestamp, :message, 4, "google.protobuf.Timestamp"
+   optional :tiebreaker, :int32, 5
+   repeated :chunks, :message, 6, "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk"
+   optional :done, :bool, 8
+   optional :token, :string, 9
+   optional :estimated_low_watermark, :message, 10, "google.protobuf.Timestamp"
+ end
+ add_enum "google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type" do
+   value :TYPE_UNSPECIFIED, 0
+   value :USER, 1
+   value :GARBAGE_COLLECTION, 2
+   value :CONTINUATION, 3
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.Heartbeat" do
+   optional :continuation_token, :message, 1, "google.bigtable.v2.StreamContinuationToken"
+   optional :estimated_low_watermark, :message, 2, "google.protobuf.Timestamp"
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.CloseStream" do
+   optional :status, :message, 1, "google.rpc.Status"
+   repeated :continuation_tokens, :message, 2, "google.bigtable.v2.StreamContinuationToken"
+ end
  end
  end

@@ -129,6 +190,16 @@ module Google
  PingAndWarmResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.PingAndWarmResponse").msgclass
  ReadModifyWriteRowRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadModifyWriteRowRequest").msgclass
  ReadModifyWriteRowResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadModifyWriteRowResponse").msgclass
+ GenerateInitialChangeStreamPartitionsRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest").msgclass
+ GenerateInitialChangeStreamPartitionsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse").msgclass
+ ReadChangeStreamRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamRequest").msgclass
+ ReadChangeStreamResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse").msgclass
+ ReadChangeStreamResponse::MutationChunk = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.MutationChunk").msgclass
+ ReadChangeStreamResponse::MutationChunk::ChunkInfo = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo").msgclass
+ ReadChangeStreamResponse::DataChange = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.DataChange").msgclass
+ ReadChangeStreamResponse::DataChange::Type = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type").enummodule
+ ReadChangeStreamResponse::Heartbeat = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.Heartbeat").msgclass
+ ReadChangeStreamResponse::CloseStream = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.CloseStream").msgclass
  end
  end
  end
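With the descriptor entries and lookups above in place, the change stream request can be built like any other generated message. A minimal sketch (the require path and resource name are assumptions for illustration):

```ruby
require "google/bigtable/v2/bigtable_pb"

# start_from is a oneof: assigning start_time selects that branch and would
# clear continuation_tokens if it had been set.
request = Google::Cloud::Bigtable::V2::ReadChangeStreamRequest.new(
  table_name: "projects/my-project/instances/my-instance/tables/my-table",
  start_time: Google::Protobuf::Timestamp.new(seconds: Time.now.to_i - 3600)
)
request.start_from # => :start_time
```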
@@ -62,6 +62,16 @@ module Google
  # timestamp is the greater of the existing timestamp or the current server
  # time. The method returns the new contents of all modified cells.
  rpc :ReadModifyWriteRow, ::Google::Cloud::Bigtable::V2::ReadModifyWriteRowRequest, ::Google::Cloud::Bigtable::V2::ReadModifyWriteRowResponse
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Returns the current list of partitions that make up the table's
+ # change stream. The union of partitions will cover the entire keyspace.
+ # Partitions can be read with `ReadChangeStream`.
+ rpc :GenerateInitialChangeStreamPartitions, ::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest, stream(::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse)
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Reads changes from a table's change stream. Changes will
+ # reflect both user-initiated mutations and mutations that are caused by
+ # garbage collection.
+ rpc :ReadChangeStream, ::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest, stream(::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse)
  end

  Stub = Service.rpc_stub_class
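Since `GenerateInitialChangeStreamPartitions` lists the partitions that `ReadChangeStream` then consumes, the two server-streaming RPCs naturally chain. A hedged sketch against the generated client (the project, instance, and table names are placeholders; change streaming must be enabled on the table):

```ruby
require "google/cloud/bigtable/v2"

client = Google::Cloud::Bigtable::V2::Bigtable::Client.new
table  = "projects/my-project/instances/my-instance/tables/my-table"

# Both RPCs are server-streaming, so each call returns an enumerable.
partitions = client.generate_initial_change_stream_partitions table_name: table
partitions.each do |partition_response|
  stream = client.read_change_stream table_name: table,
                                     partition: partition_response.partition
  stream.each { |record| p record }
end
```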
@@ -127,6 +127,16 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  optional :increment_amount, :int64, 4
  end
  end
+ add_message "google.bigtable.v2.StreamPartition" do
+   optional :row_range, :message, 1, "google.bigtable.v2.RowRange"
+ end
+ add_message "google.bigtable.v2.StreamContinuationTokens" do
+   repeated :tokens, :message, 1, "google.bigtable.v2.StreamContinuationToken"
+ end
+ add_message "google.bigtable.v2.StreamContinuationToken" do
+   optional :partition, :message, 1, "google.bigtable.v2.StreamPartition"
+   optional :token, :string, 2
+ end
  end
  end

@@ -153,6 +163,9 @@ module Google
  Mutation::DeleteFromFamily = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.Mutation.DeleteFromFamily").msgclass
  Mutation::DeleteFromRow = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.Mutation.DeleteFromRow").msgclass
  ReadModifyWriteRule = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadModifyWriteRule").msgclass
+ StreamPartition = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.StreamPartition").msgclass
+ StreamContinuationTokens = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.StreamContinuationTokens").msgclass
+ StreamContinuationToken = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.StreamContinuationToken").msgclass
  end
  end
  end
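The three messages above carry stream resumption state: `Heartbeat` and `CloseStream` responses deliver `StreamContinuationToken`s, which a later `ReadChangeStream` call can pass back. A sketch (the token value and resource name are placeholders; real tokens come off the stream):

```ruby
require "google/cloud/bigtable/v2"

client = Google::Cloud::Bigtable::V2::Bigtable::Client.new
table  = "projects/my-project/instances/my-instance/tables/my-table"

# Resume from state captured earlier; the partitions in the tokens must
# cover the partition being requested.
tokens = Google::Cloud::Bigtable::V2::StreamContinuationTokens.new(
  tokens: [
    Google::Cloud::Bigtable::V2::StreamContinuationToken.new(
      partition: Google::Cloud::Bigtable::V2::StreamPartition.new,
      token: "opaque-resume-token"
    )
  ]
)

client.read_change_stream(table_name: table, continuation_tokens: tokens)
      .each { |record| p record }
```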
@@ -79,6 +79,10 @@ module Google

  default_config.rpcs.read_modify_write_row.timeout = 20.0

+ default_config.rpcs.generate_initial_change_stream_partitions.timeout = 60.0
+
+ default_config.rpcs.read_change_stream.timeout = 43_200.0
+
  default_config
  end
  yield @configure if block_given?
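The new defaults give `generate_initial_change_stream_partitions` a 60-second timeout and `read_change_stream` 43,200 seconds (12 hours, fitting its long-lived streaming nature). If those don't suit a workload, the usual GAPIC configuration hooks apply; a sketch:

```ruby
require "google/cloud/bigtable/v2"

# Override the defaults for every client created from here on...
Google::Cloud::Bigtable::V2::Bigtable::Client.configure do |config|
  config.rpcs.read_change_stream.timeout = 3_600.0 # seconds
end

# ...or just for one client at construction time.
client = Google::Cloud::Bigtable::V2::Bigtable::Client.new do |config|
  config.rpcs.generate_initial_change_stream_partitions.timeout = 30.0
end
```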
@@ -217,13 +221,13 @@ module Google
  # # Create a request. To set request fields, pass in keyword arguments.
  # request = Google::Cloud::Bigtable::V2::ReadRowsRequest.new
  #
- # # Call the read_rows method.
- # result = client.read_rows request
+ # # Call the read_rows method to start streaming.
+ # output = client.read_rows request
  #
- # # The returned object is a streamed enumerable yielding elements of
- # # type ::Google::Cloud::Bigtable::V2::ReadRowsResponse.
- # result.each do |response|
- #   p response
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::ReadRowsResponse
+ # output.each do |current_response|
+ #   p current_response
  # end
  #
  def read_rows request, options = nil
@@ -317,13 +321,13 @@ module Google
  # # Create a request. To set request fields, pass in keyword arguments.
  # request = Google::Cloud::Bigtable::V2::SampleRowKeysRequest.new
  #
- # # Call the sample_row_keys method.
- # result = client.sample_row_keys request
+ # # Call the sample_row_keys method to start streaming.
+ # output = client.sample_row_keys request
  #
- # # The returned object is a streamed enumerable yielding elements of
- # # type ::Google::Cloud::Bigtable::V2::SampleRowKeysResponse.
- # result.each do |response|
- #   p response
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::SampleRowKeysResponse
+ # output.each do |current_response|
+ #   p current_response
  # end
  #
  def sample_row_keys request, options = nil
@@ -391,8 +395,8 @@ module Google
  # the default parameter values, pass an empty Hash as a request object (see above).
  #
  # @param table_name [::String]
- # Required. The unique name of the table to which the mutation should be applied.
- # Values are of the form
+ # Required. The unique name of the table to which the mutation should be
+ # applied. Values are of the form
  # `projects/<project>/instances/<instance>/tables/<table>`.
  # @param app_profile_id [::String]
  # This value specifies routing for replication. If not specified, the
@@ -400,9 +404,9 @@ module Google
  # @param row_key [::String]
  # Required. The key of the row to which the mutation should be applied.
  # @param mutations [::Array<::Google::Cloud::Bigtable::V2::Mutation, ::Hash>]
- # Required. Changes to be atomically applied to the specified row. Entries are applied
- # in order, meaning that earlier mutations can be masked by later ones.
- # Must contain at least one entry and at most 100000.
+ # Required. Changes to be atomically applied to the specified row. Entries
+ # are applied in order, meaning that earlier mutations can be masked by later
+ # ones. Must contain at least one entry and at most 100000.
  #
  # @yield [response, operation] Access the result along with the RPC operation
  # @yieldparam response [::Google::Cloud::Bigtable::V2::MutateRowResponse]
@@ -493,7 +497,8 @@ module Google
  # the default parameter values, pass an empty Hash as a request object (see above).
  #
  # @param table_name [::String]
- # Required. The unique name of the table to which the mutations should be applied.
+ # Required. The unique name of the table to which the mutations should be
+ # applied.
  # @param app_profile_id [::String]
  # This value specifies routing for replication. If not specified, the
  # "default" application profile will be used.
@@ -521,13 +526,13 @@ module Google
  # # Create a request. To set request fields, pass in keyword arguments.
  # request = Google::Cloud::Bigtable::V2::MutateRowsRequest.new
  #
- # # Call the mutate_rows method.
- # result = client.mutate_rows request
+ # # Call the mutate_rows method to start streaming.
+ # output = client.mutate_rows request
  #
- # # The returned object is a streamed enumerable yielding elements of
- # # type ::Google::Cloud::Bigtable::V2::MutateRowsResponse.
- # result.each do |response|
- #   p response
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::MutateRowsResponse
+ # output.each do |current_response|
+ #   p current_response
  # end
  #
  def mutate_rows request, options = nil
@@ -594,15 +599,15 @@ module Google
  # the default parameter values, pass an empty Hash as a request object (see above).
  #
  # @param table_name [::String]
- # Required. The unique name of the table to which the conditional mutation should be
- # applied.
- # Values are of the form
+ # Required. The unique name of the table to which the conditional mutation
+ # should be applied. Values are of the form
  # `projects/<project>/instances/<instance>/tables/<table>`.
  # @param app_profile_id [::String]
  # This value specifies routing for replication. If not specified, the
  # "default" application profile will be used.
  # @param row_key [::String]
- # Required. The key of the row to which the conditional mutation should be applied.
+ # Required. The key of the row to which the conditional mutation should be
+ # applied.
  # @param predicate_filter [::Google::Cloud::Bigtable::V2::RowFilter, ::Hash]
  # The filter to be applied to the contents of the specified row. Depending
  # on whether or not any results are yielded, either `true_mutations` or
@@ -709,8 +714,9 @@ module Google
  # the default parameter values, pass an empty Hash as a request object (see above).
  #
  # @param name [::String]
- # Required. The unique name of the instance to check permissions for as well as
- # respond. Values are of the form `projects/<project>/instances/<instance>`.
+ # Required. The unique name of the instance to check permissions for as well
+ # as respond. Values are of the form
+ # `projects/<project>/instances/<instance>`.
  # @param app_profile_id [::String]
  # This value specifies routing for replication. If not specified, the
  # "default" application profile will be used.
@@ -806,19 +812,19 @@ module Google
  # the default parameter values, pass an empty Hash as a request object (see above).
  #
  # @param table_name [::String]
- # Required. The unique name of the table to which the read/modify/write rules should be
- # applied.
- # Values are of the form
+ # Required. The unique name of the table to which the read/modify/write rules
+ # should be applied. Values are of the form
  # `projects/<project>/instances/<instance>/tables/<table>`.
  # @param app_profile_id [::String]
  # This value specifies routing for replication. If not specified, the
  # "default" application profile will be used.
  # @param row_key [::String]
- # Required. The key of the row to which the read/modify/write rules should be applied.
+ # Required. The key of the row to which the read/modify/write rules should be
+ # applied.
  # @param rules [::Array<::Google::Cloud::Bigtable::V2::ReadModifyWriteRule, ::Hash>]
- # Required. Rules specifying how the specified row's contents are to be transformed
- # into writes. Entries are applied in order, meaning that earlier rules will
- # affect the results of later ones.
+ # Required. Rules specifying how the specified row's contents are to be
+ # transformed into writes. Entries are applied in order, meaning that earlier
+ # rules will affect the results of later ones.
  #
  # @yield [response, operation] Access the result along with the RPC operation
  # @yieldparam response [::Google::Cloud::Bigtable::V2::ReadModifyWriteRowResponse]
@@ -888,6 +894,227 @@ module Google
  raise ::Google::Cloud::Error.from_error(e)
  end

+ ##
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Returns the current list of partitions that make up the table's
+ # change stream. The union of partitions will cover the entire keyspace.
+ # Partitions can be read with `ReadChangeStream`.
+ #
+ # @overload generate_initial_change_stream_partitions(request, options = nil)
+ # Pass arguments to `generate_initial_change_stream_partitions` via a request object, either of type
+ # {::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest} or an equivalent Hash.
+ #
+ # @param request [::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest, ::Hash]
+ # A request object representing the call parameters. Required. To specify no
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
+ # @param options [::Gapic::CallOptions, ::Hash]
+ # Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
+ #
+ # @overload generate_initial_change_stream_partitions(table_name: nil, app_profile_id: nil)
+ # Pass arguments to `generate_initial_change_stream_partitions` via keyword arguments. Note that at
+ # least one keyword argument is required. To specify no parameters, or to keep all
+ # the default parameter values, pass an empty Hash as a request object (see above).
+ #
+ # @param table_name [::String]
+ # Required. The unique name of the table from which to get change stream
+ # partitions. Values are of the form
+ # `projects/<project>/instances/<instance>/tables/<table>`.
+ # Change streaming must be enabled on the table.
+ # @param app_profile_id [::String]
+ # This value specifies routing for replication. If not specified, the
+ # "default" application profile will be used.
+ # Single cluster routing must be configured on the profile.
+ #
+ # @yield [response, operation] Access the result along with the RPC operation
+ # @yieldparam response [::Enumerable<::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse>]
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
+ #
+ # @return [::Enumerable<::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse>]
+ #
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/bigtable/v2"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Bigtable::V2::Bigtable::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest.new
+ #
+ # # Call the generate_initial_change_stream_partitions method to start streaming.
+ # output = client.generate_initial_change_stream_partitions request
+ #
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse
+ # output.each do |current_response|
+ #   p current_response
+ # end
+ #
+ def generate_initial_change_stream_partitions request, options = nil
+   raise ::ArgumentError, "request must be provided" if request.nil?
+
+   request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest
+
+   # Converts hash and nil to an options object
+   options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+   # Customize the options with defaults
+   metadata = @config.rpcs.generate_initial_change_stream_partitions.metadata.to_h
+
+   # Set x-goog-api-client and x-goog-user-project headers
+   metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+     lib_name: @config.lib_name, lib_version: @config.lib_version,
+     gapic_version: ::Google::Cloud::Bigtable::V2::VERSION
+   metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+   header_params = {}
+   if request.table_name
+     header_params["table_name"] = request.table_name
+   end
+
+   request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
+   metadata[:"x-goog-request-params"] ||= request_params_header
+
+   options.apply_defaults timeout: @config.rpcs.generate_initial_change_stream_partitions.timeout,
+                          metadata: metadata,
+                          retry_policy: @config.rpcs.generate_initial_change_stream_partitions.retry_policy
+
+   options.apply_defaults timeout: @config.timeout,
+                          metadata: @config.metadata,
+                          retry_policy: @config.retry_policy
+
+   @bigtable_stub.call_rpc :generate_initial_change_stream_partitions, request, options: options do |response, operation|
+     yield response, operation if block_given?
+     return response
+   end
+ rescue ::GRPC::BadStatus => e
+   raise ::Google::Cloud::Error.from_error(e)
+ end
+
+ ##
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Reads changes from a table's change stream. Changes will
+ # reflect both user-initiated mutations and mutations that are caused by
+ # garbage collection.
+ #
+ # @overload read_change_stream(request, options = nil)
+ # Pass arguments to `read_change_stream` via a request object, either of type
+ # {::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest} or an equivalent Hash.
+ #
+ # @param request [::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest, ::Hash]
+ # A request object representing the call parameters. Required. To specify no
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
+ # @param options [::Gapic::CallOptions, ::Hash]
+ # Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
+ #
+ # @overload read_change_stream(table_name: nil, app_profile_id: nil, partition: nil, start_time: nil, continuation_tokens: nil, end_time: nil, heartbeat_duration: nil)
+ # Pass arguments to `read_change_stream` via keyword arguments. Note that at
+ # least one keyword argument is required. To specify no parameters, or to keep all
+ # the default parameter values, pass an empty Hash as a request object (see above).
+ #
+ # @param table_name [::String]
+ # Required. The unique name of the table from which to read a change stream.
+ # Values are of the form
+ # `projects/<project>/instances/<instance>/tables/<table>`.
+ # Change streaming must be enabled on the table.
+ # @param app_profile_id [::String]
+ # This value specifies routing for replication. If not specified, the
+ # "default" application profile will be used.
+ # Single cluster routing must be configured on the profile.
+ # @param partition [::Google::Cloud::Bigtable::V2::StreamPartition, ::Hash]
+ # The partition to read changes from.
+ # @param start_time [::Google::Protobuf::Timestamp, ::Hash]
+ # Start reading the stream at the specified timestamp. This timestamp must
+ # be within the change stream retention period, less than or equal to the
+ # current time, and after change stream creation, whichever is greater.
+ # This value is inclusive and will be truncated to microsecond granularity.
+ # @param continuation_tokens [::Google::Cloud::Bigtable::V2::StreamContinuationTokens, ::Hash]
+ # Tokens that describe how to resume reading a stream where reading
+ # previously left off. If specified, changes will be read starting at
+ # the position. Tokens are delivered on the stream as part of `Heartbeat`
+ # and `CloseStream` messages.
+ #
+ # If a single token is provided, the token's partition must exactly match
+ # the request's partition. If multiple tokens are provided, as in the case
+ # of a partition merge, the union of the token partitions must exactly
+ # cover the request's partition. Otherwise, INVALID_ARGUMENT will be
+ # returned.
+ # @param end_time [::Google::Protobuf::Timestamp, ::Hash]
+ # If specified, OK will be returned when the stream advances beyond
+ # this time. Otherwise, changes will be continuously delivered on the stream.
+ # This value is inclusive and will be truncated to microsecond granularity.
+ # @param heartbeat_duration [::Google::Protobuf::Duration, ::Hash]
+ # If specified, the duration between `Heartbeat` messages on the stream.
+ # Otherwise, defaults to 5 seconds.
+ #
+ # @yield [response, operation] Access the result along with the RPC operation
+ # @yieldparam response [::Enumerable<::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse>]
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
+ #
+ # @return [::Enumerable<::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse>]
+ #
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/bigtable/v2"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Bigtable::V2::Bigtable::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Bigtable::V2::ReadChangeStreamRequest.new
+ #
+ # # Call the read_change_stream method to start streaming.
+ # output = client.read_change_stream request
+ #
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse
+ # output.each do |current_response|
+ #   p current_response
+ # end
+ #
+ def read_change_stream request, options = nil
+   raise ::ArgumentError, "request must be provided" if request.nil?
+
+   request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest
+
+   # Converts hash and nil to an options object
+   options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+   # Customize the options with defaults
+   metadata = @config.rpcs.read_change_stream.metadata.to_h
+
+   # Set x-goog-api-client and x-goog-user-project headers
+   metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+     lib_name: @config.lib_name, lib_version: @config.lib_version,
+     gapic_version: ::Google::Cloud::Bigtable::V2::VERSION
+   metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+   header_params = {}
+   if request.table_name
+     header_params["table_name"] = request.table_name
+   end
+
+   request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
+   metadata[:"x-goog-request-params"] ||= request_params_header
+
+   options.apply_defaults timeout: @config.rpcs.read_change_stream.timeout,
+                          metadata: metadata,
+                          retry_policy: @config.rpcs.read_change_stream.retry_policy
+
+   options.apply_defaults timeout: @config.timeout,
+                          metadata: @config.metadata,
+                          retry_policy: @config.retry_policy
+
+   @bigtable_stub.call_rpc :read_change_stream, request, options: options do |response, operation|
+     yield response, operation if block_given?
+     return response
+   end
+ rescue ::GRPC::BadStatus => e
+   raise ::Google::Cloud::Error.from_error(e)
+ end
+
  ##
  # Configuration class for the Bigtable API.
  #
@@ -1058,6 +1285,16 @@ module Google
  # @return [::Gapic::Config::Method]
  #
  attr_reader :read_modify_write_row
+ ##
+ # RPC-specific configuration for `generate_initial_change_stream_partitions`
+ # @return [::Gapic::Config::Method]
+ #
+ attr_reader :generate_initial_change_stream_partitions
+ ##
+ # RPC-specific configuration for `read_change_stream`
+ # @return [::Gapic::Config::Method]
+ #
+ attr_reader :read_change_stream

  # @private
  def initialize parent_rpcs = nil
@@ -1075,6 +1312,10 @@ module Google
  @ping_and_warm = ::Gapic::Config::Method.new ping_and_warm_config
  read_modify_write_row_config = parent_rpcs.read_modify_write_row if parent_rpcs.respond_to? :read_modify_write_row
  @read_modify_write_row = ::Gapic::Config::Method.new read_modify_write_row_config
+ generate_initial_change_stream_partitions_config = parent_rpcs.generate_initial_change_stream_partitions if parent_rpcs.respond_to? :generate_initial_change_stream_partitions
+ @generate_initial_change_stream_partitions = ::Gapic::Config::Method.new generate_initial_change_stream_partitions_config
+ read_change_stream_config = parent_rpcs.read_change_stream if parent_rpcs.respond_to? :read_change_stream
+ @read_change_stream = ::Gapic::Config::Method.new read_change_stream_config

  yield self if block_given?
  end
@@ -33,7 +33,7 @@ module Google
  ##
  # Service for reading from and writing to existing Bigtable tables.
  #
- # To load this service and instantiate a client:
+ # @example Load this service and instantiate a gRPC client
  #
  # require "google/cloud/bigtable/v2/bigtable"
  # client = ::Google::Cloud::Bigtable::V2::Bigtable::Client.new
@@ -21,7 +21,7 @@ module Google
  module Cloud
  module Bigtable
  module V2
- VERSION = "0.7.1"
+ VERSION = "0.8.0"
  end
  end
  end
@@ -23,9 +23,9 @@ module Google
  module Cloud
  module Bigtable
  ##
- # To load this package, including all its services, and instantiate a client:
+ # API client module.
  #
- # @example
+ # @example Load this package, including all its services, and instantiate a gRPC client
  #
  # require "google/cloud/bigtable/v2"
  # client = ::Google::Cloud::Bigtable::V2::Bigtable::Client.new