google-cloud-bigtable-v2 0.7.0 → 0.8.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9d95d9ba14cb16bd07793227a4cee13bda16cb26b3e901c89c2c05faa9a3face
- data.tar.gz: 21d78d9dc582f3beb6555eade471e89e3eb55a7402c185cfd962f7cfb24e7845
+ metadata.gz: 4cfc6e05ad85852561ba6aa5d53d5fe73c273f2e0dde7703f6c63802af275ff9
+ data.tar.gz: a5b32e4d889b81242c3622f883fb117e81d26a911064c6ebd64b6f03f0287c3c
  SHA512:
- metadata.gz: 8f7f1346f52674c2229a73b90f44c920351885d117a0ed368856eaf20bf5c33f4c61a001a8622e9f53a04ed0e4ae92f556a9213fa68c373bbe4f4076b6edd43c
- data.tar.gz: 3c6551336d90500086d5d8d03377a3c7ed9b8f14de8fd230a4ccc52eed036863f7739eaadbeaff246ce93ba5761e909385d81a673eb8c38b4fdad4cfdbd83fec
+ metadata.gz: 73c03468e20fd6ea4e9306cf304018635bc55d1caf8e3e878be7298155a5d01bbefd7a0535012a89ee19cc1b896cb94bfd7c26892276d1bbb7d9b9fc01891432
+ data.tar.gz: 3c1710b480304d76b52a0e7552851f59ab0edc03dfa23c069bae7bf3a698292dc91dee244c69c26b52af25cd10933a99c3d958fb477b5a537fa2ed1001561ea0
data/README.md CHANGED
@@ -46,7 +46,7 @@ for general usage information.
  ## Enabling Logging

  To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
- The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/stdlib/libdoc/logger/rdoc/Logger.html) as shown below,
+ The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/current/stdlibs/logger/Logger.html) as shown below,
  or a [`Google::Cloud::Logging::Logger`](https://googleapis.dev/ruby/google-cloud-logging/latest)
  that will write logs to [Cloud Logging](https://cloud.google.com/logging/). See [grpc/logconfig.rb](https://github.com/grpc/grpc/blob/master/src/ruby/lib/grpc/logconfig.rb)
  and the gRPC [spec_helper.rb](https://github.com/grpc/grpc/blob/master/src/ruby/spec/spec_helper.rb) for additional information.
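
As the linked grpc/logconfig.rb shows, gRPC looks for a `logger` method on the `GRPC` module, so a stdlib logger can be wired in roughly as follows. This is a minimal sketch; the module name and log level are arbitrary choices, not part of this gem.

```ruby
require "logger"

# Provide the `logger` method that gRPC's logconfig looks up.
module BigtableGrpcLogger
  LOGGER = Logger.new($stderr, level: Logger::WARN)

  def logger
    LOGGER
  end
end

# gRPC calls GRPC.logger internally; extending the module overrides its default logger.
module GRPC
  extend BigtableGrpcLogger
end
```
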
@@ -10,6 +10,8 @@ require 'google/api/resource_pb'
  require 'google/api/routing_pb'
  require 'google/bigtable/v2/data_pb'
  require 'google/bigtable/v2/request_stats_pb'
+ require 'google/protobuf/duration_pb'
+ require 'google/protobuf/timestamp_pb'
  require 'google/protobuf/wrappers_pb'
  require 'google/rpc/status_pb'

@@ -26,8 +28,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  add_enum "google.bigtable.v2.ReadRowsRequest.RequestStatsView" do
  value :REQUEST_STATS_VIEW_UNSPECIFIED, 0
  value :REQUEST_STATS_NONE, 1
- value :REQUEST_STATS_EFFICIENCY, 2
- value :REQUEST_STATS_FULL, 3
+ value :REQUEST_STATS_FULL, 2
  end
  add_message "google.bigtable.v2.ReadRowsResponse" do
  repeated :chunks, :message, 1, "google.bigtable.v2.ReadRowsResponse.CellChunk"
@@ -105,6 +106,65 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  add_message "google.bigtable.v2.ReadModifyWriteRowResponse" do
  optional :row, :message, 1, "google.bigtable.v2.Row"
  end
+ add_message "google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest" do
+ optional :table_name, :string, 1
+ optional :app_profile_id, :string, 2
+ end
+ add_message "google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse" do
+ optional :partition, :message, 1, "google.bigtable.v2.StreamPartition"
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamRequest" do
+ optional :table_name, :string, 1
+ optional :app_profile_id, :string, 2
+ optional :partition, :message, 3, "google.bigtable.v2.StreamPartition"
+ optional :end_time, :message, 5, "google.protobuf.Timestamp"
+ optional :heartbeat_duration, :message, 7, "google.protobuf.Duration"
+ oneof :start_from do
+ optional :start_time, :message, 4, "google.protobuf.Timestamp"
+ optional :continuation_tokens, :message, 6, "google.bigtable.v2.StreamContinuationTokens"
+ end
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse" do
+ oneof :stream_record do
+ optional :data_change, :message, 1, "google.bigtable.v2.ReadChangeStreamResponse.DataChange"
+ optional :heartbeat, :message, 2, "google.bigtable.v2.ReadChangeStreamResponse.Heartbeat"
+ optional :close_stream, :message, 3, "google.bigtable.v2.ReadChangeStreamResponse.CloseStream"
+ end
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk" do
+ optional :chunk_info, :message, 1, "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo"
+ optional :mutation, :message, 2, "google.bigtable.v2.Mutation"
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo" do
+ optional :chunked_value_size, :int32, 1
+ optional :chunked_value_offset, :int32, 2
+ optional :last_chunk, :bool, 3
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.DataChange" do
+ optional :type, :enum, 1, "google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type"
+ optional :source_cluster_id, :string, 2
+ optional :row_key, :bytes, 3
+ optional :commit_timestamp, :message, 4, "google.protobuf.Timestamp"
+ optional :tiebreaker, :int32, 5
+ repeated :chunks, :message, 6, "google.bigtable.v2.ReadChangeStreamResponse.MutationChunk"
+ optional :done, :bool, 8
+ optional :token, :string, 9
+ optional :estimated_low_watermark, :message, 10, "google.protobuf.Timestamp"
+ end
+ add_enum "google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type" do
+ value :TYPE_UNSPECIFIED, 0
+ value :USER, 1
+ value :GARBAGE_COLLECTION, 2
+ value :CONTINUATION, 3
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.Heartbeat" do
+ optional :continuation_token, :message, 1, "google.bigtable.v2.StreamContinuationToken"
+ optional :estimated_low_watermark, :message, 2, "google.protobuf.Timestamp"
+ end
+ add_message "google.bigtable.v2.ReadChangeStreamResponse.CloseStream" do
+ optional :status, :message, 1, "google.rpc.Status"
+ repeated :continuation_tokens, :message, 2, "google.bigtable.v2.StreamContinuationToken"
+ end
  end
  end

@@ -130,6 +190,16 @@ module Google
  PingAndWarmResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.PingAndWarmResponse").msgclass
  ReadModifyWriteRowRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadModifyWriteRowRequest").msgclass
  ReadModifyWriteRowResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadModifyWriteRowResponse").msgclass
+ GenerateInitialChangeStreamPartitionsRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest").msgclass
+ GenerateInitialChangeStreamPartitionsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse").msgclass
+ ReadChangeStreamRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamRequest").msgclass
+ ReadChangeStreamResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse").msgclass
+ ReadChangeStreamResponse::MutationChunk = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.MutationChunk").msgclass
+ ReadChangeStreamResponse::MutationChunk::ChunkInfo = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo").msgclass
+ ReadChangeStreamResponse::DataChange = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.DataChange").msgclass
+ ReadChangeStreamResponse::DataChange::Type = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type").enummodule
+ ReadChangeStreamResponse::Heartbeat = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.Heartbeat").msgclass
+ ReadChangeStreamResponse::CloseStream = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadChangeStreamResponse.CloseStream").msgclass
  end
  end
  end
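
With these descriptors registered, the change-stream messages behave like any other generated protobuf class. A minimal sketch of building a `ReadChangeStreamRequest` (the table path, profile id, and time values are placeholders, not defaults):

```ruby
require "google/cloud/bigtable/v2"

request = Google::Cloud::Bigtable::V2::ReadChangeStreamRequest.new(
  table_name: "projects/my-project/instances/my-instance/tables/my-table",
  app_profile_id: "default",
  # start_time and continuation_tokens share the start_from oneof; set only one.
  start_time: Google::Protobuf::Timestamp.new(seconds: Time.now.to_i - 600),
  heartbeat_duration: Google::Protobuf::Duration.new(seconds: 10)
)
```
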
@@ -62,6 +62,16 @@ module Google
  # timestamp is the greater of the existing timestamp or the current server
  # time. The method returns the new contents of all modified cells.
  rpc :ReadModifyWriteRow, ::Google::Cloud::Bigtable::V2::ReadModifyWriteRowRequest, ::Google::Cloud::Bigtable::V2::ReadModifyWriteRowResponse
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Returns the current list of partitions that make up the table's
+ # change stream. The union of partitions will cover the entire keyspace.
+ # Partitions can be read with `ReadChangeStream`.
+ rpc :GenerateInitialChangeStreamPartitions, ::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest, stream(::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse)
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Reads changes from a table's change stream. Changes will
+ # reflect both user-initiated mutations and mutations that are caused by
+ # garbage collection.
+ rpc :ReadChangeStream, ::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest, stream(::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse)
  end

  Stub = Service.rpc_stub_class
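
Both new RPCs are declared with `stream(...)` responses, so the generated stub returns an enumerable of messages. A sketch against a local Bigtable emulator; the host, table path, and the `bigtable_services_pb` require path are assumptions, and real traffic would use channel plus call credentials rather than an insecure channel:

```ruby
require "google/bigtable/v2/bigtable_services_pb"

# Insecure channel is only appropriate for the local emulator.
stub = Google::Cloud::Bigtable::V2::Bigtable::Stub.new(
  "localhost:8086", :this_channel_is_insecure
)

request = Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest.new(
  table_name: "projects/my-project/instances/my-instance/tables/my-table"
)

# Server-streaming call: each element is a GenerateInitialChangeStreamPartitionsResponse.
stub.generate_initial_change_stream_partitions(request).each do |response|
  p response.partition
end
```
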
@@ -127,6 +127,16 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
  optional :increment_amount, :int64, 4
  end
  end
+ add_message "google.bigtable.v2.StreamPartition" do
+ optional :row_range, :message, 1, "google.bigtable.v2.RowRange"
+ end
+ add_message "google.bigtable.v2.StreamContinuationTokens" do
+ repeated :tokens, :message, 1, "google.bigtable.v2.StreamContinuationToken"
+ end
+ add_message "google.bigtable.v2.StreamContinuationToken" do
+ optional :partition, :message, 1, "google.bigtable.v2.StreamPartition"
+ optional :token, :string, 2
+ end
  end
  end

@@ -153,6 +163,9 @@ module Google
  Mutation::DeleteFromFamily = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.Mutation.DeleteFromFamily").msgclass
  Mutation::DeleteFromRow = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.Mutation.DeleteFromRow").msgclass
  ReadModifyWriteRule = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadModifyWriteRule").msgclass
+ StreamPartition = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.StreamPartition").msgclass
+ StreamContinuationTokens = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.StreamContinuationTokens").msgclass
+ StreamContinuationToken = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.StreamContinuationToken").msgclass
  end
  end
  end
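
A short sketch of how these messages compose; the `RowRange` field names come from the existing data.proto, and the key and token strings are placeholders:

```ruby
require "google/cloud/bigtable/v2"

partition = Google::Cloud::Bigtable::V2::StreamPartition.new(
  row_range: Google::Cloud::Bigtable::V2::RowRange.new(
    start_key_closed: "user#000",
    end_key_open: "user#999"
  )
)

# A continuation token pairs an opaque token string with the partition it resumes.
token = Google::Cloud::Bigtable::V2::StreamContinuationToken.new(
  partition: partition,
  token: "opaque-token-from-a-heartbeat"
)
tokens = Google::Cloud::Bigtable::V2::StreamContinuationTokens.new(tokens: [token])
```
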
@@ -7,28 +7,22 @@ require 'google/protobuf/duration_pb'

  Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/bigtable/v2/request_stats.proto", :syntax => :proto3) do
- add_message "google.bigtable.v2.ReadIteratorStats" do
+ add_message "google.bigtable.v2.ReadIterationStats" do
  optional :rows_seen_count, :int64, 1
  optional :rows_returned_count, :int64, 2
  optional :cells_seen_count, :int64, 3
  optional :cells_returned_count, :int64, 4
- optional :deletes_seen_count, :int64, 5
  end
  add_message "google.bigtable.v2.RequestLatencyStats" do
  optional :frontend_server_latency, :message, 1, "google.protobuf.Duration"
  end
- add_message "google.bigtable.v2.ReadEfficiencyStats" do
- optional :read_iterator_stats, :message, 1, "google.bigtable.v2.ReadIteratorStats"
- optional :request_latency_stats, :message, 2, "google.bigtable.v2.RequestLatencyStats"
- end
- add_message "google.bigtable.v2.AllReadStats" do
- optional :read_iterator_stats, :message, 1, "google.bigtable.v2.ReadIteratorStats"
+ add_message "google.bigtable.v2.FullReadStatsView" do
+ optional :read_iteration_stats, :message, 1, "google.bigtable.v2.ReadIterationStats"
  optional :request_latency_stats, :message, 2, "google.bigtable.v2.RequestLatencyStats"
  end
  add_message "google.bigtable.v2.RequestStats" do
- oneof :stats do
- optional :read_efficiency_stats, :message, 1, "google.bigtable.v2.ReadEfficiencyStats"
- optional :all_read_stats, :message, 2, "google.bigtable.v2.AllReadStats"
+ oneof :stats_view do
+ optional :full_read_stats_view, :message, 1, "google.bigtable.v2.FullReadStatsView"
  end
  end
  end
@@ -38,10 +32,9 @@ module Google
  module Cloud
  module Bigtable
  module V2
- ReadIteratorStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadIteratorStats").msgclass
+ ReadIterationStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadIterationStats").msgclass
  RequestLatencyStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.RequestLatencyStats").msgclass
- ReadEfficiencyStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.ReadEfficiencyStats").msgclass
- AllReadStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.AllReadStats").msgclass
+ FullReadStatsView = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.FullReadStatsView").msgclass
  RequestStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.bigtable.v2.RequestStats").msgclass
  end
  end
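
To see the renamed stats in practice, a read can request the full view. This is a sketch: the `request_stats_view` field on `ReadRowsRequest` and the `request_stats` field on `ReadRowsResponse` are assumed from the wider proto and do not appear in this hunk, and the table path is a placeholder:

```ruby
require "google/cloud/bigtable/v2"

client = Google::Cloud::Bigtable::V2::Bigtable::Client.new

request = Google::Cloud::Bigtable::V2::ReadRowsRequest.new(
  table_name: "projects/my-project/instances/my-instance/tables/my-table",
  request_stats_view: :REQUEST_STATS_FULL
)

client.read_rows(request).each do |response|
  view = response.request_stats&.full_read_stats_view
  next unless view
  p view.read_iteration_stats.rows_returned_count
  p view.request_latency_stats.frontend_server_latency
end
```
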
@@ -79,6 +79,10 @@ module Google

  default_config.rpcs.read_modify_write_row.timeout = 20.0

+ default_config.rpcs.generate_initial_change_stream_partitions.timeout = 60.0
+
+ default_config.rpcs.read_change_stream.timeout = 43_200.0
+
  default_config
  end
  yield @configure if block_given?
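
Both defaults can be overridden through the usual GAPIC configuration block when constructing a client; the values below are arbitrary:

```ruby
require "google/cloud/bigtable/v2"

client = Google::Cloud::Bigtable::V2::Bigtable::Client.new do |config|
  # Shorten the 12-hour read_change_stream default for a bounded job.
  config.rpcs.read_change_stream.timeout = 3_600.0
  config.rpcs.generate_initial_change_stream_partitions.timeout = 30.0
end
```
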
@@ -217,13 +221,13 @@ module Google
  # # Create a request. To set request fields, pass in keyword arguments.
  # request = Google::Cloud::Bigtable::V2::ReadRowsRequest.new
  #
- # # Call the read_rows method.
- # result = client.read_rows request
+ # # Call the read_rows method to start streaming.
+ # output = client.read_rows request
  #
- # # The returned object is a streamed enumerable yielding elements of
- # # type ::Google::Cloud::Bigtable::V2::ReadRowsResponse.
- # result.each do |response|
- # p response
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::ReadRowsResponse
+ # output.each do |current_response|
+ # p current_response
  # end
  #
  def read_rows request, options = nil
@@ -317,13 +321,13 @@ module Google
  # # Create a request. To set request fields, pass in keyword arguments.
  # request = Google::Cloud::Bigtable::V2::SampleRowKeysRequest.new
  #
- # # Call the sample_row_keys method.
- # result = client.sample_row_keys request
+ # # Call the sample_row_keys method to start streaming.
+ # output = client.sample_row_keys request
  #
- # # The returned object is a streamed enumerable yielding elements of
- # # type ::Google::Cloud::Bigtable::V2::SampleRowKeysResponse.
- # result.each do |response|
- # p response
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::SampleRowKeysResponse
+ # output.each do |current_response|
+ # p current_response
  # end
  #
  def sample_row_keys request, options = nil
@@ -522,13 +526,13 @@ module Google
  # # Create a request. To set request fields, pass in keyword arguments.
  # request = Google::Cloud::Bigtable::V2::MutateRowsRequest.new
  #
- # # Call the mutate_rows method.
- # result = client.mutate_rows request
+ # # Call the mutate_rows method to start streaming.
+ # output = client.mutate_rows request
  #
- # # The returned object is a streamed enumerable yielding elements of
- # # type ::Google::Cloud::Bigtable::V2::MutateRowsResponse.
- # result.each do |response|
- # p response
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::MutateRowsResponse
+ # output.each do |current_response|
+ # p current_response
  # end
  #
  def mutate_rows request, options = nil
@@ -890,6 +894,227 @@ module Google
  raise ::Google::Cloud::Error.from_error(e)
  end

+ ##
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Returns the current list of partitions that make up the table's
+ # change stream. The union of partitions will cover the entire keyspace.
+ # Partitions can be read with `ReadChangeStream`.
+ #
+ # @overload generate_initial_change_stream_partitions(request, options = nil)
+ # Pass arguments to `generate_initial_change_stream_partitions` via a request object, either of type
+ # {::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest} or an equivalent Hash.
+ #
+ # @param request [::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest, ::Hash]
+ # A request object representing the call parameters. Required. To specify no
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
+ # @param options [::Gapic::CallOptions, ::Hash]
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
+ #
+ # @overload generate_initial_change_stream_partitions(table_name: nil, app_profile_id: nil)
+ # Pass arguments to `generate_initial_change_stream_partitions` via keyword arguments. Note that at
+ # least one keyword argument is required. To specify no parameters, or to keep all
+ # the default parameter values, pass an empty Hash as a request object (see above).
+ #
+ # @param table_name [::String]
+ # Required. The unique name of the table from which to get change stream
+ # partitions. Values are of the form
+ # `projects/<project>/instances/<instance>/tables/<table>`.
+ # Change streaming must be enabled on the table.
+ # @param app_profile_id [::String]
+ # This value specifies routing for replication. If not specified, the
+ # "default" application profile will be used.
+ # Single cluster routing must be configured on the profile.
+ #
+ # @yield [response, operation] Access the result along with the RPC operation
+ # @yieldparam response [::Enumerable<::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse>]
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
+ #
+ # @return [::Enumerable<::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse>]
+ #
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/bigtable/v2"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Bigtable::V2::Bigtable::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest.new
+ #
+ # # Call the generate_initial_change_stream_partitions method to start streaming.
+ # output = client.generate_initial_change_stream_partitions request
+ #
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsResponse
+ # output.each do |current_response|
+ # p current_response
+ # end
+ #
+ def generate_initial_change_stream_partitions request, options = nil
+ raise ::ArgumentError, "request must be provided" if request.nil?
+
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Bigtable::V2::GenerateInitialChangeStreamPartitionsRequest
+
+ # Converts hash and nil to an options object
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+ # Customize the options with defaults
+ metadata = @config.rpcs.generate_initial_change_stream_partitions.metadata.to_h
+
+ # Set x-goog-api-client and x-goog-user-project headers
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
+ gapic_version: ::Google::Cloud::Bigtable::V2::VERSION
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+ header_params = {}
+ if request.table_name
+ header_params["table_name"] = request.table_name
+ end
+
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
+ metadata[:"x-goog-request-params"] ||= request_params_header
+
+ options.apply_defaults timeout: @config.rpcs.generate_initial_change_stream_partitions.timeout,
+ metadata: metadata,
+ retry_policy: @config.rpcs.generate_initial_change_stream_partitions.retry_policy
+
+ options.apply_defaults timeout: @config.timeout,
+ metadata: @config.metadata,
+ retry_policy: @config.retry_policy
+
+ @bigtable_stub.call_rpc :generate_initial_change_stream_partitions, request, options: options do |response, operation|
+ yield response, operation if block_given?
+ return response
+ end
+ rescue ::GRPC::BadStatus => e
+ raise ::Google::Cloud::Error.from_error(e)
+ end
+
+ ##
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Reads changes from a table's change stream. Changes will
+ # reflect both user-initiated mutations and mutations that are caused by
+ # garbage collection.
+ #
+ # @overload read_change_stream(request, options = nil)
+ # Pass arguments to `read_change_stream` via a request object, either of type
+ # {::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest} or an equivalent Hash.
+ #
+ # @param request [::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest, ::Hash]
+ # A request object representing the call parameters. Required. To specify no
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
+ # @param options [::Gapic::CallOptions, ::Hash]
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
+ #
+ # @overload read_change_stream(table_name: nil, app_profile_id: nil, partition: nil, start_time: nil, continuation_tokens: nil, end_time: nil, heartbeat_duration: nil)
+ # Pass arguments to `read_change_stream` via keyword arguments. Note that at
+ # least one keyword argument is required. To specify no parameters, or to keep all
+ # the default parameter values, pass an empty Hash as a request object (see above).
+ #
+ # @param table_name [::String]
+ # Required. The unique name of the table from which to read a change stream.
+ # Values are of the form
+ # `projects/<project>/instances/<instance>/tables/<table>`.
+ # Change streaming must be enabled on the table.
+ # @param app_profile_id [::String]
+ # This value specifies routing for replication. If not specified, the
+ # "default" application profile will be used.
+ # Single cluster routing must be configured on the profile.
+ # @param partition [::Google::Cloud::Bigtable::V2::StreamPartition, ::Hash]
+ # The partition to read changes from.
+ # @param start_time [::Google::Protobuf::Timestamp, ::Hash]
+ # Start reading the stream at the specified timestamp. This timestamp must
+ # be within the change stream retention period, less than or equal to the
+ # current time, and after change stream creation, whichever is greater.
+ # This value is inclusive and will be truncated to microsecond granularity.
+ # @param continuation_tokens [::Google::Cloud::Bigtable::V2::StreamContinuationTokens, ::Hash]
+ # Tokens that describe how to resume reading a stream where reading
+ # previously left off. If specified, changes will be read starting at the
+ # position. Tokens are delivered on the stream as part of `Heartbeat`
+ # and `CloseStream` messages.
+ #
+ # If a single token is provided, the token’s partition must exactly match
+ # the request’s partition. If multiple tokens are provided, as in the case
+ # of a partition merge, the union of the token partitions must exactly
+ # cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+ # returned.
+ # @param end_time [::Google::Protobuf::Timestamp, ::Hash]
+ # If specified, OK will be returned when the stream advances beyond
+ # this time. Otherwise, changes will be continuously delivered on the stream.
+ # This value is inclusive and will be truncated to microsecond granularity.
+ # @param heartbeat_duration [::Google::Protobuf::Duration, ::Hash]
+ # If specified, the duration between `Heartbeat` messages on the stream.
+ # Otherwise, defaults to 5 seconds.
+ #
+ # @yield [response, operation] Access the result along with the RPC operation
+ # @yieldparam response [::Enumerable<::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse>]
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
+ #
+ # @return [::Enumerable<::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse>]
+ #
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
+ #
+ # @example Basic example
+ # require "google/cloud/bigtable/v2"
+ #
+ # # Create a client object. The client can be reused for multiple calls.
+ # client = Google::Cloud::Bigtable::V2::Bigtable::Client.new
+ #
+ # # Create a request. To set request fields, pass in keyword arguments.
+ # request = Google::Cloud::Bigtable::V2::ReadChangeStreamRequest.new
+ #
+ # # Call the read_change_stream method to start streaming.
+ # output = client.read_change_stream request
+ #
+ # # The returned object is a streamed enumerable yielding elements of type
+ # # ::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse
+ # output.each do |current_response|
+ # p current_response
+ # end
+ #
+ def read_change_stream request, options = nil
+ raise ::ArgumentError, "request must be provided" if request.nil?
+
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Bigtable::V2::ReadChangeStreamRequest
+
+ # Converts hash and nil to an options object
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
+
+ # Customize the options with defaults
+ metadata = @config.rpcs.read_change_stream.metadata.to_h
+
+ # Set x-goog-api-client and x-goog-user-project headers
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
+ gapic_version: ::Google::Cloud::Bigtable::V2::VERSION
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
+
+ header_params = {}
+ if request.table_name
+ header_params["table_name"] = request.table_name
+ end
+
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
+ metadata[:"x-goog-request-params"] ||= request_params_header
+
+ options.apply_defaults timeout: @config.rpcs.read_change_stream.timeout,
+ metadata: metadata,
+ retry_policy: @config.rpcs.read_change_stream.retry_policy
+
+ options.apply_defaults timeout: @config.timeout,
+ metadata: @config.metadata,
+ retry_policy: @config.retry_policy
+
+ @bigtable_stub.call_rpc :read_change_stream, request, options: options do |response, operation|
+ yield response, operation if block_given?
+ return response
+ end
+ rescue ::GRPC::BadStatus => e
+ raise ::Google::Cloud::Error.from_error(e)
+ end
+
  ##
  # Configuration class for the Bigtable API.
  #
@@ -1060,6 +1285,16 @@ module Google
  # @return [::Gapic::Config::Method]
  #
  attr_reader :read_modify_write_row
+ ##
+ # RPC-specific configuration for `generate_initial_change_stream_partitions`
+ # @return [::Gapic::Config::Method]
+ #
+ attr_reader :generate_initial_change_stream_partitions
+ ##
+ # RPC-specific configuration for `read_change_stream`
+ # @return [::Gapic::Config::Method]
+ #
+ attr_reader :read_change_stream

  # @private
  def initialize parent_rpcs = nil
@@ -1077,6 +1312,10 @@ module Google
  @ping_and_warm = ::Gapic::Config::Method.new ping_and_warm_config
  read_modify_write_row_config = parent_rpcs.read_modify_write_row if parent_rpcs.respond_to? :read_modify_write_row
  @read_modify_write_row = ::Gapic::Config::Method.new read_modify_write_row_config
+ generate_initial_change_stream_partitions_config = parent_rpcs.generate_initial_change_stream_partitions if parent_rpcs.respond_to? :generate_initial_change_stream_partitions
+ @generate_initial_change_stream_partitions = ::Gapic::Config::Method.new generate_initial_change_stream_partitions_config
+ read_change_stream_config = parent_rpcs.read_change_stream if parent_rpcs.respond_to? :read_change_stream
+ @read_change_stream = ::Gapic::Config::Method.new read_change_stream_config

  yield self if block_given?
  end
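
The generated examples print each response whole; in practice a consumer switches on the `stream_record` oneof. A sketch, assuming `client` and `request` as in the `read_change_stream` example above, and relying on the standard Ruby protobuf oneof accessor that returns the name of the populated field:

```ruby
output = client.read_change_stream request

output.each do |current_response|
  case current_response.stream_record
  when :data_change
    change = current_response.data_change
    p [change.type, change.row_key, change.chunks.size, change.done]
  when :heartbeat
    # Persist the continuation token so the stream can be resumed later.
    p current_response.heartbeat.continuation_token.token
  when :close_stream
    p current_response.close_stream.status
  end
end
```
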
@@ -33,7 +33,7 @@ module Google
  ##
  # Service for reading from and writing to existing Bigtable tables.
  #
- # To load this service and instantiate a client:
+ # @example Load this service and instantiate a gRPC client
  #
  # require "google/cloud/bigtable/v2/bigtable"
  # client = ::Google::Cloud::Bigtable::V2::Bigtable::Client.new
@@ -21,7 +21,7 @@ module Google
  module Cloud
  module Bigtable
  module V2
- VERSION = "0.7.0"
+ VERSION = "0.8.0"
  end
  end
  end
@@ -23,9 +23,9 @@ module Google
  module Cloud
  module Bigtable
  ##
- # To load this package, including all its services, and instantiate a client:
+ # API client module.
  #
- # @example
+ # @example Load this package, including all its services, and instantiate a gRPC client
  #
  # require "google/cloud/bigtable/v2"
  # client = ::Google::Cloud::Bigtable::V2::Bigtable::Client.new