google-cloud-managed_kafka-v1 0.a → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/.yardopts +12 -0
  3. data/AUTHENTICATION.md +122 -0
  4. data/README.md +144 -8
  5. data/lib/google/cloud/managed_kafka/v1/bindings_override.rb +102 -0
  6. data/lib/google/cloud/managed_kafka/v1/managed_kafka/client.rb +1876 -0
  7. data/lib/google/cloud/managed_kafka/v1/managed_kafka/credentials.rb +47 -0
  8. data/lib/google/cloud/managed_kafka/v1/managed_kafka/operations.rb +809 -0
  9. data/lib/google/cloud/managed_kafka/v1/managed_kafka/paths.rb +132 -0
  10. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest/client.rb +1759 -0
  11. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest/operations.rb +902 -0
  12. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest/service_stub.rb +900 -0
  13. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest.rb +55 -0
  14. data/lib/google/cloud/managed_kafka/v1/managed_kafka.rb +57 -0
  15. data/lib/google/cloud/managed_kafka/v1/rest.rb +38 -0
  16. data/lib/google/cloud/managed_kafka/v1/version.rb +7 -2
  17. data/lib/google/cloud/managed_kafka/v1.rb +45 -0
  18. data/lib/google/cloud/managedkafka/v1/managed_kafka_pb.rb +70 -0
  19. data/lib/google/cloud/managedkafka/v1/managed_kafka_services_pb.rb +72 -0
  20. data/lib/google/cloud/managedkafka/v1/resources_pb.rb +59 -0
  21. data/lib/google-cloud-managed_kafka-v1.rb +21 -0
  22. data/proto_docs/README.md +4 -0
  23. data/proto_docs/google/api/client.rb +399 -0
  24. data/proto_docs/google/api/field_behavior.rb +85 -0
  25. data/proto_docs/google/api/field_info.rb +65 -0
  26. data/proto_docs/google/api/launch_stage.rb +71 -0
  27. data/proto_docs/google/api/resource.rb +222 -0
  28. data/proto_docs/google/cloud/managedkafka/v1/managed_kafka.rb +341 -0
  29. data/proto_docs/google/cloud/managedkafka/v1/resources.rb +291 -0
  30. data/proto_docs/google/longrunning/operations.rb +164 -0
  31. data/proto_docs/google/protobuf/any.rb +145 -0
  32. data/proto_docs/google/protobuf/duration.rb +98 -0
  33. data/proto_docs/google/protobuf/empty.rb +34 -0
  34. data/proto_docs/google/protobuf/field_mask.rb +229 -0
  35. data/proto_docs/google/protobuf/timestamp.rb +127 -0
  36. data/proto_docs/google/rpc/status.rb +48 -0
  37. metadata +96 -10
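For orientation, a minimal usage sketch of the newly generated client, adapted from the docstring examples in data/lib/google/cloud/managed_kafka/v1/managed_kafka/client.rb below; the project and location IDs are placeholder values, not taken from this release:

    require "google/cloud/managed_kafka/v1"

    # Create a reusable gRPC client for the ManagedKafka service.
    client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new

    # List the clusters in a placeholder project and location.
    request = Google::Cloud::ManagedKafka::V1::ListClustersRequest.new(
      parent: "projects/my-project/locations/us-central1"
    )
    client.list_clusters(request).each { |cluster| p cluster.name }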
data/lib/google/cloud/managed_kafka/v1/managed_kafka/client.rb
@@ -0,0 +1,1876 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Copyright 2024 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
18
+
19
+ require "google/cloud/errors"
20
+ require "google/cloud/managedkafka/v1/managed_kafka_pb"
21
+ require "google/cloud/location"
22
+
23
+ module Google
24
+ module Cloud
25
+ module ManagedKafka
26
+ module V1
27
+ module ManagedKafka
28
+ ##
29
+ # Client for the ManagedKafka service.
30
+ #
31
+ # The service that a client application uses to manage Apache Kafka clusters,
32
+ # topics and consumer groups.
33
+ #
34
+ class Client
35
+ # @private
36
+ API_VERSION = ""
37
+
38
+ # @private
39
+ DEFAULT_ENDPOINT_TEMPLATE = "managedkafka.$UNIVERSE_DOMAIN$"
40
+
41
+ include Paths
42
+
43
+ # @private
44
+ attr_reader :managed_kafka_stub
45
+
46
+ ##
47
+ # Configure the ManagedKafka Client class.
48
+ #
49
+ # See {::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client::Configuration}
50
+ # for a description of the configuration fields.
51
+ #
52
+ # @example
53
+ #
54
+ # # Modify the configuration for all ManagedKafka clients
55
+ # ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.configure do |config|
56
+ # config.timeout = 10.0
57
+ # end
58
+ #
59
+ # @yield [config] Configure the ManagedKafka client.
60
+ # @yieldparam config [Client::Configuration]
61
+ #
62
+ # @return [Client::Configuration]
63
+ #
64
+ def self.configure
65
+ @configure ||= begin
66
+ namespace = ["Google", "Cloud", "ManagedKafka", "V1"]
67
+ parent_config = while namespace.any?
68
+ parent_name = namespace.join "::"
69
+ parent_const = const_get parent_name
70
+ break parent_const.configure if parent_const.respond_to? :configure
71
+ namespace.pop
72
+ end
73
+ default_config = Client::Configuration.new parent_config
74
+
75
+ default_config.rpcs.list_clusters.timeout = 60.0
76
+ default_config.rpcs.list_clusters.retry_policy = {
77
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
78
+ }
79
+
80
+ default_config.rpcs.get_cluster.timeout = 60.0
81
+ default_config.rpcs.get_cluster.retry_policy = {
82
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
83
+ }
84
+
85
+ default_config.rpcs.create_cluster.timeout = 60.0
86
+
87
+ default_config.rpcs.update_cluster.timeout = 60.0
88
+
89
+ default_config.rpcs.delete_cluster.timeout = 60.0
90
+
91
+ default_config.rpcs.list_topics.timeout = 60.0
92
+ default_config.rpcs.list_topics.retry_policy = {
93
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
94
+ }
95
+
96
+ default_config.rpcs.get_topic.timeout = 60.0
97
+ default_config.rpcs.get_topic.retry_policy = {
98
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
99
+ }
100
+
101
+ default_config.rpcs.create_topic.timeout = 60.0
102
+
103
+ default_config.rpcs.update_topic.timeout = 60.0
104
+
105
+ default_config.rpcs.delete_topic.timeout = 60.0
106
+
107
+ default_config.rpcs.list_consumer_groups.timeout = 60.0
108
+ default_config.rpcs.list_consumer_groups.retry_policy = {
109
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
110
+ }
111
+
112
+ default_config.rpcs.get_consumer_group.timeout = 60.0
113
+ default_config.rpcs.get_consumer_group.retry_policy = {
114
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
115
+ }
116
+
117
+ default_config.rpcs.update_consumer_group.timeout = 60.0
118
+
119
+ default_config.rpcs.delete_consumer_group.timeout = 60.0
120
+
121
+ default_config
122
+ end
123
+ yield @configure if block_given?
124
+ @configure
125
+ end
126
+
127
+ ##
128
+ # Configure the ManagedKafka Client instance.
129
+ #
130
+ # The configuration is set to the derived mode, meaning that values can be changed,
131
+ # but structural changes (adding new fields, etc.) are not allowed. Structural changes
132
+ # should be made on {Client.configure}.
133
+ #
134
+ # See {::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client::Configuration}
135
+ # for a description of the configuration fields.
136
+ #
137
+ # @yield [config] Configure the ManagedKafka client.
138
+ # @yieldparam config [Client::Configuration]
139
+ #
140
+ # @return [Client::Configuration]
141
+ #
142
+ def configure
143
+ yield @config if block_given?
144
+ @config
145
+ end
146
+
147
+ ##
148
+ # The effective universe domain
149
+ #
150
+ # @return [String]
151
+ #
152
+ def universe_domain
153
+ @managed_kafka_stub.universe_domain
154
+ end
155
+
156
+ ##
157
+ # Create a new ManagedKafka client object.
158
+ #
159
+ # @example
160
+ #
161
+ # # Create a client using the default configuration
162
+ # client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
163
+ #
164
+ # # Create a client using a custom configuration
165
+ # client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new do |config|
166
+ # config.timeout = 10.0
167
+ # end
168
+ #
169
+ # @yield [config] Configure the ManagedKafka client.
170
+ # @yieldparam config [Client::Configuration]
171
+ #
172
+ def initialize
173
+ # These require statements are intentionally placed here to initialize
174
+ # the gRPC module only when it's required.
175
+ # See https://github.com/googleapis/toolkit/issues/446
176
+ require "gapic/grpc"
177
+ require "google/cloud/managedkafka/v1/managed_kafka_services_pb"
178
+
179
+ # Create the configuration object
180
+ @config = Configuration.new Client.configure
181
+
182
+ # Yield the configuration if needed
183
+ yield @config if block_given?
184
+
185
+ # Create credentials
186
+ credentials = @config.credentials
187
+ # Use self-signed JWT if the endpoint is unchanged from default,
188
+ # but only if the default endpoint does not have a region prefix.
189
+ enable_self_signed_jwt = @config.endpoint.nil? ||
190
+ (@config.endpoint == Configuration::DEFAULT_ENDPOINT &&
191
+ !@config.endpoint.split(".").first.include?("-"))
192
+ credentials ||= Credentials.default scope: @config.scope,
193
+ enable_self_signed_jwt: enable_self_signed_jwt
194
+ if credentials.is_a?(::String) || credentials.is_a?(::Hash)
195
+ credentials = Credentials.new credentials, scope: @config.scope
196
+ end
197
+ @quota_project_id = @config.quota_project
198
+ @quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id
199
+
200
+ @operations_client = Operations.new do |config|
201
+ config.credentials = credentials
202
+ config.quota_project = @quota_project_id
203
+ config.endpoint = @config.endpoint
204
+ config.universe_domain = @config.universe_domain
205
+ end
206
+
207
+ @managed_kafka_stub = ::Gapic::ServiceStub.new(
208
+ ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Stub,
209
+ credentials: credentials,
210
+ endpoint: @config.endpoint,
211
+ endpoint_template: DEFAULT_ENDPOINT_TEMPLATE,
212
+ universe_domain: @config.universe_domain,
213
+ channel_args: @config.channel_args,
214
+ interceptors: @config.interceptors,
215
+ channel_pool_config: @config.channel_pool
216
+ )
217
+
218
+ @location_client = Google::Cloud::Location::Locations::Client.new do |config|
219
+ config.credentials = credentials
220
+ config.quota_project = @quota_project_id
221
+ config.endpoint = @managed_kafka_stub.endpoint
222
+ config.universe_domain = @managed_kafka_stub.universe_domain
223
+ end
224
+ end
225
+
226
+ ##
227
+ # Get the associated client for long-running operations.
228
+ #
229
+ # @return [::Google::Cloud::ManagedKafka::V1::ManagedKafka::Operations]
230
+ #
231
+ attr_reader :operations_client
232
+
233
+ ##
234
+ # Get the associated client for the Locations mix-in.
235
+ #
236
+ # @return [Google::Cloud::Location::Locations::Client]
237
+ #
238
+ attr_reader :location_client
239
+
240
+ # Service calls
241
+
242
+ ##
243
+ # Lists the clusters in a given project and location.
244
+ #
245
+ # @overload list_clusters(request, options = nil)
246
+ # Pass arguments to `list_clusters` via a request object, either of type
247
+ # {::Google::Cloud::ManagedKafka::V1::ListClustersRequest} or an equivalent Hash.
248
+ #
249
+ # @param request [::Google::Cloud::ManagedKafka::V1::ListClustersRequest, ::Hash]
250
+ # A request object representing the call parameters. Required. To specify no
251
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
252
+ # @param options [::Gapic::CallOptions, ::Hash]
253
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
254
+ #
255
+ # @overload list_clusters(parent: nil, page_size: nil, page_token: nil, filter: nil, order_by: nil)
256
+ # Pass arguments to `list_clusters` via keyword arguments. Note that at
257
+ # least one keyword argument is required. To specify no parameters, or to keep all
258
+ # the default parameter values, pass an empty Hash as a request object (see above).
259
+ #
260
+ # @param parent [::String]
261
+ # Required. The parent location whose clusters are to be listed. Structured
262
+ # like `projects/{project}/locations/{location}`.
263
+ # @param page_size [::Integer]
264
+ # Optional. The maximum number of clusters to return. The service may return
265
+ # fewer than this value. If unspecified, the server will pick an appropriate
266
+ # default.
267
+ # @param page_token [::String]
268
+ # Optional. A page token, received from a previous `ListClusters` call.
269
+ # Provide this to retrieve the subsequent page.
270
+ #
271
+ # When paginating, all other parameters provided to `ListClusters` must match
272
+ # the call that provided the page token.
273
+ # @param filter [::String]
274
+ # Optional. Filter expression for the result.
275
+ # @param order_by [::String]
276
+ # Optional. Order by fields for the result.
277
+ #
278
+ # @yield [response, operation] Access the result along with the RPC operation
279
+ # @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::Cluster>]
280
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
281
+ #
282
+ # @return [::Gapic::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::Cluster>]
283
+ #
284
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
285
+ #
286
+ # @example Basic example
287
+ # require "google/cloud/managed_kafka/v1"
288
+ #
289
+ # # Create a client object. The client can be reused for multiple calls.
290
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
291
+ #
292
+ # # Create a request. To set request fields, pass in keyword arguments.
293
+ # request = Google::Cloud::ManagedKafka::V1::ListClustersRequest.new
294
+ #
295
+ # # Call the list_clusters method.
296
+ # result = client.list_clusters request
297
+ #
298
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
299
+ # # over elements, and API calls will be issued to fetch pages as needed.
300
+ # result.each do |item|
301
+ # # Each element is of type ::Google::Cloud::ManagedKafka::V1::Cluster.
302
+ # p item
303
+ # end
304
+ #
305
+ def list_clusters request, options = nil
306
+ raise ::ArgumentError, "request must be provided" if request.nil?
307
+
308
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::ListClustersRequest
309
+
310
+ # Converts hash and nil to an options object
311
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
312
+
313
+ # Customize the options with defaults
314
+ metadata = @config.rpcs.list_clusters.metadata.to_h
315
+
316
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
317
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
318
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
319
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
320
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
321
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
322
+
323
+ header_params = {}
324
+ if request.parent
325
+ header_params["parent"] = request.parent
326
+ end
327
+
328
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
329
+ metadata[:"x-goog-request-params"] ||= request_params_header
330
+
331
+ options.apply_defaults timeout: @config.rpcs.list_clusters.timeout,
332
+ metadata: metadata,
333
+ retry_policy: @config.rpcs.list_clusters.retry_policy
334
+
335
+ options.apply_defaults timeout: @config.timeout,
336
+ metadata: @config.metadata,
337
+ retry_policy: @config.retry_policy
338
+
339
+ @managed_kafka_stub.call_rpc :list_clusters, request, options: options do |response, operation|
340
+ response = ::Gapic::PagedEnumerable.new @managed_kafka_stub, :list_clusters, request, response, operation, options
341
+ yield response, operation if block_given?
342
+ return response
343
+ end
344
+ rescue ::GRPC::BadStatus => e
345
+ raise ::Google::Cloud::Error.from_error(e)
346
+ end
347
+
348
+ ##
349
+ # Returns the properties of a single cluster.
350
+ #
351
+ # @overload get_cluster(request, options = nil)
352
+ # Pass arguments to `get_cluster` via a request object, either of type
353
+ # {::Google::Cloud::ManagedKafka::V1::GetClusterRequest} or an equivalent Hash.
354
+ #
355
+ # @param request [::Google::Cloud::ManagedKafka::V1::GetClusterRequest, ::Hash]
356
+ # A request object representing the call parameters. Required. To specify no
357
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
358
+ # @param options [::Gapic::CallOptions, ::Hash]
359
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
360
+ #
361
+ # @overload get_cluster(name: nil)
362
+ # Pass arguments to `get_cluster` via keyword arguments. Note that at
363
+ # least one keyword argument is required. To specify no parameters, or to keep all
364
+ # the default parameter values, pass an empty Hash as a request object (see above).
365
+ #
366
+ # @param name [::String]
367
+ # Required. The name of the cluster whose configuration to return.
368
+ #
369
+ # @yield [response, operation] Access the result along with the RPC operation
370
+ # @yieldparam response [::Google::Cloud::ManagedKafka::V1::Cluster]
371
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
372
+ #
373
+ # @return [::Google::Cloud::ManagedKafka::V1::Cluster]
374
+ #
375
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
376
+ #
377
+ # @example Basic example
378
+ # require "google/cloud/managed_kafka/v1"
379
+ #
380
+ # # Create a client object. The client can be reused for multiple calls.
381
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
382
+ #
383
+ # # Create a request. To set request fields, pass in keyword arguments.
384
+ # request = Google::Cloud::ManagedKafka::V1::GetClusterRequest.new
385
+ #
386
+ # # Call the get_cluster method.
387
+ # result = client.get_cluster request
388
+ #
389
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Cluster.
390
+ # p result
391
+ #
392
+ def get_cluster request, options = nil
393
+ raise ::ArgumentError, "request must be provided" if request.nil?
394
+
395
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::GetClusterRequest
396
+
397
+ # Converts hash and nil to an options object
398
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
399
+
400
+ # Customize the options with defaults
401
+ metadata = @config.rpcs.get_cluster.metadata.to_h
402
+
403
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
404
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
405
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
406
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
407
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
408
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
409
+
410
+ header_params = {}
411
+ if request.name
412
+ header_params["name"] = request.name
413
+ end
414
+
415
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
416
+ metadata[:"x-goog-request-params"] ||= request_params_header
417
+
418
+ options.apply_defaults timeout: @config.rpcs.get_cluster.timeout,
419
+ metadata: metadata,
420
+ retry_policy: @config.rpcs.get_cluster.retry_policy
421
+
422
+ options.apply_defaults timeout: @config.timeout,
423
+ metadata: @config.metadata,
424
+ retry_policy: @config.retry_policy
425
+
426
+ @managed_kafka_stub.call_rpc :get_cluster, request, options: options do |response, operation|
427
+ yield response, operation if block_given?
428
+ return response
429
+ end
430
+ rescue ::GRPC::BadStatus => e
431
+ raise ::Google::Cloud::Error.from_error(e)
432
+ end
433
+
434
+ ##
435
+ # Creates a new cluster in a given project and location.
436
+ #
437
+ # @overload create_cluster(request, options = nil)
438
+ # Pass arguments to `create_cluster` via a request object, either of type
439
+ # {::Google::Cloud::ManagedKafka::V1::CreateClusterRequest} or an equivalent Hash.
440
+ #
441
+ # @param request [::Google::Cloud::ManagedKafka::V1::CreateClusterRequest, ::Hash]
442
+ # A request object representing the call parameters. Required. To specify no
443
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
444
+ # @param options [::Gapic::CallOptions, ::Hash]
445
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
446
+ #
447
+ # @overload create_cluster(parent: nil, cluster_id: nil, cluster: nil, request_id: nil)
448
+ # Pass arguments to `create_cluster` via keyword arguments. Note that at
449
+ # least one keyword argument is required. To specify no parameters, or to keep all
450
+ # the default parameter values, pass an empty Hash as a request object (see above).
451
+ #
452
+ # @param parent [::String]
453
+ # Required. The parent region in which to create the cluster. Structured like
454
+ # `projects/{project}/locations/{location}`.
455
+ # @param cluster_id [::String]
456
+ # Required. The ID to use for the cluster, which will become the final
457
+ # component of the cluster's name. The ID must be 1-63 characters long, and
458
+ # match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
459
+ # RFC 1035.
460
+ #
461
+ # This value is structured like: `my-cluster-id`.
462
+ # @param cluster [::Google::Cloud::ManagedKafka::V1::Cluster, ::Hash]
463
+ # Required. Configuration of the cluster to create. Its `name` field is
464
+ # ignored.
465
+ # @param request_id [::String]
466
+ # Optional. An optional request ID to identify requests. Specify a unique
467
+ # request ID to avoid duplication of requests. If a request times out or
468
+ # fails, retrying with the same ID allows the server to recognize the
469
+ # previous attempt. For at least 60 minutes, the server ignores duplicate
470
+ # requests bearing the same ID.
471
+ #
472
+ # For example, consider a situation where you make an initial request and the
473
+ # request times out. If you make the request again with the same request ID
474
+ # within 60 minutes of the last request, the server checks if an original
475
+ # operation with the same request ID was received. If so, the server ignores
476
+ # the second request.
477
+ #
478
+ # The request ID must be a valid UUID. A zero UUID is not supported
479
+ # (00000000-0000-0000-0000-000000000000).
480
+ #
481
+ # @yield [response, operation] Access the result along with the RPC operation
482
+ # @yieldparam response [::Gapic::Operation]
483
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
484
+ #
485
+ # @return [::Gapic::Operation]
486
+ #
487
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
488
+ #
489
+ # @example Basic example
490
+ # require "google/cloud/managed_kafka/v1"
491
+ #
492
+ # # Create a client object. The client can be reused for multiple calls.
493
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
494
+ #
495
+ # # Create a request. To set request fields, pass in keyword arguments.
496
+ # request = Google::Cloud::ManagedKafka::V1::CreateClusterRequest.new
497
+ #
498
+ # # Call the create_cluster method.
499
+ # result = client.create_cluster request
500
+ #
501
+ # # The returned object is of type Gapic::Operation. You can use it to
502
+ # # check the status of an operation, cancel it, or wait for results.
503
+ # # Here is how to wait for a response.
504
+ # result.wait_until_done! timeout: 60
505
+ # if result.response?
506
+ # p result.response
507
+ # else
508
+ # puts "No response received."
509
+ # end
510
+ #
511
+ def create_cluster request, options = nil
512
+ raise ::ArgumentError, "request must be provided" if request.nil?
513
+
514
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::CreateClusterRequest
515
+
516
+ # Converts hash and nil to an options object
517
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
518
+
519
+ # Customize the options with defaults
520
+ metadata = @config.rpcs.create_cluster.metadata.to_h
521
+
522
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
523
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
524
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
525
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
526
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
527
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
528
+
529
+ header_params = {}
530
+ if request.parent
531
+ header_params["parent"] = request.parent
532
+ end
533
+
534
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
535
+ metadata[:"x-goog-request-params"] ||= request_params_header
536
+
537
+ options.apply_defaults timeout: @config.rpcs.create_cluster.timeout,
538
+ metadata: metadata,
539
+ retry_policy: @config.rpcs.create_cluster.retry_policy
540
+
541
+ options.apply_defaults timeout: @config.timeout,
542
+ metadata: @config.metadata,
543
+ retry_policy: @config.retry_policy
544
+
545
+ @managed_kafka_stub.call_rpc :create_cluster, request, options: options do |response, operation|
546
+ response = ::Gapic::Operation.new response, @operations_client, options: options
547
+ yield response, operation if block_given?
548
+ return response
549
+ end
550
+ rescue ::GRPC::BadStatus => e
551
+ raise ::Google::Cloud::Error.from_error(e)
552
+ end
553
+
554
+ ##
555
+ # Updates the properties of a single cluster.
556
+ #
557
+ # @overload update_cluster(request, options = nil)
558
+ # Pass arguments to `update_cluster` via a request object, either of type
559
+ # {::Google::Cloud::ManagedKafka::V1::UpdateClusterRequest} or an equivalent Hash.
560
+ #
561
+ # @param request [::Google::Cloud::ManagedKafka::V1::UpdateClusterRequest, ::Hash]
562
+ # A request object representing the call parameters. Required. To specify no
563
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
564
+ # @param options [::Gapic::CallOptions, ::Hash]
565
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
566
+ #
567
+ # @overload update_cluster(update_mask: nil, cluster: nil, request_id: nil)
568
+ # Pass arguments to `update_cluster` via keyword arguments. Note that at
569
+ # least one keyword argument is required. To specify no parameters, or to keep all
570
+ # the default parameter values, pass an empty Hash as a request object (see above).
571
+ #
572
+ # @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
573
+ # Required. Field mask is used to specify the fields to be overwritten in the
574
+ # cluster resource by the update. The fields specified in the update_mask are
575
+ # relative to the resource, not the full request. A field will be overwritten
576
+ # if it is in the mask. The mask is required and a value of * will update all
577
+ # fields.
578
+ # @param cluster [::Google::Cloud::ManagedKafka::V1::Cluster, ::Hash]
579
+ # Required. The cluster to update. Its `name` field must be populated.
580
+ # @param request_id [::String]
581
+ # Optional. An optional request ID to identify requests. Specify a unique
582
+ # request ID to avoid duplication of requests. If a request times out or
583
+ # fails, retrying with the same ID allows the server to recognize the
584
+ # previous attempt. For at least 60 minutes, the server ignores duplicate
585
+ # requests bearing the same ID.
586
+ #
587
+ # For example, consider a situation where you make an initial request and the
588
+ # request times out. If you make the request again with the same request ID
589
+ # within 60 minutes of the last request, the server checks if an original
590
+ # operation with the same request ID was received. If so, the server ignores
591
+ # the second request.
592
+ #
593
+ # The request ID must be a valid UUID. A zero UUID is not supported
594
+ # (00000000-0000-0000-0000-000000000000).
595
+ #
596
+ # @yield [response, operation] Access the result along with the RPC operation
597
+ # @yieldparam response [::Gapic::Operation]
598
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
599
+ #
600
+ # @return [::Gapic::Operation]
601
+ #
602
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
603
+ #
604
+ # @example Basic example
605
+ # require "google/cloud/managed_kafka/v1"
606
+ #
607
+ # # Create a client object. The client can be reused for multiple calls.
608
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
609
+ #
610
+ # # Create a request. To set request fields, pass in keyword arguments.
611
+ # request = Google::Cloud::ManagedKafka::V1::UpdateClusterRequest.new
612
+ #
613
+ # # Call the update_cluster method.
614
+ # result = client.update_cluster request
615
+ #
616
+ # # The returned object is of type Gapic::Operation. You can use it to
617
+ # # check the status of an operation, cancel it, or wait for results.
618
+ # # Here is how to wait for a response.
619
+ # result.wait_until_done! timeout: 60
620
+ # if result.response?
621
+ # p result.response
622
+ # else
623
+ # puts "No response received."
624
+ # end
625
+ #
626
+ def update_cluster request, options = nil
627
+ raise ::ArgumentError, "request must be provided" if request.nil?
628
+
629
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::UpdateClusterRequest
630
+
631
+ # Converts hash and nil to an options object
632
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
633
+
634
+ # Customize the options with defaults
635
+ metadata = @config.rpcs.update_cluster.metadata.to_h
636
+
637
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
638
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
639
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
640
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
641
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
642
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
643
+
644
+ header_params = {}
645
+ if request.cluster&.name
646
+ header_params["cluster.name"] = request.cluster.name
647
+ end
648
+
649
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
650
+ metadata[:"x-goog-request-params"] ||= request_params_header
651
+
652
+ options.apply_defaults timeout: @config.rpcs.update_cluster.timeout,
653
+ metadata: metadata,
654
+ retry_policy: @config.rpcs.update_cluster.retry_policy
655
+
656
+ options.apply_defaults timeout: @config.timeout,
657
+ metadata: @config.metadata,
658
+ retry_policy: @config.retry_policy
659
+
660
+ @managed_kafka_stub.call_rpc :update_cluster, request, options: options do |response, operation|
661
+ response = ::Gapic::Operation.new response, @operations_client, options: options
662
+ yield response, operation if block_given?
663
+ return response
664
+ end
665
+ rescue ::GRPC::BadStatus => e
666
+ raise ::Google::Cloud::Error.from_error(e)
667
+ end
668
+
669
+ ##
670
+ # Deletes a single cluster.
671
+ #
672
+ # @overload delete_cluster(request, options = nil)
673
+ # Pass arguments to `delete_cluster` via a request object, either of type
674
+ # {::Google::Cloud::ManagedKafka::V1::DeleteClusterRequest} or an equivalent Hash.
675
+ #
676
+ # @param request [::Google::Cloud::ManagedKafka::V1::DeleteClusterRequest, ::Hash]
677
+ # A request object representing the call parameters. Required. To specify no
678
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
679
+ # @param options [::Gapic::CallOptions, ::Hash]
680
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
681
+ #
682
+ # @overload delete_cluster(name: nil, request_id: nil)
683
+ # Pass arguments to `delete_cluster` via keyword arguments. Note that at
684
+ # least one keyword argument is required. To specify no parameters, or to keep all
685
+ # the default parameter values, pass an empty Hash as a request object (see above).
686
+ #
687
+ # @param name [::String]
688
+ # Required. The name of the cluster to delete.
689
+ # @param request_id [::String]
690
+ # Optional. An optional request ID to identify requests. Specify a unique
691
+ # request ID to avoid duplication of requests. If a request times out or
692
+ # fails, retrying with the same ID allows the server to recognize the
693
+ # previous attempt. For at least 60 minutes, the server ignores duplicate
694
+ # requests bearing the same ID.
695
+ #
696
+ # For example, consider a situation where you make an initial request and the
697
+ # request times out. If you make the request again with the same request ID
698
+ # within 60 minutes of the last request, the server checks if an original
699
+ # operation with the same request ID was received. If so, the server ignores
700
+ # the second request.
701
+ #
702
+ # The request ID must be a valid UUID. A zero UUID is not supported
703
+ # (00000000-0000-0000-0000-000000000000).
704
+ #
705
+ # @yield [response, operation] Access the result along with the RPC operation
706
+ # @yieldparam response [::Gapic::Operation]
707
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
708
+ #
709
+ # @return [::Gapic::Operation]
710
+ #
711
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
712
+ #
713
+ # @example Basic example
714
+ # require "google/cloud/managed_kafka/v1"
715
+ #
716
+ # # Create a client object. The client can be reused for multiple calls.
717
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
718
+ #
719
+ # # Create a request. To set request fields, pass in keyword arguments.
720
+ # request = Google::Cloud::ManagedKafka::V1::DeleteClusterRequest.new
721
+ #
722
+ # # Call the delete_cluster method.
723
+ # result = client.delete_cluster request
724
+ #
725
+ # # The returned object is of type Gapic::Operation. You can use it to
726
+ # # check the status of an operation, cancel it, or wait for results.
727
+ # # Here is how to wait for a response.
728
+ # result.wait_until_done! timeout: 60
729
+ # if result.response?
730
+ # p result.response
731
+ # else
732
+ # puts "No response received."
733
+ # end
734
+ #
735
+ def delete_cluster request, options = nil
736
+ raise ::ArgumentError, "request must be provided" if request.nil?
737
+
738
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::DeleteClusterRequest
739
+
740
+ # Converts hash and nil to an options object
741
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
742
+
743
+ # Customize the options with defaults
744
+ metadata = @config.rpcs.delete_cluster.metadata.to_h
745
+
746
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
747
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
748
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
749
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
750
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
751
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
752
+
753
+ header_params = {}
754
+ if request.name
755
+ header_params["name"] = request.name
756
+ end
757
+
758
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
759
+ metadata[:"x-goog-request-params"] ||= request_params_header
760
+
761
+ options.apply_defaults timeout: @config.rpcs.delete_cluster.timeout,
762
+ metadata: metadata,
763
+ retry_policy: @config.rpcs.delete_cluster.retry_policy
764
+
765
+ options.apply_defaults timeout: @config.timeout,
766
+ metadata: @config.metadata,
767
+ retry_policy: @config.retry_policy
768
+
769
+ @managed_kafka_stub.call_rpc :delete_cluster, request, options: options do |response, operation|
770
+ response = ::Gapic::Operation.new response, @operations_client, options: options
771
+ yield response, operation if block_given?
772
+ return response
773
+ end
774
+ rescue ::GRPC::BadStatus => e
775
+ raise ::Google::Cloud::Error.from_error(e)
776
+ end
777
+
778
+ ##
779
+ # Lists the topics in a given cluster.
780
+ #
781
+ # @overload list_topics(request, options = nil)
782
+ # Pass arguments to `list_topics` via a request object, either of type
783
+ # {::Google::Cloud::ManagedKafka::V1::ListTopicsRequest} or an equivalent Hash.
784
+ #
785
+ # @param request [::Google::Cloud::ManagedKafka::V1::ListTopicsRequest, ::Hash]
786
+ # A request object representing the call parameters. Required. To specify no
787
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
788
+ # @param options [::Gapic::CallOptions, ::Hash]
789
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
790
+ #
791
+ # @overload list_topics(parent: nil, page_size: nil, page_token: nil)
792
+ # Pass arguments to `list_topics` via keyword arguments. Note that at
793
+ # least one keyword argument is required. To specify no parameters, or to keep all
794
+ # the default parameter values, pass an empty Hash as a request object (see above).
795
+ #
796
+ # @param parent [::String]
797
+ # Required. The parent cluster whose topics are to be listed. Structured like
798
+ # `projects/{project}/locations/{location}/clusters/{cluster}`.
799
+ # @param page_size [::Integer]
800
+ # Optional. The maximum number of topics to return. The service may return
801
+ # fewer than this value. If unset or zero, all topics for the parent are
802
+ # returned.
803
+ # @param page_token [::String]
804
+ # Optional. A page token, received from a previous `ListTopics` call.
805
+ # Provide this to retrieve the subsequent page.
806
+ #
807
+ # When paginating, all other parameters provided to `ListTopics` must match
808
+ # the call that provided the page token.
809
+ #
810
+ # @yield [response, operation] Access the result along with the RPC operation
811
+ # @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::Topic>]
812
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
813
+ #
814
+ # @return [::Gapic::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::Topic>]
815
+ #
816
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
817
+ #
818
+ # @example Basic example
819
+ # require "google/cloud/managed_kafka/v1"
820
+ #
821
+ # # Create a client object. The client can be reused for multiple calls.
822
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
823
+ #
824
+ # # Create a request. To set request fields, pass in keyword arguments.
825
+ # request = Google::Cloud::ManagedKafka::V1::ListTopicsRequest.new
826
+ #
827
+ # # Call the list_topics method.
828
+ # result = client.list_topics request
829
+ #
830
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
831
+ # # over elements, and API calls will be issued to fetch pages as needed.
832
+ # result.each do |item|
833
+ # # Each element is of type ::Google::Cloud::ManagedKafka::V1::Topic.
834
+ # p item
835
+ # end
836
+ #
837
+ def list_topics request, options = nil
838
+ raise ::ArgumentError, "request must be provided" if request.nil?
839
+
840
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::ListTopicsRequest
841
+
842
+ # Converts hash and nil to an options object
843
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
844
+
845
+ # Customize the options with defaults
846
+ metadata = @config.rpcs.list_topics.metadata.to_h
847
+
848
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
849
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
850
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
851
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
852
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
853
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
854
+
855
+ header_params = {}
856
+ if request.parent
857
+ header_params["parent"] = request.parent
858
+ end
859
+
860
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
861
+ metadata[:"x-goog-request-params"] ||= request_params_header
862
+
863
+ options.apply_defaults timeout: @config.rpcs.list_topics.timeout,
864
+ metadata: metadata,
865
+ retry_policy: @config.rpcs.list_topics.retry_policy
866
+
867
+ options.apply_defaults timeout: @config.timeout,
868
+ metadata: @config.metadata,
869
+ retry_policy: @config.retry_policy
870
+
871
+ @managed_kafka_stub.call_rpc :list_topics, request, options: options do |response, operation|
872
+ response = ::Gapic::PagedEnumerable.new @managed_kafka_stub, :list_topics, request, response, operation, options
873
+ yield response, operation if block_given?
874
+ return response
875
+ end
876
+ rescue ::GRPC::BadStatus => e
877
+ raise ::Google::Cloud::Error.from_error(e)
878
+ end
879
+
880
+ ##
881
+ # Returns the properties of a single topic.
882
+ #
883
+ # @overload get_topic(request, options = nil)
884
+ # Pass arguments to `get_topic` via a request object, either of type
885
+ # {::Google::Cloud::ManagedKafka::V1::GetTopicRequest} or an equivalent Hash.
886
+ #
887
+ # @param request [::Google::Cloud::ManagedKafka::V1::GetTopicRequest, ::Hash]
888
+ # A request object representing the call parameters. Required. To specify no
889
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
890
+ # @param options [::Gapic::CallOptions, ::Hash]
891
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
892
+ #
893
+ # @overload get_topic(name: nil)
894
+ # Pass arguments to `get_topic` via keyword arguments. Note that at
895
+ # least one keyword argument is required. To specify no parameters, or to keep all
896
+ # the default parameter values, pass an empty Hash as a request object (see above).
897
+ #
898
+ # @param name [::String]
899
+ # Required. The name of the topic whose configuration to return. Structured
900
+ # like:
901
+ # projects/\\{project}/locations/\\{location}/clusters/\\{cluster}/topics/\\{topic}.
902
+ #
903
+ # @yield [response, operation] Access the result along with the RPC operation
904
+ # @yieldparam response [::Google::Cloud::ManagedKafka::V1::Topic]
905
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
906
+ #
907
+ # @return [::Google::Cloud::ManagedKafka::V1::Topic]
908
+ #
909
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
910
+ #
911
+ # @example Basic example
912
+ # require "google/cloud/managed_kafka/v1"
913
+ #
914
+ # # Create a client object. The client can be reused for multiple calls.
915
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
916
+ #
917
+ # # Create a request. To set request fields, pass in keyword arguments.
918
+ # request = Google::Cloud::ManagedKafka::V1::GetTopicRequest.new
919
+ #
920
+ # # Call the get_topic method.
921
+ # result = client.get_topic request
922
+ #
923
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Topic.
924
+ # p result
925
+ #
926
+ def get_topic request, options = nil
927
+ raise ::ArgumentError, "request must be provided" if request.nil?
928
+
929
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::GetTopicRequest
930
+
931
+ # Converts hash and nil to an options object
932
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
933
+
934
+ # Customize the options with defaults
935
+ metadata = @config.rpcs.get_topic.metadata.to_h
936
+
937
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
938
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
939
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
940
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
941
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
942
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
943
+
944
+ header_params = {}
945
+ if request.name
946
+ header_params["name"] = request.name
947
+ end
948
+
949
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
950
+ metadata[:"x-goog-request-params"] ||= request_params_header
951
+
952
+ options.apply_defaults timeout: @config.rpcs.get_topic.timeout,
953
+ metadata: metadata,
954
+ retry_policy: @config.rpcs.get_topic.retry_policy
955
+
956
+ options.apply_defaults timeout: @config.timeout,
957
+ metadata: @config.metadata,
958
+ retry_policy: @config.retry_policy
959
+
960
+ @managed_kafka_stub.call_rpc :get_topic, request, options: options do |response, operation|
961
+ yield response, operation if block_given?
962
+ return response
963
+ end
964
+ rescue ::GRPC::BadStatus => e
965
+ raise ::Google::Cloud::Error.from_error(e)
966
+ end
967
+
968
+ ##
969
+ # Creates a new topic in a given project and location.
970
+ #
971
+ # @overload create_topic(request, options = nil)
972
+ # Pass arguments to `create_topic` via a request object, either of type
973
+ # {::Google::Cloud::ManagedKafka::V1::CreateTopicRequest} or an equivalent Hash.
974
+ #
975
+ # @param request [::Google::Cloud::ManagedKafka::V1::CreateTopicRequest, ::Hash]
976
+ # A request object representing the call parameters. Required. To specify no
977
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
978
+ # @param options [::Gapic::CallOptions, ::Hash]
979
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
980
+ #
981
+ # @overload create_topic(parent: nil, topic_id: nil, topic: nil)
982
+ # Pass arguments to `create_topic` via keyword arguments. Note that at
983
+ # least one keyword argument is required. To specify no parameters, or to keep all
984
+ # the default parameter values, pass an empty Hash as a request object (see above).
985
+ #
986
+ # @param parent [::String]
987
+ # Required. The parent cluster in which to create the topic.
988
+ # Structured like
989
+ # `projects/{project}/locations/{location}/clusters/{cluster}`.
990
+ # @param topic_id [::String]
991
+ # Required. The ID to use for the topic, which will become the final
992
+ # component of the topic's name.
993
+ #
994
+ # This value is structured like: `my-topic-name`.
995
+ # @param topic [::Google::Cloud::ManagedKafka::V1::Topic, ::Hash]
996
+ # Required. Configuration of the topic to create. Its `name` field is
997
+ # ignored.
998
+ #
999
+ # @yield [response, operation] Access the result along with the RPC operation
1000
+ # @yieldparam response [::Google::Cloud::ManagedKafka::V1::Topic]
1001
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1002
+ #
1003
+ # @return [::Google::Cloud::ManagedKafka::V1::Topic]
1004
+ #
1005
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1006
+ #
1007
+ # @example Basic example
1008
+ # require "google/cloud/managed_kafka/v1"
1009
+ #
1010
+ # # Create a client object. The client can be reused for multiple calls.
1011
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1012
+ #
1013
+ # # Create a request. To set request fields, pass in keyword arguments.
1014
+ # request = Google::Cloud::ManagedKafka::V1::CreateTopicRequest.new
1015
+ #
1016
+ # # Call the create_topic method.
1017
+ # result = client.create_topic request
1018
+ #
1019
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Topic.
1020
+ # p result
1021
+ #
1022
+ def create_topic request, options = nil
1023
+ raise ::ArgumentError, "request must be provided" if request.nil?
1024
+
1025
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::CreateTopicRequest
1026
+
1027
+ # Converts hash and nil to an options object
1028
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1029
+
1030
+ # Customize the options with defaults
1031
+ metadata = @config.rpcs.create_topic.metadata.to_h
1032
+
1033
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1034
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1035
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1036
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1037
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1038
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1039
+
1040
+ header_params = {}
1041
+ if request.parent
1042
+ header_params["parent"] = request.parent
1043
+ end
1044
+
1045
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1046
+ metadata[:"x-goog-request-params"] ||= request_params_header
1047
+
1048
+ options.apply_defaults timeout: @config.rpcs.create_topic.timeout,
1049
+ metadata: metadata,
1050
+ retry_policy: @config.rpcs.create_topic.retry_policy
1051
+
1052
+ options.apply_defaults timeout: @config.timeout,
1053
+ metadata: @config.metadata,
1054
+ retry_policy: @config.retry_policy
1055
+
1056
+ @managed_kafka_stub.call_rpc :create_topic, request, options: options do |response, operation|
1057
+ yield response, operation if block_given?
1058
+ return response
1059
+ end
1060
+ rescue ::GRPC::BadStatus => e
1061
+ raise ::Google::Cloud::Error.from_error(e)
1062
+ end
1063
+
1064
+ ##
1065
+ # Updates the properties of a single topic.
1066
+ #
1067
+ # @overload update_topic(request, options = nil)
1068
+ # Pass arguments to `update_topic` via a request object, either of type
1069
+ # {::Google::Cloud::ManagedKafka::V1::UpdateTopicRequest} or an equivalent Hash.
1070
+ #
1071
+ # @param request [::Google::Cloud::ManagedKafka::V1::UpdateTopicRequest, ::Hash]
1072
+ # A request object representing the call parameters. Required. To specify no
1073
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1074
+ # @param options [::Gapic::CallOptions, ::Hash]
1075
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
1076
+ #
1077
+ # @overload update_topic(update_mask: nil, topic: nil)
1078
+ # Pass arguments to `update_topic` via keyword arguments. Note that at
1079
+ # least one keyword argument is required. To specify no parameters, or to keep all
1080
+ # the default parameter values, pass an empty Hash as a request object (see above).
1081
+ #
1082
+ # @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
1083
+ # Required. Field mask is used to specify the fields to be overwritten in the
1084
+ # Topic resource by the update. The fields specified in the update_mask are
1085
+ # relative to the resource, not the full request. A field will be overwritten
1086
+ # if it is in the mask. The mask is required and a value of * will update all
1087
+ # fields.
1088
+ # @param topic [::Google::Cloud::ManagedKafka::V1::Topic, ::Hash]
1089
+ # Required. The topic to update. Its `name` field must be populated.
1090
+ #
1091
+ # @yield [response, operation] Access the result along with the RPC operation
1092
+ # @yieldparam response [::Google::Cloud::ManagedKafka::V1::Topic]
1093
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1094
+ #
1095
+ # @return [::Google::Cloud::ManagedKafka::V1::Topic]
1096
+ #
1097
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1098
+ #
1099
+ # @example Basic example
1100
+ # require "google/cloud/managed_kafka/v1"
1101
+ #
1102
+ # # Create a client object. The client can be reused for multiple calls.
1103
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1104
+ #
1105
+ # # Create a request. To set request fields, pass in keyword arguments.
1106
+ # request = Google::Cloud::ManagedKafka::V1::UpdateTopicRequest.new
1107
+ #
1108
+ # # Call the update_topic method.
1109
+ # result = client.update_topic request
1110
+ #
1111
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Topic.
1112
+ # p result
1113
+ #
1114
+ def update_topic request, options = nil
1115
+ raise ::ArgumentError, "request must be provided" if request.nil?
1116
+
1117
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::UpdateTopicRequest
1118
+
1119
+ # Converts hash and nil to an options object
1120
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1121
+
1122
+ # Customize the options with defaults
1123
+ metadata = @config.rpcs.update_topic.metadata.to_h
1124
+
1125
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1126
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1127
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1128
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1129
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1130
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1131
+
1132
+ header_params = {}
1133
+ if request.topic&.name
1134
+ header_params["topic.name"] = request.topic.name
1135
+ end
1136
+
1137
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1138
+ metadata[:"x-goog-request-params"] ||= request_params_header
1139
+
1140
+ options.apply_defaults timeout: @config.rpcs.update_topic.timeout,
1141
+ metadata: metadata,
1142
+ retry_policy: @config.rpcs.update_topic.retry_policy
1143
+
1144
+ options.apply_defaults timeout: @config.timeout,
1145
+ metadata: @config.metadata,
1146
+ retry_policy: @config.retry_policy
1147
+
1148
+ @managed_kafka_stub.call_rpc :update_topic, request, options: options do |response, operation|
1149
+ yield response, operation if block_given?
1150
+ return response
1151
+ end
1152
+ rescue ::GRPC::BadStatus => e
1153
+ raise ::Google::Cloud::Error.from_error(e)
1154
+ end
1155
+
1156
+ ##
1157
+ # Deletes a single topic.
1158
+ #
1159
+ # @overload delete_topic(request, options = nil)
1160
+ # Pass arguments to `delete_topic` via a request object, either of type
1161
+ # {::Google::Cloud::ManagedKafka::V1::DeleteTopicRequest} or an equivalent Hash.
1162
+ #
1163
+ # @param request [::Google::Cloud::ManagedKafka::V1::DeleteTopicRequest, ::Hash]
1164
+ # A request object representing the call parameters. Required. To specify no
1165
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1166
+ # @param options [::Gapic::CallOptions, ::Hash]
1167
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
1168
+ #
1169
+ # @overload delete_topic(name: nil)
1170
+ # Pass arguments to `delete_topic` via keyword arguments. Note that at
1171
+ # least one keyword argument is required. To specify no parameters, or to keep all
1172
+ # the default parameter values, pass an empty Hash as a request object (see above).
1173
+ #
1174
+ # @param name [::String]
1175
+ # Required. The name of the topic to delete.
1176
+ # `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`.
1177
+ #
1178
+ # @yield [response, operation] Access the result along with the RPC operation
1179
+ # @yieldparam response [::Google::Protobuf::Empty]
1180
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1181
+ #
1182
+ # @return [::Google::Protobuf::Empty]
1183
+ #
1184
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1185
+ #
1186
+ # @example Basic example
1187
+ # require "google/cloud/managed_kafka/v1"
1188
+ #
1189
+ # # Create a client object. The client can be reused for multiple calls.
1190
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1191
+ #
1192
+ # # Create a request. To set request fields, pass in keyword arguments.
1193
+ # request = Google::Cloud::ManagedKafka::V1::DeleteTopicRequest.new
1194
+ #
1195
+ # # Call the delete_topic method.
1196
+ # result = client.delete_topic request
1197
+ #
1198
+ # # The returned object is of type Google::Protobuf::Empty.
1199
+ # p result
1200
+ #
1201
+ def delete_topic request, options = nil
1202
+ raise ::ArgumentError, "request must be provided" if request.nil?
1203
+
1204
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::DeleteTopicRequest
1205
+
1206
+ # Converts hash and nil to an options object
1207
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1208
+
1209
+ # Customize the options with defaults
1210
+ metadata = @config.rpcs.delete_topic.metadata.to_h
1211
+
1212
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1213
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1214
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1215
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1216
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1217
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1218
+
1219
+ header_params = {}
1220
+ if request.name
1221
+ header_params["name"] = request.name
1222
+ end
1223
+
1224
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1225
+ metadata[:"x-goog-request-params"] ||= request_params_header
1226
+
1227
+ options.apply_defaults timeout: @config.rpcs.delete_topic.timeout,
1228
+ metadata: metadata,
1229
+ retry_policy: @config.rpcs.delete_topic.retry_policy
1230
+
1231
+ options.apply_defaults timeout: @config.timeout,
1232
+ metadata: @config.metadata,
1233
+ retry_policy: @config.retry_policy
1234
+
1235
+ @managed_kafka_stub.call_rpc :delete_topic, request, options: options do |response, operation|
1236
+ yield response, operation if block_given?
1237
+ return response
1238
+ end
1239
+ rescue ::GRPC::BadStatus => e
1240
+ raise ::Google::Cloud::Error.from_error(e)
1241
+ end
1242
+
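+ # A minimal sketch of delete_topic using the name keyword documented above; the
+ # project, location, cluster, and topic IDs are illustrative placeholders.
+ #
+ #   require "google/cloud/managed_kafka/v1"
+ #
+ #   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
+ #   name = "projects/my-project/locations/us-central1/clusters/my-cluster/topics/my-topic"
+ #   client.delete_topic name: name
+ #   # A successful call returns Google::Protobuf::Empty.
+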
1243
+ ##
1244
+ # Lists the consumer groups in a given cluster.
1245
+ #
1246
+ # @overload list_consumer_groups(request, options = nil)
1247
+ # Pass arguments to `list_consumer_groups` via a request object, either of type
1248
+ # {::Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest} or an equivalent Hash.
1249
+ #
1250
+ # @param request [::Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest, ::Hash]
1251
+ # A request object representing the call parameters. Required. To specify no
1252
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1253
+ # @param options [::Gapic::CallOptions, ::Hash]
1254
+ # Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
1255
+ #
1256
+ # @overload list_consumer_groups(parent: nil, page_size: nil, page_token: nil)
1257
+ # Pass arguments to `list_consumer_groups` via keyword arguments. Note that at
1258
+ # least one keyword argument is required. To specify no parameters, or to keep all
1259
+ # the default parameter values, pass an empty Hash as a request object (see above).
1260
+ #
1261
+ # @param parent [::String]
1262
+ # Required. The parent cluster whose consumer groups are to be listed.
1263
+ # Structured like
1264
+ # `projects/{project}/locations/{location}/clusters/{cluster}`.
1265
+ # @param page_size [::Integer]
1266
+ # Optional. The maximum number of consumer groups to return. The service may
1267
+ # return fewer than this value. If unset or zero, all consumer groups for the
1268
+ # parent are returned.
1269
+ # @param page_token [::String]
1270
+ # Optional. A page token, received from a previous `ListConsumerGroups` call.
1271
+ # Provide this to retrieve the subsequent page.
1272
+ #
1273
+ # When paginating, all other parameters provided to `ListConsumerGroups` must
1274
+ # match the call that provided the page token.
1275
+ #
1276
+ # @yield [response, operation] Access the result along with the RPC operation
1277
+ # @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::ConsumerGroup>]
1278
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1279
+ #
1280
+ # @return [::Gapic::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::ConsumerGroup>]
1281
+ #
1282
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1283
+ #
1284
+ # @example Basic example
1285
+ # require "google/cloud/managed_kafka/v1"
1286
+ #
1287
+ # # Create a client object. The client can be reused for multiple calls.
1288
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1289
+ #
1290
+ # # Create a request. To set request fields, pass in keyword arguments.
1291
+ # request = Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest.new
1292
+ #
1293
+ # # Call the list_consumer_groups method.
1294
+ # result = client.list_consumer_groups request
1295
+ #
1296
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
1297
+ # # over elements, and API calls will be issued to fetch pages as needed.
1298
+ # result.each do |item|
1299
+ # # Each element is of type ::Google::Cloud::ManagedKafka::V1::ConsumerGroup.
1300
+ # p item
1301
+ # end
1302
+ #
1303
+ def list_consumer_groups request, options = nil
1304
+ raise ::ArgumentError, "request must be provided" if request.nil?
1305
+
1306
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest
1307
+
1308
+ # Converts hash and nil to an options object
1309
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1310
+
1311
+ # Customize the options with defaults
1312
+ metadata = @config.rpcs.list_consumer_groups.metadata.to_h
1313
+
1314
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1315
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1316
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1317
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1318
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1319
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1320
+
1321
+ header_params = {}
1322
+ if request.parent
1323
+ header_params["parent"] = request.parent
1324
+ end
1325
+
1326
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1327
+ metadata[:"x-goog-request-params"] ||= request_params_header
1328
+
1329
+ options.apply_defaults timeout: @config.rpcs.list_consumer_groups.timeout,
1330
+ metadata: metadata,
1331
+ retry_policy: @config.rpcs.list_consumer_groups.retry_policy
1332
+
1333
+ options.apply_defaults timeout: @config.timeout,
1334
+ metadata: @config.metadata,
1335
+ retry_policy: @config.retry_policy
1336
+
1337
+ @managed_kafka_stub.call_rpc :list_consumer_groups, request, options: options do |response, operation|
1338
+ response = ::Gapic::PagedEnumerable.new @managed_kafka_stub, :list_consumer_groups, request, response, operation, options
1339
+ yield response, operation if block_given?
1340
+ return response
1341
+ end
1342
+ rescue ::GRPC::BadStatus => e
1343
+ raise ::Google::Cloud::Error.from_error(e)
1344
+ end
1345
+
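+ # A minimal sketch of paging through consumer groups with the parent, page_size,
+ # and page_token keywords documented above; the parent path and page size of 50
+ # are illustrative placeholders.
+ #
+ #   require "google/cloud/managed_kafka/v1"
+ #
+ #   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
+ #   parent = "projects/my-project/locations/us-central1/clusters/my-cluster"
+ #   results = client.list_consumer_groups parent: parent, page_size: 50
+ #   # The PagedEnumerable issues further list calls as needed while iterating.
+ #   results.each { |group| p group.name }
+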
1346
+ ##
1347
+ # Returns the properties of a single consumer group.
1348
+ #
1349
+ # @overload get_consumer_group(request, options = nil)
1350
+ # Pass arguments to `get_consumer_group` via a request object, either of type
1351
+ # {::Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest} or an equivalent Hash.
1352
+ #
1353
+ # @param request [::Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest, ::Hash]
1354
+ # A request object representing the call parameters. Required. To specify no
1355
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1356
+ # @param options [::Gapic::CallOptions, ::Hash]
1357
+ # Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
1358
+ #
1359
+ # @overload get_consumer_group(name: nil)
1360
+ # Pass arguments to `get_consumer_group` via keyword arguments. Note that at
1361
+ # least one keyword argument is required. To specify no parameters, or to keep all
1362
+ # the default parameter values, pass an empty Hash as a request object (see above).
1363
+ #
1364
+ # @param name [::String]
1365
+ # Required. The name of the consumer group whose configuration to return.
1366
+ # `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`.
1367
+ #
1368
+ # @yield [response, operation] Access the result along with the RPC operation
1369
+ # @yieldparam response [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
1370
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1371
+ #
1372
+ # @return [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
1373
+ #
1374
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1375
+ #
1376
+ # @example Basic example
1377
+ # require "google/cloud/managed_kafka/v1"
1378
+ #
1379
+ # # Create a client object. The client can be reused for multiple calls.
1380
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1381
+ #
1382
+ # # Create a request. To set request fields, pass in keyword arguments.
1383
+ # request = Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest.new
1384
+ #
1385
+ # # Call the get_consumer_group method.
1386
+ # result = client.get_consumer_group request
1387
+ #
1388
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::ConsumerGroup.
1389
+ # p result
1390
+ #
1391
+ def get_consumer_group request, options = nil
1392
+ raise ::ArgumentError, "request must be provided" if request.nil?
1393
+
1394
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest
1395
+
1396
+ # Converts hash and nil to an options object
1397
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1398
+
1399
+ # Customize the options with defaults
1400
+ metadata = @config.rpcs.get_consumer_group.metadata.to_h
1401
+
1402
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1403
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1404
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1405
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1406
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1407
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1408
+
1409
+ header_params = {}
1410
+ if request.name
1411
+ header_params["name"] = request.name
1412
+ end
1413
+
1414
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1415
+ metadata[:"x-goog-request-params"] ||= request_params_header
1416
+
1417
+ options.apply_defaults timeout: @config.rpcs.get_consumer_group.timeout,
1418
+ metadata: metadata,
1419
+ retry_policy: @config.rpcs.get_consumer_group.retry_policy
1420
+
1421
+ options.apply_defaults timeout: @config.timeout,
1422
+ metadata: @config.metadata,
1423
+ retry_policy: @config.retry_policy
1424
+
1425
+ @managed_kafka_stub.call_rpc :get_consumer_group, request, options: options do |response, operation|
1426
+ yield response, operation if block_given?
1427
+ return response
1428
+ end
1429
+ rescue ::GRPC::BadStatus => e
1430
+ raise ::Google::Cloud::Error.from_error(e)
1431
+ end
1432
+
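+ # A minimal sketch of get_consumer_group using the name keyword documented
+ # above; the resource IDs are illustrative placeholders.
+ #
+ #   require "google/cloud/managed_kafka/v1"
+ #
+ #   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
+ #   name = "projects/my-project/locations/us-central1/clusters/my-cluster/consumerGroups/my-group"
+ #   group = client.get_consumer_group name: name
+ #   p group.name
+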
1433
+ ##
1434
+ # Updates the properties of a single consumer group.
1435
+ #
1436
+ # @overload update_consumer_group(request, options = nil)
1437
+ # Pass arguments to `update_consumer_group` via a request object, either of type
1438
+ # {::Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest} or an equivalent Hash.
1439
+ #
1440
+ # @param request [::Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest, ::Hash]
1441
+ # A request object representing the call parameters. Required. To specify no
1442
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1443
+ # @param options [::Gapic::CallOptions, ::Hash]
1444
+ # Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
1445
+ #
1446
+ # @overload update_consumer_group(update_mask: nil, consumer_group: nil)
1447
+ # Pass arguments to `update_consumer_group` via keyword arguments. Note that at
1448
+ # least one keyword argument is required. To specify no parameters, or to keep all
1449
+ # the default parameter values, pass an empty Hash as a request object (see above).
1450
+ #
1451
+ # @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
1452
+ # Required. Field mask is used to specify the fields to be overwritten in the
1453
+ # ConsumerGroup resource by the update.
1454
+ # The fields specified in the update_mask are relative to the resource, not
1455
+ # the full request. A field will be overwritten if it is in the mask. The
1456
+ # mask is required and a value of * will update all fields.
1457
+ # @param consumer_group [::Google::Cloud::ManagedKafka::V1::ConsumerGroup, ::Hash]
1458
+ # Required. The consumer group to update. Its `name` field must be populated.
1459
+ #
1460
+ # @yield [response, operation] Access the result along with the RPC operation
1461
+ # @yieldparam response [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
1462
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1463
+ #
1464
+ # @return [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
1465
+ #
1466
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1467
+ #
1468
+ # @example Basic example
1469
+ # require "google/cloud/managed_kafka/v1"
1470
+ #
1471
+ # # Create a client object. The client can be reused for multiple calls.
1472
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1473
+ #
1474
+ # # Create a request. To set request fields, pass in keyword arguments.
1475
+ # request = Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest.new
1476
+ #
1477
+ # # Call the update_consumer_group method.
1478
+ # result = client.update_consumer_group request
1479
+ #
1480
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::ConsumerGroup.
1481
+ # p result
1482
+ #
1483
+ def update_consumer_group request, options = nil
1484
+ raise ::ArgumentError, "request must be provided" if request.nil?
1485
+
1486
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest
1487
+
1488
+ # Converts hash and nil to an options object
1489
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1490
+
1491
+ # Customize the options with defaults
1492
+ metadata = @config.rpcs.update_consumer_group.metadata.to_h
1493
+
1494
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1495
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1496
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1497
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1498
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1499
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1500
+
1501
+ header_params = {}
1502
+ if request.consumer_group&.name
1503
+ header_params["consumer_group.name"] = request.consumer_group.name
1504
+ end
1505
+
1506
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1507
+ metadata[:"x-goog-request-params"] ||= request_params_header
1508
+
1509
+ options.apply_defaults timeout: @config.rpcs.update_consumer_group.timeout,
1510
+ metadata: metadata,
1511
+ retry_policy: @config.rpcs.update_consumer_group.retry_policy
1512
+
1513
+ options.apply_defaults timeout: @config.timeout,
1514
+ metadata: @config.metadata,
1515
+ retry_policy: @config.retry_policy
1516
+
1517
+ @managed_kafka_stub.call_rpc :update_consumer_group, request, options: options do |response, operation|
1518
+ yield response, operation if block_given?
1519
+ return response
1520
+ end
1521
+ rescue ::GRPC::BadStatus => e
1522
+ raise ::Google::Cloud::Error.from_error(e)
1523
+ end
1524
+
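+ # A minimal sketch of update_consumer_group using the update_mask and
+ # consumer_group keywords documented above. Per the field-mask note, a paths
+ # value of "*" overwrites all fields; the resource name is an illustrative
+ # placeholder.
+ #
+ #   require "google/cloud/managed_kafka/v1"
+ #
+ #   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
+ #   name = "projects/my-project/locations/us-central1/clusters/my-cluster/consumerGroups/my-group"
+ #   group = client.update_consumer_group(
+ #     update_mask: { paths: ["*"] },
+ #     consumer_group: { name: name }
+ #   )
+ #   p group
+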
1525
+ ##
1526
+ # Deletes a single consumer group.
1527
+ #
1528
+ # @overload delete_consumer_group(request, options = nil)
1529
+ # Pass arguments to `delete_consumer_group` via a request object, either of type
1530
+ # {::Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest} or an equivalent Hash.
1531
+ #
1532
+ # @param request [::Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest, ::Hash]
1533
+ # A request object representing the call parameters. Required. To specify no
1534
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1535
+ # @param options [::Gapic::CallOptions, ::Hash]
1536
+ # Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
1537
+ #
1538
+ # @overload delete_consumer_group(name: nil)
1539
+ # Pass arguments to `delete_consumer_group` via keyword arguments. Note that at
1540
+ # least one keyword argument is required. To specify no parameters, or to keep all
1541
+ # the default parameter values, pass an empty Hash as a request object (see above).
1542
+ #
1543
+ # @param name [::String]
1544
+ # Required. The name of the consumer group to delete.
1545
+ # `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`.
1546
+ #
1547
+ # @yield [response, operation] Access the result along with the RPC operation
1548
+ # @yieldparam response [::Google::Protobuf::Empty]
1549
+ # @yieldparam operation [::GRPC::ActiveCall::Operation]
1550
+ #
1551
+ # @return [::Google::Protobuf::Empty]
1552
+ #
1553
+ # @raise [::Google::Cloud::Error] if the RPC is aborted.
1554
+ #
1555
+ # @example Basic example
1556
+ # require "google/cloud/managed_kafka/v1"
1557
+ #
1558
+ # # Create a client object. The client can be reused for multiple calls.
1559
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
1560
+ #
1561
+ # # Create a request. To set request fields, pass in keyword arguments.
1562
+ # request = Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest.new
1563
+ #
1564
+ # # Call the delete_consumer_group method.
1565
+ # result = client.delete_consumer_group request
1566
+ #
1567
+ # # The returned object is of type Google::Protobuf::Empty.
1568
+ # p result
1569
+ #
1570
+ def delete_consumer_group request, options = nil
1571
+ raise ::ArgumentError, "request must be provided" if request.nil?
1572
+
1573
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest
1574
+
1575
+ # Converts hash and nil to an options object
1576
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
1577
+
1578
+ # Customize the options with defaults
1579
+ metadata = @config.rpcs.delete_consumer_group.metadata.to_h
1580
+
1581
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
1582
+ metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
1583
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
1584
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION
1585
+ metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
1586
+ metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
1587
+
1588
+ header_params = {}
1589
+ if request.name
1590
+ header_params["name"] = request.name
1591
+ end
1592
+
1593
+ request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
1594
+ metadata[:"x-goog-request-params"] ||= request_params_header
1595
+
1596
+ options.apply_defaults timeout: @config.rpcs.delete_consumer_group.timeout,
1597
+ metadata: metadata,
1598
+ retry_policy: @config.rpcs.delete_consumer_group.retry_policy
1599
+
1600
+ options.apply_defaults timeout: @config.timeout,
1601
+ metadata: @config.metadata,
1602
+ retry_policy: @config.retry_policy
1603
+
1604
+ @managed_kafka_stub.call_rpc :delete_consumer_group, request, options: options do |response, operation|
1605
+ yield response, operation if block_given?
1606
+ return response
1607
+ end
1608
+ rescue ::GRPC::BadStatus => e
1609
+ raise ::Google::Cloud::Error.from_error(e)
1610
+ end
1611
+
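+ # A minimal sketch of delete_consumer_group using the name keyword documented
+ # above; the resource IDs are illustrative placeholders.
+ #
+ #   require "google/cloud/managed_kafka/v1"
+ #
+ #   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new
+ #   name = "projects/my-project/locations/us-central1/clusters/my-cluster/consumerGroups/my-group"
+ #   client.delete_consumer_group name: name
+ #   # A successful call returns Google::Protobuf::Empty.
+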
1612
+ ##
1613
+ # Configuration class for the ManagedKafka API.
1614
+ #
1615
+ # This class represents the configuration for ManagedKafka,
1616
+ # providing control over timeouts, retry behavior, logging, transport
1617
+ # parameters, and other low-level controls. Certain parameters can also be
1618
+ # applied individually to specific RPCs. See
1619
+ # {::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client::Configuration::Rpcs}
1620
+ # for a list of RPCs that can be configured independently.
1621
+ #
1622
+ # Configuration can be applied globally to all clients, or to a single client
1623
+ # on construction.
1624
+ #
1625
+ # @example
1626
+ #
1627
+ # # Modify the global config, setting the timeout for
1628
+ # # list_clusters to 20 seconds,
1629
+ # # and all remaining timeouts to 10 seconds.
1630
+ # ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.configure do |config|
1631
+ # config.timeout = 10.0
1632
+ # config.rpcs.list_clusters.timeout = 20.0
1633
+ # end
1634
+ #
1635
+ # # Apply the above configuration only to a new client.
1636
+ # client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new do |config|
1637
+ # config.timeout = 10.0
1638
+ # config.rpcs.list_clusters.timeout = 20.0
1639
+ # end
1640
+ #
1641
+ # @!attribute [rw] endpoint
1642
+ # A custom service endpoint, as a hostname or hostname:port. The default is
1643
+ # nil, which means the default endpoint for the current universe domain is used.
1644
+ # @return [::String,nil]
1645
+ # @!attribute [rw] credentials
1646
+ # Credentials to send with calls. You may provide any of the following types:
1647
+ # * (`String`) The path to a service account key file in JSON format
1648
+ # * (`Hash`) A service account key as a Hash
1649
+ # * (`Google::Auth::Credentials`) A googleauth credentials object
1650
+ # (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
1651
+ # * (`Signet::OAuth2::Client`) A signet oauth2 client object
1652
+ # (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
1653
+ # * (`GRPC::Core::Channel`) a gRPC channel with included credentials
1654
+ # * (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
1655
+ # * (`nil`) indicating no credentials
1656
+ # @return [::Object]
1657
+ # @!attribute [rw] scope
1658
+ # The OAuth scopes
1659
+ # @return [::Array<::String>]
1660
+ # @!attribute [rw] lib_name
1661
+ # The library name as recorded in instrumentation and logging
1662
+ # @return [::String]
1663
+ # @!attribute [rw] lib_version
1664
+ # The library version as recorded in instrumentation and logging
1665
+ # @return [::String]
1666
+ # @!attribute [rw] channel_args
1667
+ # Extra parameters passed to the gRPC channel. Note: this is ignored if a
1668
+ # `GRPC::Core::Channel` object is provided as the credential.
1669
+ # @return [::Hash]
1670
+ # @!attribute [rw] interceptors
1671
+ # An array of interceptors that are run before calls are executed.
1672
+ # @return [::Array<::GRPC::ClientInterceptor>]
1673
+ # @!attribute [rw] timeout
1674
+ # The call timeout in seconds.
1675
+ # @return [::Numeric]
1676
+ # @!attribute [rw] metadata
1677
+ # Additional gRPC headers to be sent with the call.
1678
+ # @return [::Hash{::Symbol=>::String}]
1679
+ # @!attribute [rw] retry_policy
1680
+ # The retry policy. The value is a hash with the following keys:
1681
+ # * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
1682
+ # * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
1683
+ # * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
1684
+ # * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
1685
+ # trigger a retry.
1686
+ # @return [::Hash]
1687
+ # @!attribute [rw] quota_project
1688
+ # A separate project against which to charge quota.
1689
+ # @return [::String]
1690
+ # @!attribute [rw] universe_domain
1691
+ # The universe domain within which to make requests. This determines the
1692
+ # default endpoint URL. The default value of nil uses the environment
1693
+ # universe (usually the default "googleapis.com" universe).
1694
+ # @return [::String,nil]
1695
+ #
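+ # A further sketch (the credential path and retry values are illustrative
+ # placeholders): credentials and a per-RPC retry policy can be set through the
+ # same configuration block, using the retry_policy keys documented above.
+ #
+ #   client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Client.new do |config|
+ #     config.credentials = "/path/to/service-account-keyfile.json"
+ #     config.rpcs.get_topic.retry_policy = {
+ #       initial_delay: 1.0, max_delay: 30.0, multiplier: 1.3, retry_codes: ["UNAVAILABLE"]
+ #     }
+ #   end
+ #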
1696
+ class Configuration
1697
+ extend ::Gapic::Config
1698
+
1699
+ # @private
1700
+ # The endpoint specific to the default "googleapis.com" universe. Deprecated.
1701
+ DEFAULT_ENDPOINT = "managedkafka.googleapis.com"
1702
+
1703
+ config_attr :endpoint, nil, ::String, nil
1704
+ config_attr :credentials, nil do |value|
1705
+ allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
1706
+ allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
1707
+ allowed.any? { |klass| klass === value }
1708
+ end
1709
+ config_attr :scope, nil, ::String, ::Array, nil
1710
+ config_attr :lib_name, nil, ::String, nil
1711
+ config_attr :lib_version, nil, ::String, nil
1712
+ config_attr(:channel_args, { "grpc.service_config_disable_resolution" => 1 }, ::Hash, nil)
1713
+ config_attr :interceptors, nil, ::Array, nil
1714
+ config_attr :timeout, nil, ::Numeric, nil
1715
+ config_attr :metadata, nil, ::Hash, nil
1716
+ config_attr :retry_policy, nil, ::Hash, ::Proc, nil
1717
+ config_attr :quota_project, nil, ::String, nil
1718
+ config_attr :universe_domain, nil, ::String, nil
1719
+
1720
+ # @private
1721
+ def initialize parent_config = nil
1722
+ @parent_config = parent_config unless parent_config.nil?
1723
+
1724
+ yield self if block_given?
1725
+ end
1726
+
1727
+ ##
1728
+ # Configurations for individual RPCs
1729
+ # @return [Rpcs]
1730
+ #
1731
+ def rpcs
1732
+ @rpcs ||= begin
1733
+ parent_rpcs = nil
1734
+ parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
1735
+ Rpcs.new parent_rpcs
1736
+ end
1737
+ end
1738
+
1739
+ ##
1740
+ # Configuration for the channel pool
1741
+ # @return [::Gapic::ServiceStub::ChannelPool::Configuration]
1742
+ #
1743
+ def channel_pool
1744
+ @channel_pool ||= ::Gapic::ServiceStub::ChannelPool::Configuration.new
1745
+ end
1746
+
1747
+ ##
1748
+ # Configuration RPC class for the ManagedKafka API.
1749
+ #
1750
+ # Includes fields providing the configuration for each RPC in this service.
1751
+ # Each configuration object is of type `Gapic::Config::Method` and includes
1752
+ # the following configuration fields:
1753
+ #
1754
+ # * `timeout` (*type:* `Numeric`) - The call timeout in seconds
1755
+ # * `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
1756
+ # * `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
1757
+ # include the following keys:
1758
+ # * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
1759
+ # * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
1760
+ # * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
1761
+ # * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
1762
+ # trigger a retry.
1763
+ #
1764
+ class Rpcs
1765
+ ##
1766
+ # RPC-specific configuration for `list_clusters`
1767
+ # @return [::Gapic::Config::Method]
1768
+ #
1769
+ attr_reader :list_clusters
1770
+ ##
1771
+ # RPC-specific configuration for `get_cluster`
1772
+ # @return [::Gapic::Config::Method]
1773
+ #
1774
+ attr_reader :get_cluster
1775
+ ##
1776
+ # RPC-specific configuration for `create_cluster`
1777
+ # @return [::Gapic::Config::Method]
1778
+ #
1779
+ attr_reader :create_cluster
1780
+ ##
1781
+ # RPC-specific configuration for `update_cluster`
1782
+ # @return [::Gapic::Config::Method]
1783
+ #
1784
+ attr_reader :update_cluster
1785
+ ##
1786
+ # RPC-specific configuration for `delete_cluster`
1787
+ # @return [::Gapic::Config::Method]
1788
+ #
1789
+ attr_reader :delete_cluster
1790
+ ##
1791
+ # RPC-specific configuration for `list_topics`
1792
+ # @return [::Gapic::Config::Method]
1793
+ #
1794
+ attr_reader :list_topics
1795
+ ##
1796
+ # RPC-specific configuration for `get_topic`
1797
+ # @return [::Gapic::Config::Method]
1798
+ #
1799
+ attr_reader :get_topic
1800
+ ##
1801
+ # RPC-specific configuration for `create_topic`
1802
+ # @return [::Gapic::Config::Method]
1803
+ #
1804
+ attr_reader :create_topic
1805
+ ##
1806
+ # RPC-specific configuration for `update_topic`
1807
+ # @return [::Gapic::Config::Method]
1808
+ #
1809
+ attr_reader :update_topic
1810
+ ##
1811
+ # RPC-specific configuration for `delete_topic`
1812
+ # @return [::Gapic::Config::Method]
1813
+ #
1814
+ attr_reader :delete_topic
1815
+ ##
1816
+ # RPC-specific configuration for `list_consumer_groups`
1817
+ # @return [::Gapic::Config::Method]
1818
+ #
1819
+ attr_reader :list_consumer_groups
1820
+ ##
1821
+ # RPC-specific configuration for `get_consumer_group`
1822
+ # @return [::Gapic::Config::Method]
1823
+ #
1824
+ attr_reader :get_consumer_group
1825
+ ##
1826
+ # RPC-specific configuration for `update_consumer_group`
1827
+ # @return [::Gapic::Config::Method]
1828
+ #
1829
+ attr_reader :update_consumer_group
1830
+ ##
1831
+ # RPC-specific configuration for `delete_consumer_group`
1832
+ # @return [::Gapic::Config::Method]
1833
+ #
1834
+ attr_reader :delete_consumer_group
1835
+
1836
+ # @private
1837
+ def initialize parent_rpcs = nil
1838
+ list_clusters_config = parent_rpcs.list_clusters if parent_rpcs.respond_to? :list_clusters
1839
+ @list_clusters = ::Gapic::Config::Method.new list_clusters_config
1840
+ get_cluster_config = parent_rpcs.get_cluster if parent_rpcs.respond_to? :get_cluster
1841
+ @get_cluster = ::Gapic::Config::Method.new get_cluster_config
1842
+ create_cluster_config = parent_rpcs.create_cluster if parent_rpcs.respond_to? :create_cluster
1843
+ @create_cluster = ::Gapic::Config::Method.new create_cluster_config
1844
+ update_cluster_config = parent_rpcs.update_cluster if parent_rpcs.respond_to? :update_cluster
1845
+ @update_cluster = ::Gapic::Config::Method.new update_cluster_config
1846
+ delete_cluster_config = parent_rpcs.delete_cluster if parent_rpcs.respond_to? :delete_cluster
1847
+ @delete_cluster = ::Gapic::Config::Method.new delete_cluster_config
1848
+ list_topics_config = parent_rpcs.list_topics if parent_rpcs.respond_to? :list_topics
1849
+ @list_topics = ::Gapic::Config::Method.new list_topics_config
1850
+ get_topic_config = parent_rpcs.get_topic if parent_rpcs.respond_to? :get_topic
1851
+ @get_topic = ::Gapic::Config::Method.new get_topic_config
1852
+ create_topic_config = parent_rpcs.create_topic if parent_rpcs.respond_to? :create_topic
1853
+ @create_topic = ::Gapic::Config::Method.new create_topic_config
1854
+ update_topic_config = parent_rpcs.update_topic if parent_rpcs.respond_to? :update_topic
1855
+ @update_topic = ::Gapic::Config::Method.new update_topic_config
1856
+ delete_topic_config = parent_rpcs.delete_topic if parent_rpcs.respond_to? :delete_topic
1857
+ @delete_topic = ::Gapic::Config::Method.new delete_topic_config
1858
+ list_consumer_groups_config = parent_rpcs.list_consumer_groups if parent_rpcs.respond_to? :list_consumer_groups
1859
+ @list_consumer_groups = ::Gapic::Config::Method.new list_consumer_groups_config
1860
+ get_consumer_group_config = parent_rpcs.get_consumer_group if parent_rpcs.respond_to? :get_consumer_group
1861
+ @get_consumer_group = ::Gapic::Config::Method.new get_consumer_group_config
1862
+ update_consumer_group_config = parent_rpcs.update_consumer_group if parent_rpcs.respond_to? :update_consumer_group
1863
+ @update_consumer_group = ::Gapic::Config::Method.new update_consumer_group_config
1864
+ delete_consumer_group_config = parent_rpcs.delete_consumer_group if parent_rpcs.respond_to? :delete_consumer_group
1865
+ @delete_consumer_group = ::Gapic::Config::Method.new delete_consumer_group_config
1866
+
1867
+ yield self if block_given?
1868
+ end
1869
+ end
1870
+ end
1871
+ end
1872
+ end
1873
+ end
1874
+ end
1875
+ end
1876
+ end