google-cloud-managed_kafka-v1 0.a → 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (37):
  1. checksums.yaml +4 -4
  2. data/.yardopts +12 -0
  3. data/AUTHENTICATION.md +122 -0
  4. data/README.md +144 -8
  5. data/lib/google/cloud/managed_kafka/v1/bindings_override.rb +102 -0
  6. data/lib/google/cloud/managed_kafka/v1/managed_kafka/client.rb +1876 -0
  7. data/lib/google/cloud/managed_kafka/v1/managed_kafka/credentials.rb +47 -0
  8. data/lib/google/cloud/managed_kafka/v1/managed_kafka/operations.rb +809 -0
  9. data/lib/google/cloud/managed_kafka/v1/managed_kafka/paths.rb +132 -0
  10. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest/client.rb +1759 -0
  11. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest/operations.rb +902 -0
  12. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest/service_stub.rb +900 -0
  13. data/lib/google/cloud/managed_kafka/v1/managed_kafka/rest.rb +55 -0
  14. data/lib/google/cloud/managed_kafka/v1/managed_kafka.rb +57 -0
  15. data/lib/google/cloud/managed_kafka/v1/rest.rb +38 -0
  16. data/lib/google/cloud/managed_kafka/v1/version.rb +7 -2
  17. data/lib/google/cloud/managed_kafka/v1.rb +45 -0
  18. data/lib/google/cloud/managedkafka/v1/managed_kafka_pb.rb +70 -0
  19. data/lib/google/cloud/managedkafka/v1/managed_kafka_services_pb.rb +72 -0
  20. data/lib/google/cloud/managedkafka/v1/resources_pb.rb +59 -0
  21. data/lib/google-cloud-managed_kafka-v1.rb +21 -0
  22. data/proto_docs/README.md +4 -0
  23. data/proto_docs/google/api/client.rb +399 -0
  24. data/proto_docs/google/api/field_behavior.rb +85 -0
  25. data/proto_docs/google/api/field_info.rb +65 -0
  26. data/proto_docs/google/api/launch_stage.rb +71 -0
  27. data/proto_docs/google/api/resource.rb +222 -0
  28. data/proto_docs/google/cloud/managedkafka/v1/managed_kafka.rb +341 -0
  29. data/proto_docs/google/cloud/managedkafka/v1/resources.rb +291 -0
  30. data/proto_docs/google/longrunning/operations.rb +164 -0
  31. data/proto_docs/google/protobuf/any.rb +145 -0
  32. data/proto_docs/google/protobuf/duration.rb +98 -0
  33. data/proto_docs/google/protobuf/empty.rb +34 -0
  34. data/proto_docs/google/protobuf/field_mask.rb +229 -0
  35. data/proto_docs/google/protobuf/timestamp.rb +127 -0
  36. data/proto_docs/google/rpc/status.rb +48 -0
  37. metadata +96 -10
@@ -0,0 +1,1759 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Copyright 2024 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
18
+
19
+ require "google/cloud/errors"
20
+ require "google/cloud/managedkafka/v1/managed_kafka_pb"
21
+ require "google/cloud/managed_kafka/v1/managed_kafka/rest/service_stub"
22
+ require "google/cloud/location/rest"
23
+
24
+ module Google
25
+ module Cloud
26
+ module ManagedKafka
27
+ module V1
28
+ module ManagedKafka
29
+ module Rest
30
+ ##
31
+ # REST client for the ManagedKafka service.
32
+ #
33
+ # The service that a client application uses to manage Apache Kafka clusters,
34
+ # topics and consumer groups.
35
+ #
36
+ class Client
37
+ # @private
38
+ API_VERSION = ""
39
+
40
+ # @private
41
+ DEFAULT_ENDPOINT_TEMPLATE = "managedkafka.$UNIVERSE_DOMAIN$"
42
+
43
+ include Paths
44
+
45
+ # @private
46
+ attr_reader :managed_kafka_stub
47
+
48
+ ##
49
+ # Configure the ManagedKafka Client class.
50
+ #
51
+ # See {::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client::Configuration}
52
+ # for a description of the configuration fields.
53
+ #
54
+ # @example
55
+ #
56
+ # # Modify the configuration for all ManagedKafka clients
57
+ # ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.configure do |config|
58
+ # config.timeout = 10.0
59
+ # end
60
+ #
61
+ # @yield [config] Configure the Client client.
62
+ # @yieldparam config [Client::Configuration]
63
+ #
64
+ # @return [Client::Configuration]
65
+ #
66
+ def self.configure
67
+ @configure ||= begin
68
+ namespace = ["Google", "Cloud", "ManagedKafka", "V1"]
69
+ parent_config = while namespace.any?
70
+ parent_name = namespace.join "::"
71
+ parent_const = const_get parent_name
72
+ break parent_const.configure if parent_const.respond_to? :configure
73
+ namespace.pop
74
+ end
75
+ default_config = Client::Configuration.new parent_config
76
+
77
+ default_config.rpcs.list_clusters.timeout = 60.0
78
+ default_config.rpcs.list_clusters.retry_policy = {
79
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
80
+ }
81
+
82
+ default_config.rpcs.get_cluster.timeout = 60.0
83
+ default_config.rpcs.get_cluster.retry_policy = {
84
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
85
+ }
86
+
87
+ default_config.rpcs.create_cluster.timeout = 60.0
88
+
89
+ default_config.rpcs.update_cluster.timeout = 60.0
90
+
91
+ default_config.rpcs.delete_cluster.timeout = 60.0
92
+
93
+ default_config.rpcs.list_topics.timeout = 60.0
94
+ default_config.rpcs.list_topics.retry_policy = {
95
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
96
+ }
97
+
98
+ default_config.rpcs.get_topic.timeout = 60.0
99
+ default_config.rpcs.get_topic.retry_policy = {
100
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
101
+ }
102
+
103
+ default_config.rpcs.create_topic.timeout = 60.0
104
+
105
+ default_config.rpcs.update_topic.timeout = 60.0
106
+
107
+ default_config.rpcs.delete_topic.timeout = 60.0
108
+
109
+ default_config.rpcs.list_consumer_groups.timeout = 60.0
110
+ default_config.rpcs.list_consumer_groups.retry_policy = {
111
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
112
+ }
113
+
114
+ default_config.rpcs.get_consumer_group.timeout = 60.0
115
+ default_config.rpcs.get_consumer_group.retry_policy = {
116
+ initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
117
+ }
118
+
119
+ default_config.rpcs.update_consumer_group.timeout = 60.0
120
+
121
+ default_config.rpcs.delete_consumer_group.timeout = 60.0
122
+
123
+ default_config
124
+ end
125
+ yield @configure if block_given?
126
+ @configure
127
+ end
128
+
129
+ ##
130
+ # Configure the ManagedKafka Client instance.
131
+ #
132
+ # The configuration is set to the derived mode, meaning that values can be changed,
133
+ # but structural changes (adding new fields, etc.) are not allowed. Structural changes
134
+ # should be made on {Client.configure}.
135
+ #
136
+ # See {::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client::Configuration}
137
+ # for a description of the configuration fields.
138
+ #
139
+ # @yield [config] Configure the Client client.
140
+ # @yieldparam config [Client::Configuration]
141
+ #
142
+ # @return [Client::Configuration]
143
+ #
144
+ def configure
145
+ yield @config if block_given?
146
+ @config
147
+ end
148
+
149
+ ##
150
+ # The effective universe domain
151
+ #
152
+ # @return [String]
153
+ #
154
+ def universe_domain
155
+ @managed_kafka_stub.universe_domain
156
+ end
157
+
158
+ ##
159
+ # Create a new ManagedKafka REST client object.
160
+ #
161
+ # @example
162
+ #
163
+ # # Create a client using the default configuration
164
+ # client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
165
+ #
166
+ # # Create a client using a custom configuration
167
+ # client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new do |config|
168
+ # config.timeout = 10.0
169
+ # end
170
+ #
171
+ # @yield [config] Configure the ManagedKafka client.
172
+ # @yieldparam config [Client::Configuration]
173
+ #
174
+ def initialize
175
+ # Create the configuration object
176
+ @config = Configuration.new Client.configure
177
+
178
+ # Yield the configuration if needed
179
+ yield @config if block_given?
180
+
181
+ # Create credentials
182
+ credentials = @config.credentials
183
+ # Use self-signed JWT if the endpoint is unchanged from default,
184
+ # but only if the default endpoint does not have a region prefix.
185
+ enable_self_signed_jwt = @config.endpoint.nil? ||
186
+ (@config.endpoint == Configuration::DEFAULT_ENDPOINT &&
187
+ !@config.endpoint.split(".").first.include?("-"))
188
+ credentials ||= Credentials.default scope: @config.scope,
189
+ enable_self_signed_jwt: enable_self_signed_jwt
190
+ if credentials.is_a?(::String) || credentials.is_a?(::Hash)
191
+ credentials = Credentials.new credentials, scope: @config.scope
192
+ end
193
+
194
+ @quota_project_id = @config.quota_project
195
+ @quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id
196
+
197
+ @operations_client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Operations.new do |config|
198
+ config.credentials = credentials
199
+ config.quota_project = @quota_project_id
200
+ config.endpoint = @config.endpoint
201
+ config.universe_domain = @config.universe_domain
202
+ end
203
+
204
+ @managed_kafka_stub = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::ServiceStub.new(
205
+ endpoint: @config.endpoint,
206
+ endpoint_template: DEFAULT_ENDPOINT_TEMPLATE,
207
+ universe_domain: @config.universe_domain,
208
+ credentials: credentials
209
+ )
210
+
211
+ @location_client = Google::Cloud::Location::Locations::Rest::Client.new do |config|
212
+ config.credentials = credentials
213
+ config.quota_project = @quota_project_id
214
+ config.endpoint = @managed_kafka_stub.endpoint
215
+ config.universe_domain = @managed_kafka_stub.universe_domain
216
+ config.bindings_override = @config.bindings_override
217
+ end
218
+ end
219
+
220
+ ##
221
+ # Get the associated client for long-running operations.
222
+ #
223
+ # @return [::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Operations]
224
+ #
225
+ attr_reader :operations_client
226
+
227
+ ##
228
+ # Get the associated client for mix-in of the Locations.
229
+ #
230
+ # @return [Google::Cloud::Location::Locations::Rest::Client]
231
+ #
232
+ attr_reader :location_client
233
+
234
+ # Service calls
235
+
236
+ ##
237
+ # Lists the clusters in a given project and location.
238
+ #
239
+ # @overload list_clusters(request, options = nil)
240
+ # Pass arguments to `list_clusters` via a request object, either of type
241
+ # {::Google::Cloud::ManagedKafka::V1::ListClustersRequest} or an equivalent Hash.
242
+ #
243
+ # @param request [::Google::Cloud::ManagedKafka::V1::ListClustersRequest, ::Hash]
244
+ # A request object representing the call parameters. Required. To specify no
245
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
246
+ # @param options [::Gapic::CallOptions, ::Hash]
247
+ # Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
248
+ #
249
+ # @overload list_clusters(parent: nil, page_size: nil, page_token: nil, filter: nil, order_by: nil)
250
+ # Pass arguments to `list_clusters` via keyword arguments. Note that at
251
+ # least one keyword argument is required. To specify no parameters, or to keep all
252
+ # the default parameter values, pass an empty Hash as a request object (see above).
253
+ #
254
+ # @param parent [::String]
255
+ # Required. The parent location whose clusters are to be listed. Structured
256
+ # like `projects/{project}/locations/{location}`.
257
+ # @param page_size [::Integer]
258
+ # Optional. The maximum number of clusters to return. The service may return
259
+ # fewer than this value. If unspecified, server will pick an appropriate
260
+ # default.
261
+ # @param page_token [::String]
262
+ # Optional. A page token, received from a previous `ListClusters` call.
263
+ # Provide this to retrieve the subsequent page.
264
+ #
265
+ # When paginating, all other parameters provided to `ListClusters` must match
266
+ # the call that provided the page token.
267
+ # @param filter [::String]
268
+ # Optional. Filter expression for the result.
269
+ # @param order_by [::String]
270
+ # Optional. Order by fields for the result.
271
+ # @yield [result, operation] Access the result along with the TransportOperation object
272
+ # @yieldparam result [::Google::Cloud::ManagedKafka::V1::ListClustersResponse]
273
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
274
+ #
275
+ # @return [::Google::Cloud::ManagedKafka::V1::ListClustersResponse]
276
+ #
277
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
278
+ #
279
+ # @example Basic example
280
+ # require "google/cloud/managed_kafka/v1"
281
+ #
282
+ # # Create a client object. The client can be reused for multiple calls.
283
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
284
+ #
285
+ # # Create a request. To set request fields, pass in keyword arguments.
286
+ # request = Google::Cloud::ManagedKafka::V1::ListClustersRequest.new
287
+ #
288
+ # # Call the list_clusters method.
289
+ # result = client.list_clusters request
290
+ #
291
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
292
+ # # over elements, and API calls will be issued to fetch pages as needed.
293
+ # result.each do |item|
294
+ # # Each element is of type ::Google::Cloud::ManagedKafka::V1::Cluster.
295
+ # p item
296
+ # end
297
+ #
298
+ def list_clusters request, options = nil
299
+ raise ::ArgumentError, "request must be provided" if request.nil?
300
+
301
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::ListClustersRequest
302
+
303
+ # Converts hash and nil to an options object
304
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
305
+
306
+ # Customize the options with defaults
307
+ call_metadata = @config.rpcs.list_clusters.metadata.to_h
308
+
309
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
310
+ call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
311
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
312
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
313
+ transports_version_send: [:rest]
314
+
315
+ call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
316
+ call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
317
+
318
+ options.apply_defaults timeout: @config.rpcs.list_clusters.timeout,
319
+ metadata: call_metadata,
320
+ retry_policy: @config.rpcs.list_clusters.retry_policy
321
+
322
+ options.apply_defaults timeout: @config.timeout,
323
+ metadata: @config.metadata,
324
+ retry_policy: @config.retry_policy
325
+
326
+ @managed_kafka_stub.list_clusters request, options do |result, operation|
327
+ yield result, operation if block_given?
328
+ return result
329
+ end
330
+ rescue ::Gapic::Rest::Error => e
331
+ raise ::Google::Cloud::Error.from_error(e)
332
+ end
333
+
334
+ ##
335
+ # Returns the properties of a single cluster.
336
+ #
337
+ # @overload get_cluster(request, options = nil)
338
+ # Pass arguments to `get_cluster` via a request object, either of type
339
+ # {::Google::Cloud::ManagedKafka::V1::GetClusterRequest} or an equivalent Hash.
340
+ #
341
+ # @param request [::Google::Cloud::ManagedKafka::V1::GetClusterRequest, ::Hash]
342
+ # A request object representing the call parameters. Required. To specify no
343
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
344
+ # @param options [::Gapic::CallOptions, ::Hash]
345
+ # Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
346
+ #
347
+ # @overload get_cluster(name: nil)
348
+ # Pass arguments to `get_cluster` via keyword arguments. Note that at
349
+ # least one keyword argument is required. To specify no parameters, or to keep all
350
+ # the default parameter values, pass an empty Hash as a request object (see above).
351
+ #
352
+ # @param name [::String]
353
+ # Required. The name of the cluster whose configuration to return.
354
+ # @yield [result, operation] Access the result along with the TransportOperation object
355
+ # @yieldparam result [::Google::Cloud::ManagedKafka::V1::Cluster]
356
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
357
+ #
358
+ # @return [::Google::Cloud::ManagedKafka::V1::Cluster]
359
+ #
360
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
361
+ #
362
+ # @example Basic example
363
+ # require "google/cloud/managed_kafka/v1"
364
+ #
365
+ # # Create a client object. The client can be reused for multiple calls.
366
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
367
+ #
368
+ # # Create a request. To set request fields, pass in keyword arguments.
369
+ # request = Google::Cloud::ManagedKafka::V1::GetClusterRequest.new
370
+ #
371
+ # # Call the get_cluster method.
372
+ # result = client.get_cluster request
373
+ #
374
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Cluster.
375
+ # p result
376
+ #
377
+ def get_cluster request, options = nil
378
+ raise ::ArgumentError, "request must be provided" if request.nil?
379
+
380
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::GetClusterRequest
381
+
382
+ # Converts hash and nil to an options object
383
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
384
+
385
+ # Customize the options with defaults
386
+ call_metadata = @config.rpcs.get_cluster.metadata.to_h
387
+
388
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
389
+ call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
390
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
391
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
392
+ transports_version_send: [:rest]
393
+
394
+ call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
395
+ call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
396
+
397
+ options.apply_defaults timeout: @config.rpcs.get_cluster.timeout,
398
+ metadata: call_metadata,
399
+ retry_policy: @config.rpcs.get_cluster.retry_policy
400
+
401
+ options.apply_defaults timeout: @config.timeout,
402
+ metadata: @config.metadata,
403
+ retry_policy: @config.retry_policy
404
+
405
+ @managed_kafka_stub.get_cluster request, options do |result, operation|
406
+ yield result, operation if block_given?
407
+ return result
408
+ end
409
+ rescue ::Gapic::Rest::Error => e
410
+ raise ::Google::Cloud::Error.from_error(e)
411
+ end
412
+
413
+ ##
414
+ # Creates a new cluster in a given project and location.
415
+ #
416
+ # @overload create_cluster(request, options = nil)
417
+ # Pass arguments to `create_cluster` via a request object, either of type
418
+ # {::Google::Cloud::ManagedKafka::V1::CreateClusterRequest} or an equivalent Hash.
419
+ #
420
+ # @param request [::Google::Cloud::ManagedKafka::V1::CreateClusterRequest, ::Hash]
421
+ # A request object representing the call parameters. Required. To specify no
422
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
423
+ # @param options [::Gapic::CallOptions, ::Hash]
424
+ # Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
425
+ #
426
+ # @overload create_cluster(parent: nil, cluster_id: nil, cluster: nil, request_id: nil)
427
+ # Pass arguments to `create_cluster` via keyword arguments. Note that at
428
+ # least one keyword argument is required. To specify no parameters, or to keep all
429
+ # the default parameter values, pass an empty Hash as a request object (see above).
430
+ #
431
+ # @param parent [::String]
432
+ # Required. The parent region in which to create the cluster. Structured like
433
+ # `projects/{project}/locations/{location}`.
434
+ # @param cluster_id [::String]
435
+ # Required. The ID to use for the cluster, which will become the final
436
+ # component of the cluster's name. The ID must be 1-63 characters long, and
437
+ # match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with
438
+ # RFC 1035.
439
+ #
440
+ # This value is structured like: `my-cluster-id`.
441
+ # @param cluster [::Google::Cloud::ManagedKafka::V1::Cluster, ::Hash]
442
+ # Required. Configuration of the cluster to create. Its `name` field is
443
+ # ignored.
444
+ # @param request_id [::String]
445
+ # Optional. An optional request ID to identify requests. Specify a unique
446
+ # request ID to avoid duplication of requests. If a request times out or
447
+ # fails, retrying with the same ID allows the server to recognize the
448
+ # previous attempt. For at least 60 minutes, the server ignores duplicate
449
+ # requests bearing the same ID.
450
+ #
451
+ # For example, consider a situation where you make an initial request and the
452
+ # request times out. If you make the request again with the same request ID
453
+ # within 60 minutes of the last request, the server checks if an original
454
+ # operation with the same request ID was received. If so, the server ignores
455
+ # the second request.
456
+ #
457
+ # The request ID must be a valid UUID. A zero UUID is not supported
458
+ # (00000000-0000-0000-0000-000000000000).
459
+ # @yield [result, operation] Access the result along with the TransportOperation object
460
+ # @yieldparam result [::Gapic::Operation]
461
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
462
+ #
463
+ # @return [::Gapic::Operation]
464
+ #
465
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
466
+ #
467
+ # @example Basic example
468
+ # require "google/cloud/managed_kafka/v1"
469
+ #
470
+ # # Create a client object. The client can be reused for multiple calls.
471
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
472
+ #
473
+ # # Create a request. To set request fields, pass in keyword arguments.
474
+ # request = Google::Cloud::ManagedKafka::V1::CreateClusterRequest.new
475
+ #
476
+ # # Call the create_cluster method.
477
+ # result = client.create_cluster request
478
+ #
479
+ # # The returned object is of type Gapic::Operation. You can use it to
480
+ # # check the status of an operation, cancel it, or wait for results.
481
+ # # Here is how to wait for a response.
482
+ # result.wait_until_done! timeout: 60
483
+ # if result.response?
484
+ # p result.response
485
+ # else
486
+ # puts "No response received."
487
+ # end
488
+ #
489
+ def create_cluster request, options = nil
490
+ raise ::ArgumentError, "request must be provided" if request.nil?
491
+
492
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::CreateClusterRequest
493
+
494
+ # Converts hash and nil to an options object
495
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
496
+
497
+ # Customize the options with defaults
498
+ call_metadata = @config.rpcs.create_cluster.metadata.to_h
499
+
500
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
501
+ call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
502
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
503
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
504
+ transports_version_send: [:rest]
505
+
506
+ call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
507
+ call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
508
+
509
+ options.apply_defaults timeout: @config.rpcs.create_cluster.timeout,
510
+ metadata: call_metadata,
511
+ retry_policy: @config.rpcs.create_cluster.retry_policy
512
+
513
+ options.apply_defaults timeout: @config.timeout,
514
+ metadata: @config.metadata,
515
+ retry_policy: @config.retry_policy
516
+
517
+ @managed_kafka_stub.create_cluster request, options do |result, operation|
518
+ result = ::Gapic::Operation.new result, @operations_client, options: options
519
+ yield result, operation if block_given?
520
+ return result
521
+ end
522
+ rescue ::Gapic::Rest::Error => e
523
+ raise ::Google::Cloud::Error.from_error(e)
524
+ end
525
+
526
+ ##
527
+ # Updates the properties of a single cluster.
528
+ #
529
+ # @overload update_cluster(request, options = nil)
530
+ # Pass arguments to `update_cluster` via a request object, either of type
531
+ # {::Google::Cloud::ManagedKafka::V1::UpdateClusterRequest} or an equivalent Hash.
532
+ #
533
+ # @param request [::Google::Cloud::ManagedKafka::V1::UpdateClusterRequest, ::Hash]
534
+ # A request object representing the call parameters. Required. To specify no
535
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
536
+ # @param options [::Gapic::CallOptions, ::Hash]
537
+ # Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
538
+ #
539
+ # @overload update_cluster(update_mask: nil, cluster: nil, request_id: nil)
540
+ # Pass arguments to `update_cluster` via keyword arguments. Note that at
541
+ # least one keyword argument is required. To specify no parameters, or to keep all
542
+ # the default parameter values, pass an empty Hash as a request object (see above).
543
+ #
544
+ # @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
545
+ # Required. Field mask is used to specify the fields to be overwritten in the
546
+ # cluster resource by the update. The fields specified in the update_mask are
547
+ # relative to the resource, not the full request. A field will be overwritten
548
+ # if it is in the mask. The mask is required and a value of * will update all
549
+ # fields.
550
+ # @param cluster [::Google::Cloud::ManagedKafka::V1::Cluster, ::Hash]
551
+ # Required. The cluster to update. Its `name` field must be populated.
552
+ # @param request_id [::String]
553
+ # Optional. An optional request ID to identify requests. Specify a unique
554
+ # request ID to avoid duplication of requests. If a request times out or
555
+ # fails, retrying with the same ID allows the server to recognize the
556
+ # previous attempt. For at least 60 minutes, the server ignores duplicate
557
+ # requests bearing the same ID.
558
+ #
559
+ # For example, consider a situation where you make an initial request and the
560
+ # request times out. If you make the request again with the same request ID
561
+ # within 60 minutes of the last request, the server checks if an original
562
+ # operation with the same request ID was received. If so, the server ignores
563
+ # the second request.
564
+ #
565
+ # The request ID must be a valid UUID. A zero UUID is not supported
566
+ # (00000000-0000-0000-0000-000000000000).
567
+ # @yield [result, operation] Access the result along with the TransportOperation object
568
+ # @yieldparam result [::Gapic::Operation]
569
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
570
+ #
571
+ # @return [::Gapic::Operation]
572
+ #
573
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
574
+ #
575
+ # @example Basic example
576
+ # require "google/cloud/managed_kafka/v1"
577
+ #
578
+ # # Create a client object. The client can be reused for multiple calls.
579
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
580
+ #
581
+ # # Create a request. To set request fields, pass in keyword arguments.
582
+ # request = Google::Cloud::ManagedKafka::V1::UpdateClusterRequest.new
583
+ #
584
+ # # Call the update_cluster method.
585
+ # result = client.update_cluster request
586
+ #
587
+ # # The returned object is of type Gapic::Operation. You can use it to
588
+ # # check the status of an operation, cancel it, or wait for results.
589
+ # # Here is how to wait for a response.
590
+ # result.wait_until_done! timeout: 60
591
+ # if result.response?
592
+ # p result.response
593
+ # else
594
+ # puts "No response received."
595
+ # end
596
+ #
597
+ def update_cluster request, options = nil
598
+ raise ::ArgumentError, "request must be provided" if request.nil?
599
+
600
+ request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::ManagedKafka::V1::UpdateClusterRequest
601
+
602
+ # Converts hash and nil to an options object
603
+ options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
604
+
605
+ # Customize the options with defaults
606
+ call_metadata = @config.rpcs.update_cluster.metadata.to_h
607
+
608
+ # Set x-goog-api-client, x-goog-user-project and x-goog-api-version headers
609
+ call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
610
+ lib_name: @config.lib_name, lib_version: @config.lib_version,
611
+ gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
612
+ transports_version_send: [:rest]
613
+
614
+ call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
615
+ call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
616
+
617
+ options.apply_defaults timeout: @config.rpcs.update_cluster.timeout,
618
+ metadata: call_metadata,
619
+ retry_policy: @config.rpcs.update_cluster.retry_policy
620
+
621
+ options.apply_defaults timeout: @config.timeout,
622
+ metadata: @config.metadata,
623
+ retry_policy: @config.retry_policy
624
+
625
+ @managed_kafka_stub.update_cluster request, options do |result, operation|
626
+ result = ::Gapic::Operation.new result, @operations_client, options: options
627
+ yield result, operation if block_given?
628
+ return result
629
+ end
630
+ rescue ::Gapic::Rest::Error => e
631
+ raise ::Google::Cloud::Error.from_error(e)
632
+ end
633
+
634
+ ##
635
+ # Deletes a single cluster.
636
+ #
637
+ # @overload delete_cluster(request, options = nil)
638
+ # Pass arguments to `delete_cluster` via a request object, either of type
639
+ # {::Google::Cloud::ManagedKafka::V1::DeleteClusterRequest} or an equivalent Hash.
640
+ #
641
+ # @param request [::Google::Cloud::ManagedKafka::V1::DeleteClusterRequest, ::Hash]
642
+ # A request object representing the call parameters. Required. To specify no
643
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
644
+ # @param options [::Gapic::CallOptions, ::Hash]
645
+ # Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
646
+ #
647
+ # @overload delete_cluster(name: nil, request_id: nil)
648
+ # Pass arguments to `delete_cluster` via keyword arguments. Note that at
649
+ # least one keyword argument is required. To specify no parameters, or to keep all
650
+ # the default parameter values, pass an empty Hash as a request object (see above).
651
+ #
652
+ # @param name [::String]
653
+ # Required. The name of the cluster to delete.
654
+ # @param request_id [::String]
655
+ # Optional. An optional request ID to identify requests. Specify a unique
656
+ # request ID to avoid duplication of requests. If a request times out or
657
+ # fails, retrying with the same ID allows the server to recognize the
658
+ # previous attempt. For at least 60 minutes, the server ignores duplicate
659
+ # requests bearing the same ID.
660
+ #
661
+ # For example, consider a situation where you make an initial request and the
662
+ # request times out. If you make the request again with the same request ID
663
+ # within 60 minutes of the last request, the server checks if an original
664
+ # operation with the same request ID was received. If so, the server ignores
665
+ # the second request.
666
+ #
667
+ # The request ID must be a valid UUID. A zero UUID is not supported
668
+ # (00000000-0000-0000-0000-000000000000).
669
+ # @yield [result, operation] Access the result along with the TransportOperation object
670
+ # @yieldparam result [::Gapic::Operation]
671
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
672
+ #
673
+ # @return [::Gapic::Operation]
674
+ #
675
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
676
+ #
677
+ # @example Basic example
678
+ # require "google/cloud/managed_kafka/v1"
679
+ #
680
+ # # Create a client object. The client can be reused for multiple calls.
681
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
682
+ #
683
+ # # Create a request. To set request fields, pass in keyword arguments.
684
+ # request = Google::Cloud::ManagedKafka::V1::DeleteClusterRequest.new
685
+ #
686
+ # # Call the delete_cluster method.
687
+ # result = client.delete_cluster request
688
+ #
689
+ # # The returned object is of type Gapic::Operation. You can use it to
690
+ # # check the status of an operation, cancel it, or wait for results.
691
+ # # Here is how to wait for a response.
692
+ # result.wait_until_done! timeout: 60
693
+ # if result.response?
694
+ # p result.response
695
+ # else
696
+ # puts "No response received."
697
+ # end
698
+ #
699
def delete_cluster request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::DeleteClusterRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.delete_cluster.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.delete_cluster.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.delete_cluster.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.delete_cluster request, options do |result, operation|
    # Wrap the raw longrunning operation so callers can poll or wait on it.
    result = ::Gapic::Operation.new result, @operations_client, options: options
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
735
+
736
+ ##
737
+ # Lists the topics in a given cluster.
738
+ #
739
+ # @overload list_topics(request, options = nil)
740
+ # Pass arguments to `list_topics` via a request object, either of type
741
+ # {::Google::Cloud::ManagedKafka::V1::ListTopicsRequest} or an equivalent Hash.
742
+ #
743
+ # @param request [::Google::Cloud::ManagedKafka::V1::ListTopicsRequest, ::Hash]
744
+ # A request object representing the call parameters. Required. To specify no
745
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
746
+ # @param options [::Gapic::CallOptions, ::Hash]
747
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
748
+ #
749
+ # @overload list_topics(parent: nil, page_size: nil, page_token: nil)
750
+ # Pass arguments to `list_topics` via keyword arguments. Note that at
751
+ # least one keyword argument is required. To specify no parameters, or to keep all
752
+ # the default parameter values, pass an empty Hash as a request object (see above).
753
+ #
754
+ # @param parent [::String]
755
+ # Required. The parent cluster whose topics are to be listed. Structured like
756
+ # `projects/{project}/locations/{location}/clusters/{cluster}`.
757
+ # @param page_size [::Integer]
758
+ # Optional. The maximum number of topics to return. The service may return
759
+ # fewer than this value. If unset or zero, all topics for the parent are
760
+ # returned.
761
+ # @param page_token [::String]
762
+ # Optional. A page token, received from a previous `ListTopics` call.
763
+ # Provide this to retrieve the subsequent page.
764
+ #
765
+ # When paginating, all other parameters provided to `ListTopics` must match
766
+ # the call that provided the page token.
767
+ # @yield [result, operation] Access the result along with the TransportOperation object
768
+ # @yieldparam result [::Gapic::Rest::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::Topic>]
769
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
770
+ #
771
+ # @return [::Gapic::Rest::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::Topic>]
772
+ #
773
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
774
+ #
775
+ # @example Basic example
776
+ # require "google/cloud/managed_kafka/v1"
777
+ #
778
+ # # Create a client object. The client can be reused for multiple calls.
779
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
780
+ #
781
+ # # Create a request. To set request fields, pass in keyword arguments.
782
+ # request = Google::Cloud::ManagedKafka::V1::ListTopicsRequest.new
783
+ #
784
+ # # Call the list_topics method.
785
+ # result = client.list_topics request
786
+ #
787
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
788
+ # # over elements, and API calls will be issued to fetch pages as needed.
789
+ # result.each do |item|
790
+ # # Each element is of type ::Google::Cloud::ManagedKafka::V1::Topic.
791
+ # p item
792
+ # end
793
+ #
794
def list_topics request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::ListTopicsRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.list_topics.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.list_topics.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.list_topics.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.list_topics request, options do |result, operation|
    # Wrap the first page in a PagedEnumerable so callers can iterate lazily;
    # "topics" is the repeated field holding the page's elements.
    result = ::Gapic::Rest::PagedEnumerable.new @managed_kafka_stub, :list_topics, "topics",
                                                request, result, options
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
830
+
831
+ ##
832
+ # Returns the properties of a single topic.
833
+ #
834
+ # @overload get_topic(request, options = nil)
835
+ # Pass arguments to `get_topic` via a request object, either of type
836
+ # {::Google::Cloud::ManagedKafka::V1::GetTopicRequest} or an equivalent Hash.
837
+ #
838
+ # @param request [::Google::Cloud::ManagedKafka::V1::GetTopicRequest, ::Hash]
839
+ # A request object representing the call parameters. Required. To specify no
840
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
841
+ # @param options [::Gapic::CallOptions, ::Hash]
842
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
843
+ #
844
+ # @overload get_topic(name: nil)
845
+ # Pass arguments to `get_topic` via keyword arguments. Note that at
846
+ # least one keyword argument is required. To specify no parameters, or to keep all
847
+ # the default parameter values, pass an empty Hash as a request object (see above).
848
+ #
849
+ # @param name [::String]
850
+ # Required. The name of the topic whose configuration to return. Structured
851
+ # like:
852
+ # projects/\\{project}/locations/\\{location}/clusters/\\{cluster}/topics/\\{topic}.
853
+ # @yield [result, operation] Access the result along with the TransportOperation object
854
+ # @yieldparam result [::Google::Cloud::ManagedKafka::V1::Topic]
855
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
856
+ #
857
+ # @return [::Google::Cloud::ManagedKafka::V1::Topic]
858
+ #
859
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
860
+ #
861
+ # @example Basic example
862
+ # require "google/cloud/managed_kafka/v1"
863
+ #
864
+ # # Create a client object. The client can be reused for multiple calls.
865
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
866
+ #
867
+ # # Create a request. To set request fields, pass in keyword arguments.
868
+ # request = Google::Cloud::ManagedKafka::V1::GetTopicRequest.new
869
+ #
870
+ # # Call the get_topic method.
871
+ # result = client.get_topic request
872
+ #
873
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Topic.
874
+ # p result
875
+ #
876
def get_topic request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::GetTopicRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.get_topic.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.get_topic.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.get_topic.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.get_topic request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
911
+
912
+ ##
913
+ # Creates a new topic in a given project and location.
914
+ #
915
+ # @overload create_topic(request, options = nil)
916
+ # Pass arguments to `create_topic` via a request object, either of type
917
+ # {::Google::Cloud::ManagedKafka::V1::CreateTopicRequest} or an equivalent Hash.
918
+ #
919
+ # @param request [::Google::Cloud::ManagedKafka::V1::CreateTopicRequest, ::Hash]
920
+ # A request object representing the call parameters. Required. To specify no
921
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
922
+ # @param options [::Gapic::CallOptions, ::Hash]
923
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
924
+ #
925
+ # @overload create_topic(parent: nil, topic_id: nil, topic: nil)
926
+ # Pass arguments to `create_topic` via keyword arguments. Note that at
927
+ # least one keyword argument is required. To specify no parameters, or to keep all
928
+ # the default parameter values, pass an empty Hash as a request object (see above).
929
+ #
930
+ # @param parent [::String]
931
+ # Required. The parent cluster in which to create the topic.
932
+ # Structured like
933
+ # `projects/{project}/locations/{location}/clusters/{cluster}`.
934
+ # @param topic_id [::String]
935
+ # Required. The ID to use for the topic, which will become the final
936
+ # component of the topic's name.
937
+ #
938
+ # This value is structured like: `my-topic-name`.
939
+ # @param topic [::Google::Cloud::ManagedKafka::V1::Topic, ::Hash]
940
+ # Required. Configuration of the topic to create. Its `name` field is
941
+ # ignored.
942
+ # @yield [result, operation] Access the result along with the TransportOperation object
943
+ # @yieldparam result [::Google::Cloud::ManagedKafka::V1::Topic]
944
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
945
+ #
946
+ # @return [::Google::Cloud::ManagedKafka::V1::Topic]
947
+ #
948
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
949
+ #
950
+ # @example Basic example
951
+ # require "google/cloud/managed_kafka/v1"
952
+ #
953
+ # # Create a client object. The client can be reused for multiple calls.
954
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
955
+ #
956
+ # # Create a request. To set request fields, pass in keyword arguments.
957
+ # request = Google::Cloud::ManagedKafka::V1::CreateTopicRequest.new
958
+ #
959
+ # # Call the create_topic method.
960
+ # result = client.create_topic request
961
+ #
962
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Topic.
963
+ # p result
964
+ #
965
def create_topic request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::CreateTopicRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.create_topic.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.create_topic.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.create_topic.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.create_topic request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1000
+
1001
+ ##
1002
+ # Updates the properties of a single topic.
1003
+ #
1004
+ # @overload update_topic(request, options = nil)
1005
+ # Pass arguments to `update_topic` via a request object, either of type
1006
+ # {::Google::Cloud::ManagedKafka::V1::UpdateTopicRequest} or an equivalent Hash.
1007
+ #
1008
+ # @param request [::Google::Cloud::ManagedKafka::V1::UpdateTopicRequest, ::Hash]
1009
+ # A request object representing the call parameters. Required. To specify no
1010
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1011
+ # @param options [::Gapic::CallOptions, ::Hash]
1012
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
1013
+ #
1014
+ # @overload update_topic(update_mask: nil, topic: nil)
1015
+ # Pass arguments to `update_topic` via keyword arguments. Note that at
1016
+ # least one keyword argument is required. To specify no parameters, or to keep all
1017
+ # the default parameter values, pass an empty Hash as a request object (see above).
1018
+ #
1019
+ # @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
1020
+ # Required. Field mask is used to specify the fields to be overwritten in the
1021
+ # Topic resource by the update. The fields specified in the update_mask are
1022
+ # relative to the resource, not the full request. A field will be overwritten
1023
+ # if it is in the mask. The mask is required and a value of * will update all
1024
+ # fields.
1025
+ # @param topic [::Google::Cloud::ManagedKafka::V1::Topic, ::Hash]
1026
+ # Required. The topic to update. Its `name` field must be populated.
1027
+ # @yield [result, operation] Access the result along with the TransportOperation object
1028
+ # @yieldparam result [::Google::Cloud::ManagedKafka::V1::Topic]
1029
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
1030
+ #
1031
+ # @return [::Google::Cloud::ManagedKafka::V1::Topic]
1032
+ #
1033
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
1034
+ #
1035
+ # @example Basic example
1036
+ # require "google/cloud/managed_kafka/v1"
1037
+ #
1038
+ # # Create a client object. The client can be reused for multiple calls.
1039
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
1040
+ #
1041
+ # # Create a request. To set request fields, pass in keyword arguments.
1042
+ # request = Google::Cloud::ManagedKafka::V1::UpdateTopicRequest.new
1043
+ #
1044
+ # # Call the update_topic method.
1045
+ # result = client.update_topic request
1046
+ #
1047
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::Topic.
1048
+ # p result
1049
+ #
1050
def update_topic request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::UpdateTopicRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.update_topic.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.update_topic.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.update_topic.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.update_topic request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1085
+
1086
+ ##
1087
+ # Deletes a single topic.
1088
+ #
1089
+ # @overload delete_topic(request, options = nil)
1090
+ # Pass arguments to `delete_topic` via a request object, either of type
1091
+ # {::Google::Cloud::ManagedKafka::V1::DeleteTopicRequest} or an equivalent Hash.
1092
+ #
1093
+ # @param request [::Google::Cloud::ManagedKafka::V1::DeleteTopicRequest, ::Hash]
1094
+ # A request object representing the call parameters. Required. To specify no
1095
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1096
+ # @param options [::Gapic::CallOptions, ::Hash]
1097
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
1098
+ #
1099
+ # @overload delete_topic(name: nil)
1100
+ # Pass arguments to `delete_topic` via keyword arguments. Note that at
1101
+ # least one keyword argument is required. To specify no parameters, or to keep all
1102
+ # the default parameter values, pass an empty Hash as a request object (see above).
1103
+ #
1104
+ # @param name [::String]
1105
+ # Required. The name of the topic to delete.
1106
+ # `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`.
1107
+ # @yield [result, operation] Access the result along with the TransportOperation object
1108
+ # @yieldparam result [::Google::Protobuf::Empty]
1109
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
1110
+ #
1111
+ # @return [::Google::Protobuf::Empty]
1112
+ #
1113
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
1114
+ #
1115
+ # @example Basic example
1116
+ # require "google/cloud/managed_kafka/v1"
1117
+ #
1118
+ # # Create a client object. The client can be reused for multiple calls.
1119
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
1120
+ #
1121
+ # # Create a request. To set request fields, pass in keyword arguments.
1122
+ # request = Google::Cloud::ManagedKafka::V1::DeleteTopicRequest.new
1123
+ #
1124
+ # # Call the delete_topic method.
1125
+ # result = client.delete_topic request
1126
+ #
1127
+ # # The returned object is of type Google::Protobuf::Empty.
1128
+ # p result
1129
+ #
1130
def delete_topic request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::DeleteTopicRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.delete_topic.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.delete_topic.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.delete_topic.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.delete_topic request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1165
+
1166
+ ##
1167
+ # Lists the consumer groups in a given cluster.
1168
+ #
1169
+ # @overload list_consumer_groups(request, options = nil)
1170
+ # Pass arguments to `list_consumer_groups` via a request object, either of type
1171
+ # {::Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest} or an equivalent Hash.
1172
+ #
1173
+ # @param request [::Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest, ::Hash]
1174
+ # A request object representing the call parameters. Required. To specify no
1175
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1176
+ # @param options [::Gapic::CallOptions, ::Hash]
1177
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
1178
+ #
1179
+ # @overload list_consumer_groups(parent: nil, page_size: nil, page_token: nil)
1180
+ # Pass arguments to `list_consumer_groups` via keyword arguments. Note that at
1181
+ # least one keyword argument is required. To specify no parameters, or to keep all
1182
+ # the default parameter values, pass an empty Hash as a request object (see above).
1183
+ #
1184
+ # @param parent [::String]
1185
+ # Required. The parent cluster whose consumer groups are to be listed.
1186
+ # Structured like
1187
+ # `projects/{project}/locations/{location}/clusters/{cluster}`.
1188
+ # @param page_size [::Integer]
1189
+ # Optional. The maximum number of consumer groups to return. The service may
1190
+ # return fewer than this value. If unset or zero, all consumer groups for the
1191
+ # parent are returned.
1192
+ # @param page_token [::String]
1193
+ # Optional. A page token, received from a previous `ListConsumerGroups` call.
1194
+ # Provide this to retrieve the subsequent page.
1195
+ #
1196
+ # When paginating, all other parameters provided to `ListConsumerGroups` must
1197
+ # match the call that provided the page token.
1198
+ # @yield [result, operation] Access the result along with the TransportOperation object
1199
+ # @yieldparam result [::Gapic::Rest::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::ConsumerGroup>]
1200
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
1201
+ #
1202
+ # @return [::Gapic::Rest::PagedEnumerable<::Google::Cloud::ManagedKafka::V1::ConsumerGroup>]
1203
+ #
1204
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
1205
+ #
1206
+ # @example Basic example
1207
+ # require "google/cloud/managed_kafka/v1"
1208
+ #
1209
+ # # Create a client object. The client can be reused for multiple calls.
1210
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
1211
+ #
1212
+ # # Create a request. To set request fields, pass in keyword arguments.
1213
+ # request = Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest.new
1214
+ #
1215
+ # # Call the list_consumer_groups method.
1216
+ # result = client.list_consumer_groups request
1217
+ #
1218
+ # # The returned object is of type Gapic::PagedEnumerable. You can iterate
1219
+ # # over elements, and API calls will be issued to fetch pages as needed.
1220
+ # result.each do |item|
1221
+ # # Each element is of type ::Google::Cloud::ManagedKafka::V1::ConsumerGroup.
1222
+ # p item
1223
+ # end
1224
+ #
1225
def list_consumer_groups request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::ListConsumerGroupsRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.list_consumer_groups.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.list_consumer_groups.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.list_consumer_groups.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.list_consumer_groups request, options do |result, operation|
    # Wrap the first page in a PagedEnumerable so callers can iterate lazily;
    # "consumer_groups" is the repeated field holding the page's elements.
    result = ::Gapic::Rest::PagedEnumerable.new @managed_kafka_stub, :list_consumer_groups,
                                                "consumer_groups", request, result, options
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1261
+
1262
+ ##
1263
+ # Returns the properties of a single consumer group.
1264
+ #
1265
+ # @overload get_consumer_group(request, options = nil)
1266
+ # Pass arguments to `get_consumer_group` via a request object, either of type
1267
+ # {::Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest} or an equivalent Hash.
1268
+ #
1269
+ # @param request [::Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest, ::Hash]
1270
+ # A request object representing the call parameters. Required. To specify no
1271
+ # parameters, or to keep all the default parameter values, pass an empty Hash.
1272
+ # @param options [::Gapic::CallOptions, ::Hash]
1273
+ # Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
1274
+ #
1275
+ # @overload get_consumer_group(name: nil)
1276
+ # Pass arguments to `get_consumer_group` via keyword arguments. Note that at
1277
+ # least one keyword argument is required. To specify no parameters, or to keep all
1278
+ # the default parameter values, pass an empty Hash as a request object (see above).
1279
+ #
1280
+ # @param name [::String]
1281
+ # Required. The name of the consumer group whose configuration to return.
1282
+ # `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`.
1283
+ # @yield [result, operation] Access the result along with the TransportOperation object
1284
+ # @yieldparam result [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
1285
+ # @yieldparam operation [::Gapic::Rest::TransportOperation]
1286
+ #
1287
+ # @return [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
1288
+ #
1289
+ # @raise [::Google::Cloud::Error] if the REST call is aborted.
1290
+ #
1291
+ # @example Basic example
1292
+ # require "google/cloud/managed_kafka/v1"
1293
+ #
1294
+ # # Create a client object. The client can be reused for multiple calls.
1295
+ # client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
1296
+ #
1297
+ # # Create a request. To set request fields, pass in keyword arguments.
1298
+ # request = Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest.new
1299
+ #
1300
+ # # Call the get_consumer_group method.
1301
+ # result = client.get_consumer_group request
1302
+ #
1303
+ # # The returned object is of type Google::Cloud::ManagedKafka::V1::ConsumerGroup.
1304
+ # p result
1305
+ #
1306
def get_consumer_group request, options = nil
  # A request object (or Hash) is mandatory.
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::GetConsumerGroupRequest

  # Accept a plain Hash (or nil) in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  metadata = @config.rpcs.get_consumer_group.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client(
    lib_name: @config.lib_name,
    lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  )
  metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults take precedence over client-wide defaults.
  options.apply_defaults timeout: @config.rpcs.get_consumer_group.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.get_consumer_group.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.get_consumer_group request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1341
+
1342
##
# Updates the properties of a single consumer group.
#
# @overload update_consumer_group(request, options = nil)
#   Pass arguments to `update_consumer_group` via a request object, either of type
#   {::Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest} or an equivalent Hash.
#
#   @param request [::Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest, ::Hash]
#     A request object representing the call parameters. Required. To specify no
#     parameters, or to keep all the default parameter values, pass an empty Hash.
#   @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
#
# @overload update_consumer_group(update_mask: nil, consumer_group: nil)
#   Pass arguments to `update_consumer_group` via keyword arguments. Note that at
#   least one keyword argument is required. To specify no parameters, or to keep all
#   the default parameter values, pass an empty Hash as a request object (see above).
#
#   @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
#     Required. Field mask is used to specify the fields to be overwritten in the
#     ConsumerGroup resource by the update.
#     The fields specified in the update_mask are relative to the resource, not
#     the full request. A field will be overwritten if it is in the mask. The
#     mask is required and a value of * will update all fields.
#   @param consumer_group [::Google::Cloud::ManagedKafka::V1::ConsumerGroup, ::Hash]
#     Required. The consumer group to update. Its `name` field must be populated.
# @yield [result, operation] Access the result along with the TransportOperation object
# @yieldparam result [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
# @yieldparam operation [::Gapic::Rest::TransportOperation]
#
# @return [::Google::Cloud::ManagedKafka::V1::ConsumerGroup]
#
# @raise [::Google::Cloud::Error] if the REST call is aborted.
#
# @example Basic example
#   require "google/cloud/managed_kafka/v1"
#
#   # Create a client object. The client can be reused for multiple calls.
#   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
#
#   # Create a request. To set request fields, pass in keyword arguments.
#   request = Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest.new
#
#   # Call the update_consumer_group method.
#   result = client.update_consumer_group request
#
#   # The returned object is of type Google::Cloud::ManagedKafka::V1::ConsumerGroup.
#   p result
#
def update_consumer_group request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::UpdateConsumerGroupRequest

  # Normalize a hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this specific RPC.
  rpc_config = @config.rpcs.update_consumer_group
  call_metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults are applied first, then client-level defaults.
  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: call_metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.update_consumer_group request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1426
+
1427
##
# Deletes a single consumer group.
#
# @overload delete_consumer_group(request, options = nil)
#   Pass arguments to `delete_consumer_group` via a request object, either of type
#   {::Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest} or an equivalent Hash.
#
#   @param request [::Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest, ::Hash]
#     A request object representing the call parameters. Required. To specify no
#     parameters, or to keep all the default parameter values, pass an empty Hash.
#   @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
#
# @overload delete_consumer_group(name: nil)
#   Pass arguments to `delete_consumer_group` via keyword arguments. Note that at
#   least one keyword argument is required. To specify no parameters, or to keep all
#   the default parameter values, pass an empty Hash as a request object (see above).
#
#   @param name [::String]
#     Required. The name of the consumer group to delete.
#     `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`.
# @yield [result, operation] Access the result along with the TransportOperation object
# @yieldparam result [::Google::Protobuf::Empty]
# @yieldparam operation [::Gapic::Rest::TransportOperation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the REST call is aborted.
#
# @example Basic example
#   require "google/cloud/managed_kafka/v1"
#
#   # Create a client object. The client can be reused for multiple calls.
#   client = Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new
#
#   # Create a request. To set request fields, pass in keyword arguments.
#   request = Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest.new
#
#   # Call the delete_consumer_group method.
#   result = client.delete_consumer_group request
#
#   # The returned object is of type Google::Protobuf::Empty.
#   p result
#
def delete_consumer_group request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request,
                                     to: ::Google::Cloud::ManagedKafka::V1::DeleteConsumerGroupRequest

  # Normalize a hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this specific RPC.
  rpc_config = @config.rpcs.delete_consumer_group
  call_metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client, x-goog-user-project and x-goog-api-version headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::ManagedKafka::V1::VERSION,
    transports_version_send: [:rest]
  call_metadata[:"x-goog-api-version"] = API_VERSION unless API_VERSION.empty?
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # RPC-level defaults are applied first, then client-level defaults.
  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: call_metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @managed_kafka_stub.delete_consumer_group request, options do |result, operation|
    yield result, operation if block_given?
    return result
  end
rescue ::Gapic::Rest::Error => e
  raise ::Google::Cloud::Error.from_error(e)
end
1506
+
1507
##
# Configuration class for the ManagedKafka REST API.
#
# This class represents the configuration for ManagedKafka REST,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# @example
#
#   # Modify the global config, setting the timeout for
#   # list_clusters to 20 seconds,
#   # and all remaining timeouts to 10 seconds.
#   ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.configure do |config|
#     config.timeout = 10.0
#     config.rpcs.list_clusters.timeout = 20.0
#   end
#
#   # Apply the above configuration only to a new client.
#   client = ::Google::Cloud::ManagedKafka::V1::ManagedKafka::Rest::Client.new do |config|
#     config.timeout = 10.0
#     config.rpcs.list_clusters.timeout = 20.0
#   end
#
# @!attribute [rw] endpoint
#   A custom service endpoint, as a hostname or hostname:port. The default is
#   nil, indicating to use the default endpoint in the current universe domain.
#   @return [::String,nil]
# @!attribute [rw] credentials
#   Credentials to send with calls. You may provide any of the following types:
#    *  (`String`) The path to a service account key file in JSON format
#    *  (`Hash`) A service account key as a Hash
#    *  (`Google::Auth::Credentials`) A googleauth credentials object
#       (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
#    *  (`Signet::OAuth2::Client`) A signet oauth2 client object
#       (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
#    *  (`nil`) indicating no credentials
#   @return [::Object]
# @!attribute [rw] scope
#   The OAuth scopes
#   @return [::Array<::String>]
# @!attribute [rw] lib_name
#   The library name as recorded in instrumentation and logging
#   @return [::String]
# @!attribute [rw] lib_version
#   The library version as recorded in instrumentation and logging
#   @return [::String]
# @!attribute [rw] timeout
#   The call timeout in seconds.
#   @return [::Numeric]
# @!attribute [rw] metadata
#   Additional headers to be sent with the call.
#   @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
#   The retry policy. The value is a hash with the following keys:
#    *  `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
#    *  `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
#    *  `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
#    *  `:retry_codes` (*type:* `Array<String>`) - The error codes that should
#       trigger a retry.
#   @return [::Hash]
# @!attribute [rw] quota_project
#   A separate project against which to charge quota.
#   @return [::String]
# @!attribute [rw] universe_domain
#   The universe domain within which to make requests. This determines the
#   default endpoint URL. The default value of nil uses the environment
#   universe (usually the default "googleapis.com" universe).
#   @return [::String,nil]
#
class Configuration
  extend ::Gapic::Config

  # @private
  # The endpoint specific to the default "googleapis.com" universe. Deprecated.
  DEFAULT_ENDPOINT = "managedkafka.googleapis.com"

  config_attr :endpoint, nil, ::String, nil
  config_attr :credentials, nil do |value|
    # Accept only the credential representations documented on the attribute.
    allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
    allowed.any? { |klass| klass === value }
  end
  config_attr :scope, nil, ::String, ::Array, nil
  config_attr :lib_name, nil, ::String, nil
  config_attr :lib_version, nil, ::String, nil
  config_attr :timeout, nil, ::Numeric, nil
  config_attr :metadata, nil, ::Hash, nil
  config_attr :retry_policy, nil, ::Hash, ::Proc, nil
  config_attr :quota_project, nil, ::String, nil
  config_attr :universe_domain, nil, ::String, nil

  # @private
  # Overrides for http bindings for the RPCs of this service
  # are only used when this service is used as mixin, and only
  # by the host service.
  # @return [::Hash{::Symbol=>::Array<::Gapic::Rest::GrpcTranscoder::HttpBinding>}]
  config_attr :bindings_override, {}, ::Hash, nil

  # @private
  def initialize parent_config = nil
    @parent_config = parent_config unless parent_config.nil?

    yield self if block_given?
  end

  ##
  # Configurations for individual RPCs
  # @return [Rpcs]
  #
  def rpcs
    @rpcs ||= begin
      parent_rpcs = nil
      parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
      Rpcs.new parent_rpcs
    end
  end

  ##
  # Configuration RPC class for the ManagedKafka API.
  #
  # Includes fields providing the configuration for each RPC in this service.
  # Each configuration object is of type `Gapic::Config::Method` and includes
  # the following configuration fields:
  #
  #  *  `timeout` (*type:* `Numeric`) - The call timeout in seconds
  #  *  `metadata` (*type:* `Hash{Symbol=>String}`) - Additional headers
  #  *  `retry_policy (*type:* `Hash`) - The retry policy. The policy fields
  #     include the following keys:
  #      *  `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
  #      *  `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
  #      *  `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
  #      *  `:retry_codes` (*type:* `Array<String>`) - The error codes that should
  #         trigger a retry.
  #
  class Rpcs
    ##
    # RPC-specific configuration for `list_clusters`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_clusters
    ##
    # RPC-specific configuration for `get_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_cluster
    ##
    # RPC-specific configuration for `create_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_cluster
    ##
    # RPC-specific configuration for `update_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_cluster
    ##
    # RPC-specific configuration for `delete_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_cluster
    ##
    # RPC-specific configuration for `list_topics`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_topics
    ##
    # RPC-specific configuration for `get_topic`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_topic
    ##
    # RPC-specific configuration for `create_topic`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_topic
    ##
    # RPC-specific configuration for `update_topic`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_topic
    ##
    # RPC-specific configuration for `delete_topic`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_topic
    ##
    # RPC-specific configuration for `list_consumer_groups`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_consumer_groups
    ##
    # RPC-specific configuration for `get_consumer_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_consumer_group
    ##
    # RPC-specific configuration for `update_consumer_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_consumer_group
    ##
    # RPC-specific configuration for `delete_consumer_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_consumer_group

    # @private
    # Builds each per-RPC config, inheriting from the matching RPC config on
    # the parent (when the parent responds to that RPC name).
    def initialize parent_rpcs = nil
      %i[
        list_clusters get_cluster create_cluster update_cluster delete_cluster
        list_topics get_topic create_topic update_topic delete_topic
        list_consumer_groups get_consumer_group update_consumer_group delete_consumer_group
      ].each do |rpc_name|
        parent_method = parent_rpcs.send rpc_name if parent_rpcs.respond_to? rpc_name
        instance_variable_set :"@#{rpc_name}", ::Gapic::Config::Method.new(parent_method)
      end

      yield self if block_given?
    end
  end
end
1753
+ end
1754
+ end
1755
+ end
1756
+ end
1757
+ end
1758
+ end
1759
+ end