aws-sdk-kafka 1.16.0 → 1.21.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
- SHA1:
- metadata.gz: 2005c3656411ba7b160634a0d482f6bc6813858b
- data.tar.gz: ae25ffb8f746862f3a12830dbd3781c37f7f1550
+ SHA256:
+ metadata.gz: b90b1f87c629c19c9cb61a2136d7ded455cbd37e6d7fd9bbcfb1a2055dd24501
+ data.tar.gz: 1ad31cafdcb9782852b571460e0a3c7d6ac52667a09614ad08f7548f08fa60d8
  SHA512:
- metadata.gz: a3fd4af4e94a8e0ac209dfbd5757d7eb4c2214e1cb986738c7a89d998c17dcde17129065af19ac241d571bb306ab09bb44cee8a1c95b10e2d3966c3b65f3faac
- data.tar.gz: 47f123679661e1a791a343dc71c8539c9988d78304260291b5b8373c6d55f446a86c21f0609feca7bfd917394d8b458b51ef07881fa48c7e46b63775677b024c
+ metadata.gz: fca435a85574def0085561c96e340c56e7538e39570541564c7a0600d9699937e079869e1d2ae7e2ec73c12126af2abb09c6575ee11dddcac8f6b3d019a3f10e
+ data.tar.gz: ccd66241d9b9308a5ca06c0f3ecac12de157fb6599ff48438e3bd2a8558667984fef32e01eabd7123f79f4ab508ceb3b51889b46254a432c5a4eecedf9682565

lib/aws-sdk-kafka.rb CHANGED
@@ -24,17 +24,20 @@ require_relative 'aws-sdk-kafka/customizations'
  # methods each accept a hash of request parameters and return a response
  # structure.
  #
+ # kafka = Aws::Kafka::Client.new
+ # resp = kafka.create_cluster(params)
+ #
  # See {Client} for more information.
  #
  # # Errors
  #
- # Errors returned from Managed Streaming for Kafka all
- # extend {Errors::ServiceError}.
+ # Errors returned from Managed Streaming for Kafka are defined in the
+ # {Errors} module and all extend {Errors::ServiceError}.
  #
  # begin
  # # do stuff
  # rescue Aws::Kafka::Errors::ServiceError
- # # rescues all service API errors
+ # # rescues all Managed Streaming for Kafka API errors
  # end
  #
  # See {Errors} for more information.
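
For reference, a minimal sketch of the documented rescue pattern; the region and cluster ARN below are placeholders, not values taken from the gem:

    require 'aws-sdk-kafka'

    kafka = Aws::Kafka::Client.new(region: 'us-east-1') # placeholder region

    begin
      kafka.describe_cluster(cluster_arn: 'arn:aws:kafka:us-east-1:111122223333:cluster/example/abc') # placeholder ARN
    rescue Aws::Kafka::Errors::ServiceError => e
      # rescues all Managed Streaming for Kafka API errors
      warn "#{e.class}: #{e.message}"
    end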
@@ -42,6 +45,6 @@ require_relative 'aws-sdk-kafka/customizations'
  # @service
  module Aws::Kafka

- GEM_VERSION = '1.16.0'
+ GEM_VERSION = '1.21.0'

  end

lib/aws-sdk-kafka/client.rb CHANGED
@@ -30,6 +30,18 @@ require 'aws-sdk-core/plugins/protocols/rest_json.rb'
  Aws::Plugins::GlobalConfiguration.add_identifier(:kafka)

  module Aws::Kafka
+ # An API client for Kafka. To construct a client, you need to configure a `:region` and `:credentials`.
+ #
+ # client = Aws::Kafka::Client.new(
+ # region: region_name,
+ # credentials: credentials,
+ # # ...
+ # )
+ #
+ # For details on configuring region and credentials see
+ # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
+ #
+ # See {#initialize} for a full list of supported configuration options.
  class Client < Seahorse::Client::Base

  include Aws::ClientStubs
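
A minimal construction sketch along the lines of the comment above; the region and credential values are placeholders:

    require 'aws-sdk-kafka'

    client = Aws::Kafka::Client.new(
      region: 'us-east-1',                                              # placeholder region
      credentials: Aws::Credentials.new('AKIAEXAMPLE', 'secretEXAMPLE') # placeholder static credentials
    )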
@@ -93,7 +105,7 @@ module Aws::Kafka
  # @option options [required, String] :region
  # The AWS region to connect to. The configured `:region` is
  # used to determine the service `:endpoint`. When not passed,
- # a default `:region` is search for in the following locations:
+ # a default `:region` is searched for in the following locations:
  #
  # * `Aws.config[:region]`
  # * `ENV['AWS_REGION']`
@@ -108,6 +120,12 @@ module Aws::Kafka
  # When set to `true`, a thread polling for endpoints will be running in
  # the background every 60 secs (default). Defaults to `false`.
  #
+ # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
+ # Used only in `adaptive` retry mode. When true, the request will sleep
+ # until there is sufficent client side capacity to retry the request.
+ # When false, the request will raise a `RetryCapacityNotAvailableError` and will
+ # not retry instead of sleeping.
+ #
  # @option options [Boolean] :client_side_monitoring (false)
  # When `true`, client-side metrics will be collected for all API requests from
  # this client.
@@ -132,6 +150,10 @@ module Aws::Kafka
  # When `true`, an attempt is made to coerce request parameters into
  # the required types.
  #
+ # @option options [Boolean] :correct_clock_skew (true)
+ # Used only in `standard` and adaptive retry modes. Specifies whether to apply
+ # a clock skew correction and retry requests with skewed client clocks.
+ #
  # @option options [Boolean] :disable_host_prefix_injection (false)
  # Set to true to disable SDK automatically adding host prefix
  # to default service endpoint when available.
@@ -139,7 +161,7 @@ module Aws::Kafka
  # @option options [String] :endpoint
  # The client endpoint is normally constructed from the `:region`
  # option. You should only configure an `:endpoint` when connecting
- # to test endpoints. This should be avalid HTTP(S) URI.
+ # to test endpoints. This should be a valid HTTP(S) URI.
  #
  # @option options [Integer] :endpoint_cache_max_entries (1000)
  # Used for the maximum size limit of the LRU cache storing endpoints data
@@ -154,7 +176,7 @@ module Aws::Kafka
  # requests fetching endpoints information. Defaults to 60 sec.
  #
  # @option options [Boolean] :endpoint_discovery (false)
- # When set to `true`, endpoint discovery will be enabled for operations when available. Defaults to `false`.
+ # When set to `true`, endpoint discovery will be enabled for operations when available.
  #
  # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
  # The log formatter.
@@ -166,15 +188,29 @@ module Aws::Kafka
  # The Logger instance to send log messages to. If this option
  # is not set, logging will be disabled.
  #
+ # @option options [Integer] :max_attempts (3)
+ # An integer representing the maximum number attempts that will be made for
+ # a single request, including the initial attempt. For example,
+ # setting this value to 5 will result in a request being retried up to
+ # 4 times. Used in `standard` and `adaptive` retry modes.
+ #
  # @option options [String] :profile ("default")
  # Used when loading credentials from the shared credentials file
  # at HOME/.aws/credentials. When not specified, 'default' is used.
  #
+ # @option options [Proc] :retry_backoff
+ # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
+ # This option is only used in the `legacy` retry mode.
+ #
  # @option options [Float] :retry_base_delay (0.3)
- # The base delay in seconds used by the default backoff function.
+ # The base delay in seconds used by the default backoff function. This option
+ # is only used in the `legacy` retry mode.
  #
  # @option options [Symbol] :retry_jitter (:none)
- # A delay randomiser function used by the default backoff function. Some predefined functions can be referenced by name - :none, :equal, :full, otherwise a Proc that takes and returns a number.
+ # A delay randomiser function used by the default backoff function.
+ # Some predefined functions can be referenced by name - :none, :equal, :full,
+ # otherwise a Proc that takes and returns a number. This option is only used
+ # in the `legacy` retry mode.
  #
  # @see https://www.awsarchitectureblog.com/2015/03/backoff.html
  #
@@ -182,11 +218,30 @@ module Aws::Kafka
  # The maximum number of times to retry failed requests. Only
  # ~ 500 level server errors and certain ~ 400 level client errors
  # are retried. Generally, these are throttling errors, data
- # checksum errors, networking errors, timeout errors and auth
- # errors from expired credentials.
+ # checksum errors, networking errors, timeout errors, auth errors,
+ # endpoint discovery, and errors from expired credentials.
+ # This option is only used in the `legacy` retry mode.
  #
  # @option options [Integer] :retry_max_delay (0)
- # The maximum number of seconds to delay between retries (0 for no limit) used by the default backoff function.
+ # The maximum number of seconds to delay between retries (0 for no limit)
+ # used by the default backoff function. This option is only used in the
+ # `legacy` retry mode.
+ #
+ # @option options [String] :retry_mode ("legacy")
+ # Specifies which retry algorithm to use. Values are:
+ #
+ # * `legacy` - The pre-existing retry behavior. This is default value if
+ # no retry mode is provided.
+ #
+ # * `standard` - A standardized set of retry rules across the AWS SDKs.
+ # This includes support for retry quotas, which limit the number of
+ # unsuccessful retries a client can make.
+ #
+ # * `adaptive` - An experimental retry mode that includes all the
+ # functionality of `standard` mode along with automatic client side
+ # throttling. This is a provisional mode that may change behavior
+ # in the future.
+ #
  #
  # @option options [String] :secret_access_key
  #
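
The new retry options compose with the existing client options; a hedged sketch using the opt-in `standard` mode (the values shown are arbitrary):

    client = Aws::Kafka::Client.new(
      region: 'us-east-1',      # placeholder region
      retry_mode: 'standard',   # 'legacy' remains the default
      max_attempts: 5,          # initial attempt plus up to 4 retries
      correct_clock_skew: true  # shown explicitly; true is already the default
    )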
@@ -209,16 +264,15 @@ module Aws::Kafka
  # requests through. Formatted like 'http://proxy.com:123'.
  #
  # @option options [Float] :http_open_timeout (15) The number of
- # seconds to wait when opening a HTTP session before rasing a
+ # seconds to wait when opening a HTTP session before raising a
  # `Timeout::Error`.
  #
  # @option options [Integer] :http_read_timeout (60) The default
  # number of seconds to wait for response data. This value can
- # safely be set
- # per-request on the session yeidled by {#session_for}.
+ # safely be set per-request on the session.
  #
  # @option options [Float] :http_idle_timeout (5) The number of
- # seconds a connection is allowed to sit idble before it is
+ # seconds a connection is allowed to sit idle before it is
  # considered stale. Stale connections are closed and removed
  # from the pool before making a request.
  #
@@ -227,7 +281,7 @@ module Aws::Kafka
  # request body. This option has no effect unless the request has
  # "Expect" header set to "100-continue". Defaults to `nil` which
  # disables this behaviour. This value can safely be set per
- # request on the session yeidled by {#session_for}.
+ # request on the session.
  #
  # @option options [Boolean] :http_wire_trace (false) When `true`,
  # HTTP debug output will be sent to the `:logger`.
@@ -278,6 +332,9 @@ module Aws::Kafka
  # @option params [required, String] :kafka_version
  # The version of Apache Kafka.
  #
+ # @option params [Types::LoggingInfo] :logging_info
+ # LoggingInfo details.
+ #
  # @option params [required, Integer] :number_of_broker_nodes
  # The number of Kafka broker nodes in the Amazon MSK cluster.
  #
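
As a sketch of the new `:logging_info` parameter to `#create_cluster`, broker logs could be delivered to S3; every value below (names, subnets, versions) is a placeholder:

    resp = client.create_cluster(
      cluster_name: 'example-cluster',
      kafka_version: '2.2.1',
      number_of_broker_nodes: 3,
      broker_node_group_info: {
        instance_type: 'kafka.m5.large',
        client_subnets: ['subnet-0123abcd', 'subnet-4567efgh', 'subnet-89abcdef']
      },
      logging_info: {
        broker_logs: {
          s3: { enabled: true, bucket: 'example-msk-logs', prefix: 'brokers/' }
        }
      }
    )
    resp.cluster_arn #=> String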
@@ -328,6 +385,23 @@ module Aws::Kafka
  # },
  # enhanced_monitoring: "DEFAULT", # accepts DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER
  # kafka_version: "__stringMin1Max128", # required
+ # logging_info: {
+ # broker_logs: { # required
+ # cloud_watch_logs: {
+ # enabled: false, # required
+ # log_group: "__string",
+ # },
+ # firehose: {
+ # delivery_stream: "__string",
+ # enabled: false, # required
+ # },
+ # s3: {
+ # bucket: "__string",
+ # enabled: false, # required
+ # prefix: "__string",
+ # },
+ # },
+ # },
  # number_of_broker_nodes: 1, # required
  # open_monitoring: {
  # prometheus: { # required
@@ -364,7 +438,7 @@ module Aws::Kafka
  # @option params [String] :description
  # The description of the configuration.
  #
- # @option params [required, Array<String>] :kafka_versions
+ # @option params [Array<String>] :kafka_versions
  # The versions of Apache Kafka with which you can use this MSK
  # configuration.
  #
@@ -385,7 +459,7 @@ module Aws::Kafka
  #
  # resp = client.create_configuration({
  # description: "__string",
- # kafka_versions: ["__string"], # required
+ # kafka_versions: ["__string"],
  # name: "__string", # required
  # server_properties: "data", # required
  # })
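
With `:kafka_versions` now optional, a configuration can be created from just a name and server properties; the values here are placeholders, not part of this gem:

    resp = client.create_configuration(
      name: 'example-configuration',
      description: 'Tuned defaults',
      server_properties: "auto.create.topics.enable=true\nlog.retention.hours=48"
    )
    resp.arn #=> String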
@@ -474,6 +548,13 @@ module Aws::Kafka
  # resp.cluster_info.current_broker_software_info.configuration_arn #=> String
  # resp.cluster_info.current_broker_software_info.configuration_revision #=> Integer
  # resp.cluster_info.current_broker_software_info.kafka_version #=> String
+ # resp.cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+ # resp.cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+ # resp.cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+ # resp.cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+ # resp.cluster_info.logging_info.broker_logs.s3.bucket #=> String
+ # resp.cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+ # resp.cluster_info.logging_info.broker_logs.s3.prefix #=> String
  # resp.cluster_info.current_version #=> String
  # resp.cluster_info.encryption_info.encryption_at_rest.data_volume_kms_key_id #=> String
  # resp.cluster_info.encryption_info.encryption_in_transit.client_broker #=> String, one of "TLS", "TLS_PLAINTEXT", "PLAINTEXT"
@@ -518,6 +599,9 @@ module Aws::Kafka
  # resp.cluster_operation_info.end_time #=> Time
  # resp.cluster_operation_info.error_info.error_code #=> String
  # resp.cluster_operation_info.error_info.error_string #=> String
+ # resp.cluster_operation_info.operation_steps #=> Array
+ # resp.cluster_operation_info.operation_steps[0].step_info.step_status #=> String
+ # resp.cluster_operation_info.operation_steps[0].step_name #=> String
  # resp.cluster_operation_info.operation_arn #=> String
  # resp.cluster_operation_info.operation_state #=> String
  # resp.cluster_operation_info.operation_type #=> String
@@ -530,6 +614,14 @@ module Aws::Kafka
  # resp.cluster_operation_info.source_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info.source_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info.source_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+ # resp.cluster_operation_info.source_cluster_info.kafka_version #=> String
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+ # resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.s3.prefix #=> String
  # resp.cluster_operation_info.target_cluster_info.broker_ebs_volume_info #=> Array
  # resp.cluster_operation_info.target_cluster_info.broker_ebs_volume_info[0].kafka_broker_node_id #=> String
  # resp.cluster_operation_info.target_cluster_info.broker_ebs_volume_info[0].volume_size_gb #=> Integer
@@ -539,6 +631,14 @@ module Aws::Kafka
  # resp.cluster_operation_info.target_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info.target_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info.target_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+ # resp.cluster_operation_info.target_cluster_info.kafka_version #=> String
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+ # resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.s3.prefix #=> String
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeClusterOperation AWS API Documentation
  #
@@ -656,6 +756,37 @@ module Aws::Kafka
  req.send_request(options)
  end

+ # Gets the Apache Kafka versions to which you can update the MSK
+ # cluster.
+ #
+ # @option params [String] :cluster_arn
+ #
+ # @return [Types::GetCompatibleKafkaVersionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetCompatibleKafkaVersionsResponse#compatible_kafka_versions #compatible_kafka_versions} => Array&lt;Types::CompatibleKafkaVersion&gt;
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_compatible_kafka_versions({
+ # cluster_arn: "__string",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.compatible_kafka_versions #=> Array
+ # resp.compatible_kafka_versions[0].source_version #=> String
+ # resp.compatible_kafka_versions[0].target_versions #=> Array
+ # resp.compatible_kafka_versions[0].target_versions[0] #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetCompatibleKafkaVersions AWS API Documentation
+ #
+ # @overload get_compatible_kafka_versions(params = {})
+ # @param [Hash] params ({})
+ def get_compatible_kafka_versions(params = {}, options = {})
+ req = build_request(:get_compatible_kafka_versions, params)
+ req.send_request(options)
+ end
+
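A short usage sketch for the new operation; the cluster ARN is a placeholder:

    resp = client.get_compatible_kafka_versions(
      cluster_arn: 'arn:aws:kafka:us-east-1:111122223333:cluster/example/abc' # placeholder ARN
    )
    resp.compatible_kafka_versions.each do |cv|
      puts "#{cv.source_version} -> #{cv.target_versions.join(', ')}"
    end
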
  # Returns a list of all the operations that have been performed on the
  # specified MSK cluster.
  #
@@ -670,6 +801,8 @@ module Aws::Kafka
  # * {Types::ListClusterOperationsResponse#cluster_operation_info_list #cluster_operation_info_list} => Array&lt;Types::ClusterOperationInfo&gt;
  # * {Types::ListClusterOperationsResponse#next_token #next_token} => String
  #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
  # @example Request syntax with placeholder values
  #
  # resp = client.list_cluster_operations({
@@ -687,6 +820,9 @@ module Aws::Kafka
  # resp.cluster_operation_info_list[0].end_time #=> Time
  # resp.cluster_operation_info_list[0].error_info.error_code #=> String
  # resp.cluster_operation_info_list[0].error_info.error_string #=> String
+ # resp.cluster_operation_info_list[0].operation_steps #=> Array
+ # resp.cluster_operation_info_list[0].operation_steps[0].step_info.step_status #=> String
+ # resp.cluster_operation_info_list[0].operation_steps[0].step_name #=> String
  # resp.cluster_operation_info_list[0].operation_arn #=> String
  # resp.cluster_operation_info_list[0].operation_state #=> String
  # resp.cluster_operation_info_list[0].operation_type #=> String
@@ -699,6 +835,14 @@ module Aws::Kafka
  # resp.cluster_operation_info_list[0].source_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info_list[0].source_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info_list[0].source_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+ # resp.cluster_operation_info_list[0].source_cluster_info.kafka_version #=> String
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+ # resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.s3.prefix #=> String
  # resp.cluster_operation_info_list[0].target_cluster_info.broker_ebs_volume_info #=> Array
  # resp.cluster_operation_info_list[0].target_cluster_info.broker_ebs_volume_info[0].kafka_broker_node_id #=> String
  # resp.cluster_operation_info_list[0].target_cluster_info.broker_ebs_volume_info[0].volume_size_gb #=> Integer
@@ -708,6 +852,14 @@ module Aws::Kafka
  # resp.cluster_operation_info_list[0].target_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info_list[0].target_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
  # resp.cluster_operation_info_list[0].target_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+ # resp.cluster_operation_info_list[0].target_cluster_info.kafka_version #=> String
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+ # resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.s3.prefix #=> String
  # resp.next_token #=> String
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations AWS API Documentation
@@ -732,6 +884,8 @@ module Aws::Kafka
  # * {Types::ListClustersResponse#cluster_info_list #cluster_info_list} => Array&lt;Types::ClusterInfo&gt;
  # * {Types::ListClustersResponse#next_token #next_token} => String
  #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
  # @example Request syntax with placeholder values
  #
  # resp = client.list_clusters({
@@ -759,6 +913,13 @@ module Aws::Kafka
  # resp.cluster_info_list[0].current_broker_software_info.configuration_arn #=> String
  # resp.cluster_info_list[0].current_broker_software_info.configuration_revision #=> Integer
  # resp.cluster_info_list[0].current_broker_software_info.kafka_version #=> String
+ # resp.cluster_info_list[0].logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+ # resp.cluster_info_list[0].logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+ # resp.cluster_info_list[0].logging_info.broker_logs.firehose.delivery_stream #=> String
+ # resp.cluster_info_list[0].logging_info.broker_logs.firehose.enabled #=> Boolean
+ # resp.cluster_info_list[0].logging_info.broker_logs.s3.bucket #=> String
+ # resp.cluster_info_list[0].logging_info.broker_logs.s3.enabled #=> Boolean
+ # resp.cluster_info_list[0].logging_info.broker_logs.s3.prefix #=> String
  # resp.cluster_info_list[0].current_version #=> String
  # resp.cluster_info_list[0].encryption_info.encryption_at_rest.data_volume_kms_key_id #=> String
  # resp.cluster_info_list[0].encryption_info.encryption_in_transit.client_broker #=> String, one of "TLS", "TLS_PLAINTEXT", "PLAINTEXT"
@@ -795,6 +956,8 @@ module Aws::Kafka
  # * {Types::ListConfigurationRevisionsResponse#next_token #next_token} => String
  # * {Types::ListConfigurationRevisionsResponse#revisions #revisions} => Array&lt;Types::ConfigurationRevision&gt;
  #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
  # @example Request syntax with placeholder values
  #
  # resp = client.list_configuration_revisions({
@@ -831,6 +994,8 @@ module Aws::Kafka
  # * {Types::ListConfigurationsResponse#configurations #configurations} => Array&lt;Types::Configuration&gt;
  # * {Types::ListConfigurationsResponse#next_token #next_token} => String
  #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
  # @example Request syntax with placeholder values
  #
  # resp = client.list_configurations({
@@ -861,6 +1026,42 @@ module Aws::Kafka
  req.send_request(options)
  end

+ # Returns a list of Kafka versions.
+ #
+ # @option params [Integer] :max_results
+ #
+ # @option params [String] :next_token
+ #
+ # @return [Types::ListKafkaVersionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListKafkaVersionsResponse#kafka_versions #kafka_versions} => Array&lt;Types::KafkaVersion&gt;
+ # * {Types::ListKafkaVersionsResponse#next_token #next_token} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_kafka_versions({
+ # max_results: 1,
+ # next_token: "__string",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.kafka_versions #=> Array
+ # resp.kafka_versions[0].version #=> String
+ # resp.kafka_versions[0].status #=> String, one of "ACTIVE", "DEPRECATED"
+ # resp.next_token #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListKafkaVersions AWS API Documentation
+ #
+ # @overload list_kafka_versions(params = {})
+ # @param [Hash] params ({})
+ def list_kafka_versions(params = {}, options = {})
+ req = build_request(:list_kafka_versions, params)
+ req.send_request(options)
+ end
+
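Because the returned response is pageable and Enumerable, every page can be walked without handling `next_token` by hand; a small sketch:

    client.list_kafka_versions(max_results: 10).each do |page|
      page.kafka_versions.each do |kv|
        puts "#{kv.version} (#{kv.status})"
      end
    end
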
  # Returns a list of the broker nodes in the cluster.
  #
  # @option params [required, String] :cluster_arn
@@ -874,6 +1075,8 @@ module Aws::Kafka
  # * {Types::ListNodesResponse#next_token #next_token} => String
  # * {Types::ListNodesResponse#node_info_list #node_info_list} => Array&lt;Types::NodeInfo&gt;
  #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
  # @example Request syntax with placeholder values
  #
  # resp = client.list_nodes({
@@ -1126,6 +1329,50 @@ module Aws::Kafka
  req.send_request(options)
  end

+ # Updates the Apache Kafka version for the cluster.
+ #
+ # @option params [required, String] :cluster_arn
+ #
+ # @option params [Types::ConfigurationInfo] :configuration_info
+ # Specifies the configuration to use for the brokers.
+ #
+ # @option params [required, String] :current_version
+ # Current cluster version.
+ #
+ # @option params [required, String] :target_kafka_version
+ # Target Kafka version.
+ #
+ # @return [Types::UpdateClusterKafkaVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::UpdateClusterKafkaVersionResponse#cluster_arn #cluster_arn} => String
+ # * {Types::UpdateClusterKafkaVersionResponse#cluster_operation_arn #cluster_operation_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.update_cluster_kafka_version({
+ # cluster_arn: "__string", # required
+ # configuration_info: {
+ # arn: "__string", # required
+ # revision: 1, # required
+ # },
+ # current_version: "__string", # required
+ # target_kafka_version: "__string", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.cluster_arn #=> String
+ # resp.cluster_operation_arn #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterKafkaVersion AWS API Documentation
+ #
+ # @overload update_cluster_kafka_version(params = {})
+ # @param [Hash] params ({})
+ def update_cluster_kafka_version(params = {}, options = {})
+ req = build_request(:update_cluster_kafka_version, params)
+ req.send_request(options)
+ end
+
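A sketch tying the new operations together: read the cluster's current version, then request the upgrade (the ARN and target version are placeholders):

    arn = 'arn:aws:kafka:us-east-1:111122223333:cluster/example/abc' # placeholder ARN

    current = client.describe_cluster(cluster_arn: arn).cluster_info.current_version

    resp = client.update_cluster_kafka_version(
      cluster_arn: arn,
      current_version: current,
      target_kafka_version: '2.4.1' # placeholder; choose one returned by get_compatible_kafka_versions
    )
    resp.cluster_operation_arn #=> String
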
  # Updates the monitoring settings for the cluster. You can use this
  # operation to specify which Apache Kafka metrics you want Amazon MSK to
  # send to Amazon CloudWatch. You can also specify settings for open
@@ -1144,6 +1391,9 @@ module Aws::Kafka
  # @option params [Types::OpenMonitoringInfo] :open_monitoring
  # The settings for open monitoring.
  #
+ # @option params [Types::LoggingInfo] :logging_info
+ # LoggingInfo details.
+ #
  # @return [Types::UpdateMonitoringResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
  # * {Types::UpdateMonitoringResponse#cluster_arn #cluster_arn} => String
@@ -1165,6 +1415,23 @@ module Aws::Kafka
  # },
  # },
  # },
+ # logging_info: {
+ # broker_logs: { # required
+ # cloud_watch_logs: {
+ # enabled: false, # required
+ # log_group: "__string",
+ # },
+ # firehose: {
+ # delivery_stream: "__string",
+ # enabled: false, # required
+ # },
+ # s3: {
+ # bucket: "__string",
+ # enabled: false, # required
+ # prefix: "__string",
+ # },
+ # },
+ # },
  # })
  #
  # @example Response structure
@@ -1194,7 +1461,7 @@ module Aws::Kafka
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-kafka'
- context[:gem_version] = '1.16.0'
+ context[:gem_version] = '1.21.0'
  Seahorse::Client::Request.new(handlers, context)
  end