aws-sdk-kafka 1.17.0 → 1.22.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
-SHA1:
-  metadata.gz: 0aa7e923a0cedef0fed316373839b002aed22d5b
-  data.tar.gz: afe5ac227264224ee6e76c4616b6ff813ba20179
+SHA256:
+  metadata.gz: c9cb986829fb0140c1f36c509c233bffaa9642a1f7cd0b5cc7b57e859d7515cf
+  data.tar.gz: 9014a1b4f6f682ed91b77aa93d94fb088046b8afb92567bf243157287c4113ef
 SHA512:
-  metadata.gz: 245b0bcd251352982a92c7fd9501d3c161ea5810ebb17950b2778367eb16c5cf1f3d4c8ffefce4bfdeb3de76fcbc63f5ed7ac40cbbd5e6386d966dfeb347cf43
-  data.tar.gz: e789cc2d6ea09970a4454e7741455c590888193014d5bc74b7afcc66526270feb79614a26d68b9881edd457d0c3175818be118da91615ad01969cf77fd29542a
+  metadata.gz: 8163d556cc637d6ecc52e1cdd7848852f1cec13f5893569d563b57f5b842c6f4d12707e84a46be9e7857d8977dc75894857ef852404b3d2ec3dbc1273bf85234
+  data.tar.gz: 0da281fff8d8ca8da320c5575bde224e857c447d0f791571588e5055832683d75d0e3b273f6bb8c401912aa6da4d34126586ab26b1b79765ae5d924bbf99b379
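The gem now publishes SHA256 digests in place of the deprecated SHA1 ones (SHA512 stays). A minimal verification sketch, assuming you have unpacked the .gem (a plain tar archive) and gunzipped checksums.yaml.gz so the files below sit in the working directory; file names follow the standard .gem layout:

    # tar -xf aws-sdk-kafka-1.22.1.gem && gunzip checksums.yaml.gz
    require 'digest'
    require 'yaml'

    # checksums.yaml maps each archive inside the .gem to its digests.
    checksums = YAML.load_file('checksums.yaml')

    %w[metadata.gz data.tar.gz].each do |file|
      actual   = Digest::SHA256.file(file).hexdigest
      expected = checksums['SHA256'][file]
      abort "#{file}: checksum mismatch" unless actual == expected
      puts "#{file}: OK"
    end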
@@ -24,17 +24,20 @@ require_relative 'aws-sdk-kafka/customizations'
 # methods each accept a hash of request parameters and return a response
 # structure.
 #
+#     kafka = Aws::Kafka::Client.new
+#     resp = kafka.create_cluster(params)
+#
 # See {Client} for more information.
 #
 # # Errors
 #
-# Errors returned from Managed Streaming for Kafka all
-# extend {Errors::ServiceError}.
+# Errors returned from Managed Streaming for Kafka are defined in the
+# {Errors} module and all extend {Errors::ServiceError}.
 #
 #     begin
 #       # do stuff
 #     rescue Aws::Kafka::Errors::ServiceError
-#       # rescues all service API errors
+#       # rescues all Managed Streaming for Kafka API errors
 #     end
 #
 # See {Errors} for more information.
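Putting the two documentation additions above together, a hedged end-to-end sketch of calling the client and rescuing service errors (the region is a placeholder):

    require 'aws-sdk-kafka'

    kafka = Aws::Kafka::Client.new(region: 'us-east-1') # placeholder region

    begin
      resp = kafka.list_clusters
      resp.cluster_info_list.each { |c| puts c.cluster_name }
    rescue Aws::Kafka::Errors::ServiceError => e
      # rescues all Managed Streaming for Kafka API errors
      warn "MSK call failed: #{e.code}: #{e.message}"
    end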
@@ -42,6 +45,6 @@ require_relative 'aws-sdk-kafka/customizations'
 # @service
 module Aws::Kafka
 
-  GEM_VERSION = '1.17.0'
+  GEM_VERSION = '1.22.1'
 
 end
@@ -24,12 +24,25 @@ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
 require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
 require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
 require 'aws-sdk-core/plugins/transfer_encoding.rb'
+require 'aws-sdk-core/plugins/http_checksum.rb'
 require 'aws-sdk-core/plugins/signature_v4.rb'
 require 'aws-sdk-core/plugins/protocols/rest_json.rb'
 
 Aws::Plugins::GlobalConfiguration.add_identifier(:kafka)
 
 module Aws::Kafka
+  # An API client for Kafka. To construct a client, you need to configure a `:region` and `:credentials`.
+  #
+  #     client = Aws::Kafka::Client.new(
+  #       region: region_name,
+  #       credentials: credentials,
+  #       # ...
+  #     )
+  #
+  # For details on configuring region and credentials see
+  # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
+  #
+  # See {#initialize} for a full list of supported configuration options.
   class Client < Seahorse::Client::Base
 
     include Aws::ClientStubs
@@ -57,6 +70,7 @@ module Aws::Kafka
     add_plugin(Aws::Plugins::ClientMetricsPlugin)
     add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
     add_plugin(Aws::Plugins::TransferEncoding)
+    add_plugin(Aws::Plugins::HttpChecksum)
     add_plugin(Aws::Plugins::SignatureV4)
     add_plugin(Aws::Plugins::Protocols::RestJson)
 
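The new class-level docs describe constructing a client with an explicit region and credentials. A sketch using static credentials from the environment (in practice the default credential provider chain usually supplies these for you):

    require 'aws-sdk-kafka'

    client = Aws::Kafka::Client.new(
      region: 'us-west-2',                # placeholder region
      credentials: Aws::Credentials.new(
        ENV['AWS_ACCESS_KEY_ID'],
        ENV['AWS_SECRET_ACCESS_KEY']
      )
    )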
@@ -93,7 +107,7 @@ module Aws::Kafka
     # @option options [required, String] :region
     #   The AWS region to connect to. The configured `:region` is
     #   used to determine the service `:endpoint`. When not passed,
-    #   a default `:region` is search for in the following locations:
+    #   a default `:region` is searched for in the following locations:
     #
     #   * `Aws.config[:region]`
     #   * `ENV['AWS_REGION']`
@@ -108,6 +122,12 @@ module Aws::Kafka
     #   When set to `true`, a thread polling for endpoints will be running in
     #   the background every 60 secs (default). Defaults to `false`.
     #
+    # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
+    #   Used only in `adaptive` retry mode. When true, the request will sleep
+    #   until there is sufficient client-side capacity to retry the request.
+    #   When false, the request will raise a `RetryCapacityNotAvailableError` and will
+    #   not retry instead of sleeping.
+    #
     # @option options [Boolean] :client_side_monitoring (false)
     #   When `true`, client-side metrics will be collected for all API requests from
     #   this client.
@@ -132,6 +152,10 @@ module Aws::Kafka
     #   When `true`, an attempt is made to coerce request parameters into
     #   the required types.
     #
+    # @option options [Boolean] :correct_clock_skew (true)
+    #   Used only in `standard` and `adaptive` retry modes. Specifies whether to apply
+    #   a clock skew correction and retry requests with skewed client clocks.
+    #
     # @option options [Boolean] :disable_host_prefix_injection (false)
     #   Set to true to disable SDK automatically adding host prefix
     #   to default service endpoint when available.
@@ -139,7 +163,7 @@ module Aws::Kafka
     # @option options [String] :endpoint
     #   The client endpoint is normally constructed from the `:region`
     #   option. You should only configure an `:endpoint` when connecting
-    #   to test endpoints. This should be avalid HTTP(S) URI.
+    #   to test or custom endpoints. This should be a valid HTTP(S) URI.
     #
     # @option options [Integer] :endpoint_cache_max_entries (1000)
     #   Used for the maximum size limit of the LRU cache storing endpoints data
@@ -154,7 +178,7 @@ module Aws::Kafka
     #   requests fetching endpoints information. Defaults to 60 sec.
     #
     # @option options [Boolean] :endpoint_discovery (false)
-    #   When set to `true`, endpoint discovery will be enabled for operations when available. Defaults to `false`.
+    #   When set to `true`, endpoint discovery will be enabled for operations when available.
     #
     # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
     #   The log formatter.
@@ -166,15 +190,29 @@ module Aws::Kafka
     #   The Logger instance to send log messages to. If this option
     #   is not set, logging will be disabled.
     #
+    # @option options [Integer] :max_attempts (3)
+    #   An integer representing the maximum number of attempts that will be made for
+    #   a single request, including the initial attempt. For example,
+    #   setting this value to 5 will result in a request being retried up to
+    #   4 times. Used in `standard` and `adaptive` retry modes.
+    #
     # @option options [String] :profile ("default")
     #   Used when loading credentials from the shared credentials file
     #   at HOME/.aws/credentials. When not specified, 'default' is used.
     #
+    # @option options [Proc] :retry_backoff
+    #   A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
+    #   This option is only used in the `legacy` retry mode.
+    #
     # @option options [Float] :retry_base_delay (0.3)
-    #   The base delay in seconds used by the default backoff function.
+    #   The base delay in seconds used by the default backoff function. This option
+    #   is only used in the `legacy` retry mode.
     #
     # @option options [Symbol] :retry_jitter (:none)
-    #   A delay randomiser function used by the default backoff function. Some predefined functions can be referenced by name - :none, :equal, :full, otherwise a Proc that takes and returns a number.
+    #   A delay randomiser function used by the default backoff function.
+    #   Some predefined functions can be referenced by name - :none, :equal, :full,
+    #   otherwise a Proc that takes and returns a number. This option is only used
+    #   in the `legacy` retry mode.
     #
     #   @see https://www.awsarchitectureblog.com/2015/03/backoff.html
     #
@@ -182,11 +220,30 @@ module Aws::Kafka
     #   The maximum number of times to retry failed requests. Only
     #   ~ 500 level server errors and certain ~ 400 level client errors
     #   are retried. Generally, these are throttling errors, data
-    #   checksum errors, networking errors, timeout errors and auth
-    #   errors from expired credentials.
+    #   checksum errors, networking errors, timeout errors, auth errors,
+    #   endpoint discovery, and errors from expired credentials.
+    #   This option is only used in the `legacy` retry mode.
     #
     # @option options [Integer] :retry_max_delay (0)
-    #   The maximum number of seconds to delay between retries (0 for no limit) used by the default backoff function.
+    #   The maximum number of seconds to delay between retries (0 for no limit)
+    #   used by the default backoff function. This option is only used in the
+    #   `legacy` retry mode.
+    #
+    # @option options [String] :retry_mode ("legacy")
+    #   Specifies which retry algorithm to use. Values are:
+    #
+    #   * `legacy` - The pre-existing retry behavior. This is the default value if
+    #     no retry mode is provided.
+    #
+    #   * `standard` - A standardized set of retry rules across the AWS SDKs.
+    #     This includes support for retry quotas, which limit the number of
+    #     unsuccessful retries a client can make.
+    #
+    #   * `adaptive` - An experimental retry mode that includes all the
+    #     functionality of `standard` mode along with automatic client-side
+    #     throttling. This is a provisional mode that may change behavior
+    #     in the future.
+    #
     #
     # @option options [String] :secret_access_key
     #
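The new retry options compose: `:retry_mode` selects the algorithm, `:max_attempts` bounds attempts in the standard and adaptive modes, and `:adaptive_retry_wait_to_fill` / `:correct_clock_skew` tune behavior within them. A hedged configuration sketch (values are illustrative):

    client = Aws::Kafka::Client.new(
      region: 'us-east-1',
      retry_mode: 'standard',   # or 'legacy' (the default) / 'adaptive'
      max_attempts: 5,          # initial attempt plus up to 4 retries
      correct_clock_skew: true  # retry requests rejected for clock skew
    )

    # 'adaptive' adds client-side throttling; raise instead of sleeping
    # when no retry capacity is available:
    throttled = Aws::Kafka::Client.new(
      region: 'us-east-1',
      retry_mode: 'adaptive',
      adaptive_retry_wait_to_fill: false # raises RetryCapacityNotAvailableError
    )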
@@ -209,16 +266,15 @@ module Aws::Kafka
     #   requests through. Formatted like 'http://proxy.com:123'.
     #
     # @option options [Float] :http_open_timeout (15) The number of
-    #   seconds to wait when opening a HTTP session before rasing a
+    #   seconds to wait when opening an HTTP session before raising a
     #   `Timeout::Error`.
     #
     # @option options [Integer] :http_read_timeout (60) The default
     #   number of seconds to wait for response data. This value can
-    #   safely be set
-    #   per-request on the session yeidled by {#session_for}.
+    #   safely be set per-request on the session.
     #
     # @option options [Float] :http_idle_timeout (5) The number of
-    #   seconds a connection is allowed to sit idble before it is
+    #   seconds a connection is allowed to sit idle before it is
     #   considered stale. Stale connections are closed and removed
     #   from the pool before making a request.
     #
@@ -227,7 +283,7 @@ module Aws::Kafka
     #   request body. This option has no effect unless the request has
     #   "Expect" header set to "100-continue". Defaults to `nil` which
     #   disables this behaviour. This value can safely be set per
-    #   request on the session yeidled by {#session_for}.
+    #   request on the session.
     #
     # @option options [Boolean] :http_wire_trace (false) When `true`,
     #   HTTP debug output will be sent to the `:logger`.
@@ -278,6 +334,9 @@ module Aws::Kafka
     # @option params [required, String] :kafka_version
     #   The version of Apache Kafka.
     #
+    # @option params [Types::LoggingInfo] :logging_info
+    #   LoggingInfo details.
+    #
     # @option params [required, Integer] :number_of_broker_nodes
     #   The number of Kafka broker nodes in the Amazon MSK cluster.
     #
@@ -328,6 +387,23 @@ module Aws::Kafka
     #     },
     #     enhanced_monitoring: "DEFAULT", # accepts DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER
     #     kafka_version: "__stringMin1Max128", # required
+    #     logging_info: {
+    #       broker_logs: { # required
+    #         cloud_watch_logs: {
+    #           enabled: false, # required
+    #           log_group: "__string",
+    #         },
+    #         firehose: {
+    #           delivery_stream: "__string",
+    #           enabled: false, # required
+    #         },
+    #         s3: {
+    #           bucket: "__string",
+    #           enabled: false, # required
+    #           prefix: "__string",
+    #         },
+    #       },
+    #     },
     #     number_of_broker_nodes: 1, # required
     #     open_monitoring: {
     #       prometheus: { # required
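A focused sketch of the new `:logging_info` parameter on create_cluster, enabling only CloudWatch Logs delivery; the other required cluster parameters are placeholders and `broker_node_group_info` is assumed to be defined elsewhere:

    resp = client.create_cluster(
      cluster_name: 'example-cluster',                # placeholder
      kafka_version: '2.2.1',                         # placeholder
      number_of_broker_nodes: 3,
      broker_node_group_info: broker_node_group_info, # assumed defined elsewhere
      logging_info: {
        broker_logs: {
          cloud_watch_logs: {
            enabled: true,
            log_group: '/msk/example-cluster'         # assumed pre-created log group
          }
        }
      }
    )
    puts resp.cluster_arn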
@@ -364,7 +440,7 @@ module Aws::Kafka
     # @option params [String] :description
     #   The description of the configuration.
     #
-    # @option params [required, Array<String>] :kafka_versions
+    # @option params [Array<String>] :kafka_versions
     #   The versions of Apache Kafka with which you can use this MSK
     #   configuration.
     #
@@ -385,7 +461,7 @@ module Aws::Kafka
     #
     #   resp = client.create_configuration({
     #     description: "__string",
-    #     kafka_versions: ["__string"], # required
+    #     kafka_versions: ["__string"],
     #     name: "__string", # required
     #     server_properties: "data", # required
     #   })
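Since kafka_versions is no longer required, a configuration can now be created without pinning it to specific Kafka versions. A sketch with illustrative server properties:

    server_properties = <<~PROPS
      auto.create.topics.enable = false
      log.retention.hours = 72
    PROPS

    resp = client.create_configuration(
      name: 'example-config',               # placeholder name
      description: 'Example MSK configuration',
      server_properties: server_properties  # kafka_versions omitted: now optional
    )
    puts resp.arn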
@@ -474,6 +550,13 @@ module Aws::Kafka
     #   resp.cluster_info.current_broker_software_info.configuration_arn #=> String
     #   resp.cluster_info.current_broker_software_info.configuration_revision #=> Integer
     #   resp.cluster_info.current_broker_software_info.kafka_version #=> String
+    #   resp.cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+    #   resp.cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+    #   resp.cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+    #   resp.cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+    #   resp.cluster_info.logging_info.broker_logs.s3.bucket #=> String
+    #   resp.cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+    #   resp.cluster_info.logging_info.broker_logs.s3.prefix #=> String
     #   resp.cluster_info.current_version #=> String
     #   resp.cluster_info.encryption_info.encryption_at_rest.data_volume_kms_key_id #=> String
     #   resp.cluster_info.encryption_info.encryption_in_transit.client_broker #=> String, one of "TLS", "TLS_PLAINTEXT", "PLAINTEXT"
@@ -518,6 +601,9 @@ module Aws::Kafka
     #   resp.cluster_operation_info.end_time #=> Time
     #   resp.cluster_operation_info.error_info.error_code #=> String
     #   resp.cluster_operation_info.error_info.error_string #=> String
+    #   resp.cluster_operation_info.operation_steps #=> Array
+    #   resp.cluster_operation_info.operation_steps[0].step_info.step_status #=> String
+    #   resp.cluster_operation_info.operation_steps[0].step_name #=> String
     #   resp.cluster_operation_info.operation_arn #=> String
     #   resp.cluster_operation_info.operation_state #=> String
     #   resp.cluster_operation_info.operation_type #=> String
@@ -530,6 +616,14 @@ module Aws::Kafka
     #   resp.cluster_operation_info.source_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info.source_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info.source_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+    #   resp.cluster_operation_info.source_cluster_info.kafka_version #=> String
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+    #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.s3.prefix #=> String
     #   resp.cluster_operation_info.target_cluster_info.broker_ebs_volume_info #=> Array
     #   resp.cluster_operation_info.target_cluster_info.broker_ebs_volume_info[0].kafka_broker_node_id #=> String
     #   resp.cluster_operation_info.target_cluster_info.broker_ebs_volume_info[0].volume_size_gb #=> Integer
@@ -539,6 +633,14 @@ module Aws::Kafka
     #   resp.cluster_operation_info.target_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info.target_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info.target_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+    #   resp.cluster_operation_info.target_cluster_info.kafka_version #=> String
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+    #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.s3.prefix #=> String
     #
     # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeClusterOperation AWS API Documentation
     #
@@ -656,6 +758,37 @@ module Aws::Kafka
       req.send_request(options)
     end
 
+    # Gets the Apache Kafka versions to which you can update the MSK
+    # cluster.
+    #
+    # @option params [String] :cluster_arn
+    #
+    # @return [Types::GetCompatibleKafkaVersionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::GetCompatibleKafkaVersionsResponse#compatible_kafka_versions #compatible_kafka_versions} => Array<Types::CompatibleKafkaVersion>
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.get_compatible_kafka_versions({
+    #     cluster_arn: "__string",
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.compatible_kafka_versions #=> Array
+    #   resp.compatible_kafka_versions[0].source_version #=> String
+    #   resp.compatible_kafka_versions[0].target_versions #=> Array
+    #   resp.compatible_kafka_versions[0].target_versions[0] #=> String
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetCompatibleKafkaVersions AWS API Documentation
+    #
+    # @overload get_compatible_kafka_versions(params = {})
+    #   @param [Hash] params ({})
+    def get_compatible_kafka_versions(params = {}, options = {})
+      req = build_request(:get_compatible_kafka_versions, params)
+      req.send_request(options)
+    end
+
     # Returns a list of all the operations that have been performed on the
     # specified MSK cluster.
     #
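A hedged usage sketch for the new operation, listing the upgrade targets for a given cluster (the ARN is a placeholder):

    cluster_arn = 'arn:aws:kafka:us-east-1:123456789012:cluster/example/...' # placeholder

    resp = client.get_compatible_kafka_versions(cluster_arn: cluster_arn)
    resp.compatible_kafka_versions.each do |v|
      puts "from #{v.source_version} -> #{v.target_versions.join(', ')}"
    end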
@@ -670,6 +803,8 @@ module Aws::Kafka
     #   * {Types::ListClusterOperationsResponse#cluster_operation_info_list #cluster_operation_info_list} => Array<Types::ClusterOperationInfo>
     #   * {Types::ListClusterOperationsResponse#next_token #next_token} => String
     #
+    # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+    #
     # @example Request syntax with placeholder values
     #
     #   resp = client.list_cluster_operations({
@@ -687,6 +822,9 @@ module Aws::Kafka
     #   resp.cluster_operation_info_list[0].end_time #=> Time
     #   resp.cluster_operation_info_list[0].error_info.error_code #=> String
     #   resp.cluster_operation_info_list[0].error_info.error_string #=> String
+    #   resp.cluster_operation_info_list[0].operation_steps #=> Array
+    #   resp.cluster_operation_info_list[0].operation_steps[0].step_info.step_status #=> String
+    #   resp.cluster_operation_info_list[0].operation_steps[0].step_name #=> String
     #   resp.cluster_operation_info_list[0].operation_arn #=> String
     #   resp.cluster_operation_info_list[0].operation_state #=> String
     #   resp.cluster_operation_info_list[0].operation_type #=> String
@@ -699,6 +837,14 @@ module Aws::Kafka
     #   resp.cluster_operation_info_list[0].source_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info_list[0].source_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info_list[0].source_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+    #   resp.cluster_operation_info_list[0].source_cluster_info.kafka_version #=> String
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+    #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.s3.prefix #=> String
     #   resp.cluster_operation_info_list[0].target_cluster_info.broker_ebs_volume_info #=> Array
     #   resp.cluster_operation_info_list[0].target_cluster_info.broker_ebs_volume_info[0].kafka_broker_node_id #=> String
     #   resp.cluster_operation_info_list[0].target_cluster_info.broker_ebs_volume_info[0].volume_size_gb #=> Integer
@@ -708,6 +854,14 @@ module Aws::Kafka
     #   resp.cluster_operation_info_list[0].target_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info_list[0].target_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
     #   resp.cluster_operation_info_list[0].target_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+    #   resp.cluster_operation_info_list[0].target_cluster_info.kafka_version #=> String
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.firehose.enabled #=> Boolean
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.s3.bucket #=> String
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.s3.enabled #=> Boolean
+    #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.s3.prefix #=> String
     #   resp.next_token #=> String
     #
     # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations AWS API Documentation
@@ -732,6 +886,8 @@ module Aws::Kafka
     #   * {Types::ListClustersResponse#cluster_info_list #cluster_info_list} => Array<Types::ClusterInfo>
     #   * {Types::ListClustersResponse#next_token #next_token} => String
     #
+    # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+    #
     # @example Request syntax with placeholder values
     #
     #   resp = client.list_clusters({
@@ -759,6 +915,13 @@ module Aws::Kafka
     #   resp.cluster_info_list[0].current_broker_software_info.configuration_arn #=> String
     #   resp.cluster_info_list[0].current_broker_software_info.configuration_revision #=> Integer
     #   resp.cluster_info_list[0].current_broker_software_info.kafka_version #=> String
+    #   resp.cluster_info_list[0].logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
+    #   resp.cluster_info_list[0].logging_info.broker_logs.cloud_watch_logs.log_group #=> String
+    #   resp.cluster_info_list[0].logging_info.broker_logs.firehose.delivery_stream #=> String
+    #   resp.cluster_info_list[0].logging_info.broker_logs.firehose.enabled #=> Boolean
+    #   resp.cluster_info_list[0].logging_info.broker_logs.s3.bucket #=> String
+    #   resp.cluster_info_list[0].logging_info.broker_logs.s3.enabled #=> Boolean
+    #   resp.cluster_info_list[0].logging_info.broker_logs.s3.prefix #=> String
     #   resp.cluster_info_list[0].current_version #=> String
     #   resp.cluster_info_list[0].encryption_info.encryption_at_rest.data_volume_kms_key_id #=> String
     #   resp.cluster_info_list[0].encryption_info.encryption_in_transit.client_broker #=> String, one of "TLS", "TLS_PLAINTEXT", "PLAINTEXT"
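The pageable-response note added to each list_* operation means the response enumerates pages. A sketch iterating every cluster across pages, assuming a configured client as above:

    client.list_clusters.each do |page| # each yields one page per API call
      page.cluster_info_list.each do |cluster|
        puts "#{cluster.cluster_name}: #{cluster.current_version}"
      end
    end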
@@ -795,6 +958,8 @@ module Aws::Kafka
     #   * {Types::ListConfigurationRevisionsResponse#next_token #next_token} => String
     #   * {Types::ListConfigurationRevisionsResponse#revisions #revisions} => Array<Types::ConfigurationRevision>
     #
+    # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+    #
     # @example Request syntax with placeholder values
     #
     #   resp = client.list_configuration_revisions({
@@ -831,6 +996,8 @@ module Aws::Kafka
     #   * {Types::ListConfigurationsResponse#configurations #configurations} => Array<Types::Configuration>
     #   * {Types::ListConfigurationsResponse#next_token #next_token} => String
     #
+    # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+    #
     # @example Request syntax with placeholder values
     #
     #   resp = client.list_configurations({
@@ -872,6 +1039,8 @@ module Aws::Kafka
     #   * {Types::ListKafkaVersionsResponse#kafka_versions #kafka_versions} => Array<Types::KafkaVersion>
     #   * {Types::ListKafkaVersionsResponse#next_token #next_token} => String
     #
+    # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+    #
     # @example Request syntax with placeholder values
     #
     #   resp = client.list_kafka_versions({
@@ -908,6 +1077,8 @@ module Aws::Kafka
     #   * {Types::ListNodesResponse#next_token #next_token} => String
     #   * {Types::ListNodesResponse#node_info_list #node_info_list} => Array<Types::NodeInfo>
     #
+    # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+    #
     # @example Request syntax with placeholder values
     #
     #   resp = client.list_nodes({
@@ -1160,6 +1331,50 @@ module Aws::Kafka
       req.send_request(options)
     end
 
+    # Updates the Apache Kafka version for the cluster.
+    #
+    # @option params [required, String] :cluster_arn
+    #
+    # @option params [Types::ConfigurationInfo] :configuration_info
+    #   Specifies the configuration to use for the brokers.
+    #
+    # @option params [required, String] :current_version
+    #   Current cluster version.
+    #
+    # @option params [required, String] :target_kafka_version
+    #   Target Kafka version.
+    #
+    # @return [Types::UpdateClusterKafkaVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::UpdateClusterKafkaVersionResponse#cluster_arn #cluster_arn} => String
+    #   * {Types::UpdateClusterKafkaVersionResponse#cluster_operation_arn #cluster_operation_arn} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.update_cluster_kafka_version({
+    #     cluster_arn: "__string", # required
+    #     configuration_info: {
+    #       arn: "__string", # required
+    #       revision: 1, # required
+    #     },
+    #     current_version: "__string", # required
+    #     target_kafka_version: "__string", # required
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.cluster_arn #=> String
+    #   resp.cluster_operation_arn #=> String
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterKafkaVersion AWS API Documentation
+    #
+    # @overload update_cluster_kafka_version(params = {})
+    #   @param [Hash] params ({})
+    def update_cluster_kafka_version(params = {}, options = {})
+      req = build_request(:update_cluster_kafka_version, params)
+      req.send_request(options)
+    end
+
     # Updates the monitoring settings for the cluster. You can use this
     # operation to specify which Apache Kafka metrics you want Amazon MSK to
     # send to Amazon CloudWatch. You can also specify settings for open
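Taken together with get_compatible_kafka_versions and describe_cluster (whose response exposes cluster_info.current_version), the new operation supports an in-place upgrade flow. A hedged sketch that picks the first compatible target (the ARN is a placeholder):

    arn = 'arn:aws:kafka:...' # placeholder cluster ARN

    current = client.describe_cluster(cluster_arn: arn).cluster_info.current_version
    target  = client.get_compatible_kafka_versions(cluster_arn: arn)
                    .compatible_kafka_versions.first.target_versions.first

    resp = client.update_cluster_kafka_version(
      cluster_arn: arn,
      current_version: current,       # required: guards against concurrent updates
      target_kafka_version: target
    )
    puts resp.cluster_operation_arn   # poll with describe_cluster_operation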
@@ -1178,6 +1393,9 @@ module Aws::Kafka
     # @option params [Types::OpenMonitoringInfo] :open_monitoring
     #   The settings for open monitoring.
     #
+    # @option params [Types::LoggingInfo] :logging_info
+    #   LoggingInfo details.
+    #
     # @return [Types::UpdateMonitoringResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
     #
     #   * {Types::UpdateMonitoringResponse#cluster_arn #cluster_arn} => String
@@ -1199,6 +1417,23 @@ module Aws::Kafka
     #         },
     #       },
     #     },
+    #     logging_info: {
+    #       broker_logs: { # required
+    #         cloud_watch_logs: {
+    #           enabled: false, # required
+    #           log_group: "__string",
+    #         },
+    #         firehose: {
+    #           delivery_stream: "__string",
+    #           enabled: false, # required
+    #         },
+    #         s3: {
+    #           bucket: "__string",
+    #           enabled: false, # required
+    #           prefix: "__string",
+    #         },
+    #       },
+    #     },
     #   })
     #
     # @example Response structure
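Because update_monitoring now accepts `:logging_info` too, broker log delivery can be turned on for an existing cluster. A sketch enabling S3 delivery, reusing the placeholder `arn` and `current` version from the upgrade sketch above (the bucket name is also a placeholder):

    resp = client.update_monitoring(
      cluster_arn: arn,                 # placeholder, as above
      current_version: current,         # required current cluster version
      logging_info: {
        broker_logs: {
          s3: {
            enabled: true,
            bucket: 'example-msk-logs', # assumed existing bucket
            prefix: 'broker-logs/'
          }
        }
      }
    )
    puts resp.cluster_operation_arn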
@@ -1228,7 +1463,7 @@
         params: params,
         config: config)
       context[:gem_name] = 'aws-sdk-kafka'
-      context[:gem_version] = '1.17.0'
+      context[:gem_version] = '1.22.1'
       Seahorse::Client::Request.new(handlers, context)
     end