aws-sdk-kafka 1.18.0 → 1.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/lib/aws-sdk-kafka.rb +9 -4
- data/lib/aws-sdk-kafka/client.rb +171 -16
- data/lib/aws-sdk-kafka/client_api.rb +75 -1
- data/lib/aws-sdk-kafka/errors.rb +32 -8
- data/lib/aws-sdk-kafka/resource.rb +3 -0
- data/lib/aws-sdk-kafka/types.rb +238 -1
- metadata +5 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
-SHA256:
-  metadata.gz:
-  data.tar.gz:
+SHA256:
+  metadata.gz: 6a328f4d7fb601718cf817217069d1fa958ca53dc01d0c44ef0c3b874d9ca26f
+  data.tar.gz: 60e7808e7d995601805fbc30d99c7a53d26c721d004e87a9d2579e60539fa02a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 54e57715dc7b5a0a5fd256710904a172ceb0f71fdf4d75f3c68e6f8d953c2b1e4b5b3bbd1d209c5b3e872da745ce9a45f0cfb0666602bf87cb05be899d937890
+  data.tar.gz: a0bd560858150d245c03132d40d7d84d26ee0399bc78a89f7fe35bd49d9a943194eb102cc453b13f9747401d6a2108b824d0ce6adb7224e573caad01783ef1a3
data/lib/aws-sdk-kafka.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # WARNING ABOUT GENERATED CODE
 #
 # This file is generated. See the contributing guide for more information:
@@ -24,17 +26,20 @@ require_relative 'aws-sdk-kafka/customizations'
 # methods each accept a hash of request parameters and return a response
 # structure.
 #
+#     kafka = Aws::Kafka::Client.new
+#     resp = kafka.create_cluster(params)
+#
 # See {Client} for more information.
 #
 # # Errors
 #
-# Errors returned from Managed Streaming for Kafka
-# extend {Errors::ServiceError}.
+# Errors returned from Managed Streaming for Kafka are defined in the
+# {Errors} module and all extend {Errors::ServiceError}.
 #
 #     begin
 #       # do stuff
 #     rescue Aws::Kafka::Errors::ServiceError
-#       # rescues all
+#       # rescues all Managed Streaming for Kafka API errors
 #     end
 #
 # See {Errors} for more information.
@@ -42,6 +47,6 @@ require_relative 'aws-sdk-kafka/customizations'
 # @service
 module Aws::Kafka

-  GEM_VERSION = '1.18.0'
+  GEM_VERSION = '1.23.0'

 end
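A minimal usage sketch of the pattern documented above (the region value and the list_clusters call are illustrative placeholders):

    require 'aws-sdk-kafka'

    # Region/credentials are resolved as described in the client options.
    kafka = Aws::Kafka::Client.new(region: 'us-east-1')

    begin
      kafka.list_clusters
    rescue Aws::Kafka::Errors::ServiceError => e
      # Rescues all Managed Streaming for Kafka API errors.
      warn "MSK request failed: #{e.class}: #{e.message}"
    end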
data/lib/aws-sdk-kafka/client.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # WARNING ABOUT GENERATED CODE
 #
 # This file is generated. See the contributing guide for more information:
@@ -24,12 +26,25 @@ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
 require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
 require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
 require 'aws-sdk-core/plugins/transfer_encoding.rb'
+require 'aws-sdk-core/plugins/http_checksum.rb'
 require 'aws-sdk-core/plugins/signature_v4.rb'
 require 'aws-sdk-core/plugins/protocols/rest_json.rb'

 Aws::Plugins::GlobalConfiguration.add_identifier(:kafka)

 module Aws::Kafka
+# An API client for Kafka. To construct a client, you need to configure a `:region` and `:credentials`.
+#
+#     client = Aws::Kafka::Client.new(
+#       region: region_name,
+#       credentials: credentials,
+#       # ...
+#     )
+#
+# For details on configuring region and credentials see
+# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
+#
+# See {#initialize} for a full list of supported configuration options.
 class Client < Seahorse::Client::Base

 include Aws::ClientStubs
@@ -57,6 +72,7 @@ module Aws::Kafka
 add_plugin(Aws::Plugins::ClientMetricsPlugin)
 add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
 add_plugin(Aws::Plugins::TransferEncoding)
+add_plugin(Aws::Plugins::HttpChecksum)
 add_plugin(Aws::Plugins::SignatureV4)
 add_plugin(Aws::Plugins::Protocols::RestJson)

@@ -93,7 +109,7 @@ module Aws::Kafka
 # @option options [required, String] :region
 #   The AWS region to connect to. The configured `:region` is
 #   used to determine the service `:endpoint`. When not passed,
-#   a default `:region` is
+#   a default `:region` is searched for in the following locations:
 #
 #   * `Aws.config[:region]`
 #   * `ENV['AWS_REGION']`
@@ -108,6 +124,12 @@ module Aws::Kafka
 #   When set to `true`, a thread polling for endpoints will be running in
 #   the background every 60 secs (default). Defaults to `false`.
 #
+# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
+#   Used only in `adaptive` retry mode. When true, the request will sleep
+#   until there is sufficent client side capacity to retry the request.
+#   When false, the request will raise a `RetryCapacityNotAvailableError` and will
+#   not retry instead of sleeping.
+#
 # @option options [Boolean] :client_side_monitoring (false)
 #   When `true`, client-side metrics will be collected for all API requests from
 #   this client.
@@ -132,6 +154,10 @@ module Aws::Kafka
 #   When `true`, an attempt is made to coerce request parameters into
 #   the required types.
 #
+# @option options [Boolean] :correct_clock_skew (true)
+#   Used only in `standard` and adaptive retry modes. Specifies whether to apply
+#   a clock skew correction and retry requests with skewed client clocks.
+#
 # @option options [Boolean] :disable_host_prefix_injection (false)
 #   Set to true to disable SDK automatically adding host prefix
 #   to default service endpoint when available.
@@ -139,7 +165,7 @@ module Aws::Kafka
 # @option options [String] :endpoint
 #   The client endpoint is normally constructed from the `:region`
 #   option. You should only configure an `:endpoint` when connecting
-#   to test endpoints. This should be
+#   to test or custom endpoints. This should be a valid HTTP(S) URI.
 #
 # @option options [Integer] :endpoint_cache_max_entries (1000)
 #   Used for the maximum size limit of the LRU cache storing endpoints data
@@ -154,7 +180,7 @@ module Aws::Kafka
 #   requests fetching endpoints information. Defaults to 60 sec.
 #
 # @option options [Boolean] :endpoint_discovery (false)
-#   When set to `true`, endpoint discovery will be enabled for operations when available.
+#   When set to `true`, endpoint discovery will be enabled for operations when available.
 #
 # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
 #   The log formatter.
@@ -166,15 +192,29 @@ module Aws::Kafka
 #   The Logger instance to send log messages to. If this option
 #   is not set, logging will be disabled.
 #
+# @option options [Integer] :max_attempts (3)
+#   An integer representing the maximum number attempts that will be made for
+#   a single request, including the initial attempt. For example,
+#   setting this value to 5 will result in a request being retried up to
+#   4 times. Used in `standard` and `adaptive` retry modes.
+#
 # @option options [String] :profile ("default")
 #   Used when loading credentials from the shared credentials file
 #   at HOME/.aws/credentials. When not specified, 'default' is used.
 #
+# @option options [Proc] :retry_backoff
+#   A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
+#   This option is only used in the `legacy` retry mode.
+#
 # @option options [Float] :retry_base_delay (0.3)
-#   The base delay in seconds used by the default backoff function.
+#   The base delay in seconds used by the default backoff function. This option
+#   is only used in the `legacy` retry mode.
 #
 # @option options [Symbol] :retry_jitter (:none)
-#   A delay randomiser function used by the default backoff function.
+#   A delay randomiser function used by the default backoff function.
+#   Some predefined functions can be referenced by name - :none, :equal, :full,
+#   otherwise a Proc that takes and returns a number. This option is only used
+#   in the `legacy` retry mode.
 #
 # @see https://www.awsarchitectureblog.com/2015/03/backoff.html
 #
@@ -182,11 +222,30 @@ module Aws::Kafka
 #   The maximum number of times to retry failed requests. Only
 #   ~ 500 level server errors and certain ~ 400 level client errors
 #   are retried. Generally, these are throttling errors, data
-#   checksum errors, networking errors, timeout errors
-#   errors from expired credentials.
+#   checksum errors, networking errors, timeout errors, auth errors,
+#   endpoint discovery, and errors from expired credentials.
+#   This option is only used in the `legacy` retry mode.
 #
 # @option options [Integer] :retry_max_delay (0)
-#   The maximum number of seconds to delay between retries (0 for no limit)
+#   The maximum number of seconds to delay between retries (0 for no limit)
+#   used by the default backoff function. This option is only used in the
+#   `legacy` retry mode.
+#
+# @option options [String] :retry_mode ("legacy")
+#   Specifies which retry algorithm to use. Values are:
+#
+#   * `legacy` - The pre-existing retry behavior. This is default value if
+#     no retry mode is provided.
+#
+#   * `standard` - A standardized set of retry rules across the AWS SDKs.
+#     This includes support for retry quotas, which limit the number of
+#     unsuccessful retries a client can make.
+#
+#   * `adaptive` - An experimental retry mode that includes all the
+#     functionality of `standard` mode along with automatic client side
+#     throttling. This is a provisional mode that may change behavior
+#     in the future.
+#
 #
 # @option options [String] :secret_access_key
 #
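The retry-related options introduced above (`:retry_mode`, `:max_attempts`, `:adaptive_retry_wait_to_fill`, `:correct_clock_skew`) are ordinary constructor options. A minimal sketch, with illustrative values:

    # Opt into the newer 'standard' retry algorithm instead of 'legacy'.
    kafka = Aws::Kafka::Client.new(
      region: 'us-east-1',        # illustrative
      retry_mode: 'standard',     # 'legacy' (default), 'standard', or 'adaptive'
      max_attempts: 5,            # initial attempt plus up to 4 retries
      correct_clock_skew: true    # retry requests with skewed client clocks
    )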
@@ -209,16 +268,15 @@ module Aws::Kafka
 #   requests through. Formatted like 'http://proxy.com:123'.
 #
 # @option options [Float] :http_open_timeout (15) The number of
-#   seconds to wait when opening a HTTP session before
+#   seconds to wait when opening a HTTP session before raising a
 #   `Timeout::Error`.
 #
 # @option options [Integer] :http_read_timeout (60) The default
 #   number of seconds to wait for response data. This value can
-#   safely be set
-#   per-request on the session yeidled by {#session_for}.
+#   safely be set per-request on the session.
 #
 # @option options [Float] :http_idle_timeout (5) The number of
-#   seconds a connection is allowed to sit
+#   seconds a connection is allowed to sit idle before it is
 #   considered stale. Stale connections are closed and removed
 #   from the pool before making a request.
 #
@@ -227,7 +285,7 @@ module Aws::Kafka
 #   request body. This option has no effect unless the request has
 #   "Expect" header set to "100-continue". Defaults to `nil` which
 #   disables this behaviour. This value can safely be set per
-#   request on the session
+#   request on the session.
 #
 # @option options [Boolean] :http_wire_trace (false) When `true`,
 #   HTTP debug output will be sent to the `:logger`.
@@ -384,7 +442,7 @@ module Aws::Kafka
 # @option params [String] :description
 #   The description of the configuration.
 #
-# @option params [
+# @option params [Array<String>] :kafka_versions
 #   The versions of Apache Kafka with which you can use this MSK
 #   configuration.
 #
@@ -405,7 +463,7 @@ module Aws::Kafka
 #
 #   resp = client.create_configuration({
 #     description: "__string",
-#     kafka_versions: ["__string"],
+#     kafka_versions: ["__string"],
 #     name: "__string", # required
 #     server_properties: "data", # required
 #   })
@@ -545,6 +603,9 @@ module Aws::Kafka
 #   resp.cluster_operation_info.end_time #=> Time
 #   resp.cluster_operation_info.error_info.error_code #=> String
 #   resp.cluster_operation_info.error_info.error_string #=> String
+#   resp.cluster_operation_info.operation_steps #=> Array
+#   resp.cluster_operation_info.operation_steps[0].step_info.step_status #=> String
+#   resp.cluster_operation_info.operation_steps[0].step_name #=> String
 #   resp.cluster_operation_info.operation_arn #=> String
 #   resp.cluster_operation_info.operation_state #=> String
 #   resp.cluster_operation_info.operation_type #=> String
@@ -557,6 +618,7 @@ module Aws::Kafka
 #   resp.cluster_operation_info.source_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info.source_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info.source_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+#   resp.cluster_operation_info.source_cluster_info.kafka_version #=> String
 #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
 #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
 #   resp.cluster_operation_info.source_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
@@ -573,6 +635,7 @@ module Aws::Kafka
 #   resp.cluster_operation_info.target_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info.target_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info.target_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+#   resp.cluster_operation_info.target_cluster_info.kafka_version #=> String
 #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
 #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
 #   resp.cluster_operation_info.target_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
@@ -697,6 +760,37 @@ module Aws::Kafka
 req.send_request(options)
 end

+# Gets the Apache Kafka versions to which you can update the MSK
+# cluster.
+#
+# @option params [String] :cluster_arn
+#
+# @return [Types::GetCompatibleKafkaVersionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+#
+#   * {Types::GetCompatibleKafkaVersionsResponse#compatible_kafka_versions #compatible_kafka_versions} => Array<Types::CompatibleKafkaVersion>
+#
+# @example Request syntax with placeholder values
+#
+#   resp = client.get_compatible_kafka_versions({
+#     cluster_arn: "__string",
+#   })
+#
+# @example Response structure
+#
+#   resp.compatible_kafka_versions #=> Array
+#   resp.compatible_kafka_versions[0].source_version #=> String
+#   resp.compatible_kafka_versions[0].target_versions #=> Array
+#   resp.compatible_kafka_versions[0].target_versions[0] #=> String
+#
+# @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetCompatibleKafkaVersions AWS API Documentation
+#
+# @overload get_compatible_kafka_versions(params = {})
+# @param [Hash] params ({})
+def get_compatible_kafka_versions(params = {}, options = {})
+  req = build_request(:get_compatible_kafka_versions, params)
+  req.send_request(options)
+end
+
 # Returns a list of all the operations that have been performed on the
 # specified MSK cluster.
 #
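The new GetCompatibleKafkaVersions operation documented above can be used to discover valid upgrade targets for a cluster. A rough sketch (the cluster ARN is a hypothetical placeholder):

    cluster_arn = 'arn:aws:kafka:us-east-1:111122223333:cluster/example/...' # hypothetical

    resp = kafka.get_compatible_kafka_versions(cluster_arn: cluster_arn)
    resp.compatible_kafka_versions.each do |compat|
      # Each entry maps a source version to the versions it can upgrade to.
      puts "#{compat.source_version} -> #{compat.target_versions.join(', ')}"
    end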
@@ -711,6 +805,8 @@ module Aws::Kafka
 # * {Types::ListClusterOperationsResponse#cluster_operation_info_list #cluster_operation_info_list} => Array<Types::ClusterOperationInfo>
 # * {Types::ListClusterOperationsResponse#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.list_cluster_operations({
@@ -728,6 +824,9 @@ module Aws::Kafka
 #   resp.cluster_operation_info_list[0].end_time #=> Time
 #   resp.cluster_operation_info_list[0].error_info.error_code #=> String
 #   resp.cluster_operation_info_list[0].error_info.error_string #=> String
+#   resp.cluster_operation_info_list[0].operation_steps #=> Array
+#   resp.cluster_operation_info_list[0].operation_steps[0].step_info.step_status #=> String
+#   resp.cluster_operation_info_list[0].operation_steps[0].step_name #=> String
 #   resp.cluster_operation_info_list[0].operation_arn #=> String
 #   resp.cluster_operation_info_list[0].operation_state #=> String
 #   resp.cluster_operation_info_list[0].operation_type #=> String
@@ -740,6 +839,7 @@ module Aws::Kafka
 #   resp.cluster_operation_info_list[0].source_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info_list[0].source_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info_list[0].source_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+#   resp.cluster_operation_info_list[0].source_cluster_info.kafka_version #=> String
 #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
 #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
 #   resp.cluster_operation_info_list[0].source_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
@@ -756,6 +856,7 @@ module Aws::Kafka
 #   resp.cluster_operation_info_list[0].target_cluster_info.open_monitoring.prometheus.jmx_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info_list[0].target_cluster_info.open_monitoring.prometheus.node_exporter.enabled_in_broker #=> Boolean
 #   resp.cluster_operation_info_list[0].target_cluster_info.enhanced_monitoring #=> String, one of "DEFAULT", "PER_BROKER", "PER_TOPIC_PER_BROKER"
+#   resp.cluster_operation_info_list[0].target_cluster_info.kafka_version #=> String
 #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.cloud_watch_logs.enabled #=> Boolean
 #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.cloud_watch_logs.log_group #=> String
 #   resp.cluster_operation_info_list[0].target_cluster_info.logging_info.broker_logs.firehose.delivery_stream #=> String
@@ -787,6 +888,8 @@ module Aws::Kafka
 # * {Types::ListClustersResponse#cluster_info_list #cluster_info_list} => Array<Types::ClusterInfo>
 # * {Types::ListClustersResponse#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.list_clusters({
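Several of the list operations above now document their responses as pageable and Enumerable. A small sketch of page iteration (the max_results value and the printed field are illustrative):

    kafka.list_clusters(max_results: 20).each do |page|
      # Each yielded object is one page of results.
      page.cluster_info_list.each { |cluster| puts cluster.cluster_arn }
    end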
@@ -857,6 +960,8 @@ module Aws::Kafka
 # * {Types::ListConfigurationRevisionsResponse#next_token #next_token} => String
 # * {Types::ListConfigurationRevisionsResponse#revisions #revisions} => Array<Types::ConfigurationRevision>
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.list_configuration_revisions({
@@ -893,6 +998,8 @@ module Aws::Kafka
 # * {Types::ListConfigurationsResponse#configurations #configurations} => Array<Types::Configuration>
 # * {Types::ListConfigurationsResponse#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.list_configurations({
@@ -934,6 +1041,8 @@ module Aws::Kafka
 # * {Types::ListKafkaVersionsResponse#kafka_versions #kafka_versions} => Array<Types::KafkaVersion>
 # * {Types::ListKafkaVersionsResponse#next_token #next_token} => String
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.list_kafka_versions({
@@ -970,6 +1079,8 @@ module Aws::Kafka
 # * {Types::ListNodesResponse#next_token #next_token} => String
 # * {Types::ListNodesResponse#node_info_list #node_info_list} => Array<Types::NodeInfo>
 #
+# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+#
 # @example Request syntax with placeholder values
 #
 #   resp = client.list_nodes({
@@ -1222,6 +1333,50 @@ module Aws::Kafka
 req.send_request(options)
 end

+# Updates the Apache Kafka version for the cluster.
+#
+# @option params [required, String] :cluster_arn
+#
+# @option params [Types::ConfigurationInfo] :configuration_info
+#   Specifies the configuration to use for the brokers.
+#
+# @option params [required, String] :current_version
+#   Current cluster version.
+#
+# @option params [required, String] :target_kafka_version
+#   Target Kafka version.
+#
+# @return [Types::UpdateClusterKafkaVersionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+#
+#   * {Types::UpdateClusterKafkaVersionResponse#cluster_arn #cluster_arn} => String
+#   * {Types::UpdateClusterKafkaVersionResponse#cluster_operation_arn #cluster_operation_arn} => String
+#
+# @example Request syntax with placeholder values
+#
+#   resp = client.update_cluster_kafka_version({
+#     cluster_arn: "__string", # required
+#     configuration_info: {
+#       arn: "__string", # required
+#       revision: 1, # required
+#     },
+#     current_version: "__string", # required
+#     target_kafka_version: "__string", # required
+#   })
+#
+# @example Response structure
+#
+#   resp.cluster_arn #=> String
+#   resp.cluster_operation_arn #=> String
+#
+# @see http://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterKafkaVersion AWS API Documentation
+#
+# @overload update_cluster_kafka_version(params = {})
+# @param [Hash] params ({})
+def update_cluster_kafka_version(params = {}, options = {})
+  req = build_request(:update_cluster_kafka_version, params)
+  req.send_request(options)
+end
+
 # Updates the monitoring settings for the cluster. You can use this
 # operation to specify which Apache Kafka metrics you want Amazon MSK to
 # send to Amazon CloudWatch. You can also specify settings for open
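Combining the two new operations, a version upgrade could be kicked off roughly like this (a sketch only: the cluster ARN is a hypothetical placeholder, and describe_cluster with its current_version field is assumed from the existing MSK API rather than shown in this diff):

    compat = kafka.get_compatible_kafka_versions(cluster_arn: cluster_arn)
                  .compatible_kafka_versions.first
    target = compat && compat.target_versions.first

    if target
      # current_version is the cluster's current version string, not a Kafka version.
      current = kafka.describe_cluster(cluster_arn: cluster_arn).cluster_info.current_version
      op = kafka.update_cluster_kafka_version(
        cluster_arn: cluster_arn,
        current_version: current,
        target_kafka_version: target
      )
      puts "Started cluster operation #{op.cluster_operation_arn}"
    end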
@@ -1310,7 +1465,7 @@ module Aws::Kafka
 params: params,
 config: config)
 context[:gem_name] = 'aws-sdk-kafka'
-context[:gem_version] = '1.18.0'
+context[:gem_version] = '1.23.0'
 Seahorse::Client::Request.new(handlers, context)
 end

data/lib/aws-sdk-kafka/client_api.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # WARNING ABOUT GENERATED CODE
 #
 # This file is generated. See the contributing guide for more information:
@@ -23,7 +25,10 @@ module Aws::Kafka
 CloudWatchLogs = Shapes::StructureShape.new(name: 'CloudWatchLogs')
 ClusterInfo = Shapes::StructureShape.new(name: 'ClusterInfo')
 ClusterOperationInfo = Shapes::StructureShape.new(name: 'ClusterOperationInfo')
+ClusterOperationStep = Shapes::StructureShape.new(name: 'ClusterOperationStep')
+ClusterOperationStepInfo = Shapes::StructureShape.new(name: 'ClusterOperationStepInfo')
 ClusterState = Shapes::StringShape.new(name: 'ClusterState')
+CompatibleKafkaVersion = Shapes::StructureShape.new(name: 'CompatibleKafkaVersion')
 Configuration = Shapes::StructureShape.new(name: 'Configuration')
 ConfigurationInfo = Shapes::StructureShape.new(name: 'ConfigurationInfo')
 ConfigurationRevision = Shapes::StructureShape.new(name: 'ConfigurationRevision')
@@ -53,6 +58,8 @@ module Aws::Kafka
 ForbiddenException = Shapes::StructureShape.new(name: 'ForbiddenException')
 GetBootstrapBrokersRequest = Shapes::StructureShape.new(name: 'GetBootstrapBrokersRequest')
 GetBootstrapBrokersResponse = Shapes::StructureShape.new(name: 'GetBootstrapBrokersResponse')
+GetCompatibleKafkaVersionsRequest = Shapes::StructureShape.new(name: 'GetCompatibleKafkaVersionsRequest')
+GetCompatibleKafkaVersionsResponse = Shapes::StructureShape.new(name: 'GetCompatibleKafkaVersionsResponse')
 InternalServerErrorException = Shapes::StructureShape.new(name: 'InternalServerErrorException')
 JmxExporter = Shapes::StructureShape.new(name: 'JmxExporter')
 JmxExporterInfo = Shapes::StructureShape.new(name: 'JmxExporterInfo')
@@ -99,6 +106,8 @@ module Aws::Kafka
 UpdateBrokerStorageResponse = Shapes::StructureShape.new(name: 'UpdateBrokerStorageResponse')
 UpdateClusterConfigurationRequest = Shapes::StructureShape.new(name: 'UpdateClusterConfigurationRequest')
 UpdateClusterConfigurationResponse = Shapes::StructureShape.new(name: 'UpdateClusterConfigurationResponse')
+UpdateClusterKafkaVersionRequest = Shapes::StructureShape.new(name: 'UpdateClusterKafkaVersionRequest')
+UpdateClusterKafkaVersionResponse = Shapes::StructureShape.new(name: 'UpdateClusterKafkaVersionResponse')
 UpdateMonitoringRequest = Shapes::StructureShape.new(name: 'UpdateMonitoringRequest')
 UpdateMonitoringResponse = Shapes::StructureShape.new(name: 'UpdateMonitoringResponse')
 ZookeeperNodeInfo = Shapes::StructureShape.new(name: 'ZookeeperNodeInfo')
@@ -111,6 +120,8 @@ module Aws::Kafka
 __listOfBrokerEBSVolumeInfo = Shapes::ListShape.new(name: '__listOfBrokerEBSVolumeInfo')
 __listOfClusterInfo = Shapes::ListShape.new(name: '__listOfClusterInfo')
 __listOfClusterOperationInfo = Shapes::ListShape.new(name: '__listOfClusterOperationInfo')
+__listOfClusterOperationStep = Shapes::ListShape.new(name: '__listOfClusterOperationStep')
+__listOfCompatibleKafkaVersion = Shapes::ListShape.new(name: '__listOfCompatibleKafkaVersion')
 __listOfConfiguration = Shapes::ListShape.new(name: '__listOfConfiguration')
 __listOfConfigurationRevision = Shapes::ListShape.new(name: '__listOfConfigurationRevision')
 __listOfKafkaVersion = Shapes::ListShape.new(name: '__listOfKafkaVersion')
@@ -187,6 +198,7 @@ module Aws::Kafka
 ClusterOperationInfo.add_member(:creation_time, Shapes::ShapeRef.new(shape: __timestampIso8601, location_name: "creationTime"))
 ClusterOperationInfo.add_member(:end_time, Shapes::ShapeRef.new(shape: __timestampIso8601, location_name: "endTime"))
 ClusterOperationInfo.add_member(:error_info, Shapes::ShapeRef.new(shape: ErrorInfo, location_name: "errorInfo"))
+ClusterOperationInfo.add_member(:operation_steps, Shapes::ShapeRef.new(shape: __listOfClusterOperationStep, location_name: "operationSteps"))
 ClusterOperationInfo.add_member(:operation_arn, Shapes::ShapeRef.new(shape: __string, location_name: "operationArn"))
 ClusterOperationInfo.add_member(:operation_state, Shapes::ShapeRef.new(shape: __string, location_name: "operationState"))
 ClusterOperationInfo.add_member(:operation_type, Shapes::ShapeRef.new(shape: __string, location_name: "operationType"))
@@ -194,6 +206,17 @@ module Aws::Kafka
 ClusterOperationInfo.add_member(:target_cluster_info, Shapes::ShapeRef.new(shape: MutableClusterInfo, location_name: "targetClusterInfo"))
 ClusterOperationInfo.struct_class = Types::ClusterOperationInfo

+ClusterOperationStep.add_member(:step_info, Shapes::ShapeRef.new(shape: ClusterOperationStepInfo, location_name: "stepInfo"))
+ClusterOperationStep.add_member(:step_name, Shapes::ShapeRef.new(shape: __string, location_name: "stepName"))
+ClusterOperationStep.struct_class = Types::ClusterOperationStep
+
+ClusterOperationStepInfo.add_member(:step_status, Shapes::ShapeRef.new(shape: __string, location_name: "stepStatus"))
+ClusterOperationStepInfo.struct_class = Types::ClusterOperationStepInfo
+
+CompatibleKafkaVersion.add_member(:source_version, Shapes::ShapeRef.new(shape: __string, location_name: "sourceVersion"))
+CompatibleKafkaVersion.add_member(:target_versions, Shapes::ShapeRef.new(shape: __listOf__string, location_name: "targetVersions"))
+CompatibleKafkaVersion.struct_class = Types::CompatibleKafkaVersion
+
 Configuration.add_member(:arn, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "arn"))
 Configuration.add_member(:creation_time, Shapes::ShapeRef.new(shape: __timestampIso8601, required: true, location_name: "creationTime"))
 Configuration.add_member(:description, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "description"))
@@ -234,7 +257,7 @@ module Aws::Kafka
 CreateClusterResponse.struct_class = Types::CreateClusterResponse

 CreateConfigurationRequest.add_member(:description, Shapes::ShapeRef.new(shape: __string, location_name: "description"))
-CreateConfigurationRequest.add_member(:kafka_versions, Shapes::ShapeRef.new(shape: __listOf__string,
+CreateConfigurationRequest.add_member(:kafka_versions, Shapes::ShapeRef.new(shape: __listOf__string, location_name: "kafkaVersions"))
 CreateConfigurationRequest.add_member(:name, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "name"))
 CreateConfigurationRequest.add_member(:server_properties, Shapes::ShapeRef.new(shape: __blob, required: true, location_name: "serverProperties"))
 CreateConfigurationRequest.struct_class = Types::CreateConfigurationRequest
@@ -324,6 +347,12 @@ module Aws::Kafka
 GetBootstrapBrokersResponse.add_member(:bootstrap_broker_string_tls, Shapes::ShapeRef.new(shape: __string, location_name: "bootstrapBrokerStringTls"))
 GetBootstrapBrokersResponse.struct_class = Types::GetBootstrapBrokersResponse

+GetCompatibleKafkaVersionsRequest.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: __string, location: "querystring", location_name: "clusterArn"))
+GetCompatibleKafkaVersionsRequest.struct_class = Types::GetCompatibleKafkaVersionsRequest
+
+GetCompatibleKafkaVersionsResponse.add_member(:compatible_kafka_versions, Shapes::ShapeRef.new(shape: __listOfCompatibleKafkaVersion, location_name: "compatibleKafkaVersions"))
+GetCompatibleKafkaVersionsResponse.struct_class = Types::GetCompatibleKafkaVersionsResponse
+
 InternalServerErrorException.add_member(:invalid_parameter, Shapes::ShapeRef.new(shape: __string, location_name: "invalidParameter"))
 InternalServerErrorException.add_member(:message, Shapes::ShapeRef.new(shape: __string, location_name: "message"))
 InternalServerErrorException.struct_class = Types::InternalServerErrorException
@@ -404,6 +433,7 @@ module Aws::Kafka
 MutableClusterInfo.add_member(:number_of_broker_nodes, Shapes::ShapeRef.new(shape: __integer, location_name: "numberOfBrokerNodes"))
 MutableClusterInfo.add_member(:open_monitoring, Shapes::ShapeRef.new(shape: OpenMonitoring, location_name: "openMonitoring"))
 MutableClusterInfo.add_member(:enhanced_monitoring, Shapes::ShapeRef.new(shape: EnhancedMonitoring, location_name: "enhancedMonitoring"))
+MutableClusterInfo.add_member(:kafka_version, Shapes::ShapeRef.new(shape: __string, location_name: "kafkaVersion"))
 MutableClusterInfo.add_member(:logging_info, Shapes::ShapeRef.new(shape: LoggingInfo, location_name: "loggingInfo"))
 MutableClusterInfo.struct_class = Types::MutableClusterInfo

@@ -501,6 +531,16 @@ module Aws::Kafka
 UpdateClusterConfigurationResponse.add_member(:cluster_operation_arn, Shapes::ShapeRef.new(shape: __string, location_name: "clusterOperationArn"))
 UpdateClusterConfigurationResponse.struct_class = Types::UpdateClusterConfigurationResponse

+UpdateClusterKafkaVersionRequest.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "clusterArn"))
+UpdateClusterKafkaVersionRequest.add_member(:configuration_info, Shapes::ShapeRef.new(shape: ConfigurationInfo, location_name: "configurationInfo"))
+UpdateClusterKafkaVersionRequest.add_member(:current_version, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "currentVersion"))
+UpdateClusterKafkaVersionRequest.add_member(:target_kafka_version, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "targetKafkaVersion"))
+UpdateClusterKafkaVersionRequest.struct_class = Types::UpdateClusterKafkaVersionRequest
+
+UpdateClusterKafkaVersionResponse.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: __string, location_name: "clusterArn"))
+UpdateClusterKafkaVersionResponse.add_member(:cluster_operation_arn, Shapes::ShapeRef.new(shape: __string, location_name: "clusterOperationArn"))
+UpdateClusterKafkaVersionResponse.struct_class = Types::UpdateClusterKafkaVersionResponse
+
 UpdateMonitoringRequest.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "clusterArn"))
 UpdateMonitoringRequest.add_member(:current_version, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "currentVersion"))
 UpdateMonitoringRequest.add_member(:enhanced_monitoring, Shapes::ShapeRef.new(shape: EnhancedMonitoring, location_name: "enhancedMonitoring"))
@@ -525,6 +565,10 @@ module Aws::Kafka

 __listOfClusterOperationInfo.member = Shapes::ShapeRef.new(shape: ClusterOperationInfo)

+__listOfClusterOperationStep.member = Shapes::ShapeRef.new(shape: ClusterOperationStep)
+
+__listOfCompatibleKafkaVersion.member = Shapes::ShapeRef.new(shape: CompatibleKafkaVersion)
+
 __listOfConfiguration.member = Shapes::ShapeRef.new(shape: Configuration)

 __listOfConfigurationRevision.member = Shapes::ShapeRef.new(shape: ConfigurationRevision)
@@ -666,6 +710,21 @@ module Aws::Kafka
 o.errors << Shapes::ShapeRef.new(shape: ForbiddenException)
 end)

+api.add_operation(:get_compatible_kafka_versions, Seahorse::Model::Operation.new.tap do |o|
+  o.name = "GetCompatibleKafkaVersions"
+  o.http_method = "GET"
+  o.http_request_uri = "/v1/compatible-kafka-versions"
+  o.input = Shapes::ShapeRef.new(shape: GetCompatibleKafkaVersionsRequest)
+  o.output = Shapes::ShapeRef.new(shape: GetCompatibleKafkaVersionsResponse)
+  o.errors << Shapes::ShapeRef.new(shape: BadRequestException)
+  o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException)
+  o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException)
+  o.errors << Shapes::ShapeRef.new(shape: ForbiddenException)
+  o.errors << Shapes::ShapeRef.new(shape: NotFoundException)
+  o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
+  o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException)
+end)
+
 api.add_operation(:list_cluster_operations, Seahorse::Model::Operation.new.tap do |o|
   o.name = "ListClusterOperations"
   o.http_method = "GET"
@@ -850,6 +909,21 @@ module Aws::Kafka
 o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
 end)

+api.add_operation(:update_cluster_kafka_version, Seahorse::Model::Operation.new.tap do |o|
+  o.name = "UpdateClusterKafkaVersion"
+  o.http_method = "PUT"
+  o.http_request_uri = "/v1/clusters/{clusterArn}/version"
+  o.input = Shapes::ShapeRef.new(shape: UpdateClusterKafkaVersionRequest)
+  o.output = Shapes::ShapeRef.new(shape: UpdateClusterKafkaVersionResponse)
+  o.errors << Shapes::ShapeRef.new(shape: BadRequestException)
+  o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException)
+  o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException)
+  o.errors << Shapes::ShapeRef.new(shape: ForbiddenException)
+  o.errors << Shapes::ShapeRef.new(shape: NotFoundException)
+  o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
+  o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException)
+end)
+
 api.add_operation(:update_monitoring, Seahorse::Model::Operation.new.tap do |o|
   o.name = "UpdateMonitoring"
   o.http_method = "PUT"