pulumi_eks-4.3.0a1768463252-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_eks/__init__.py +51 -0
- pulumi_eks/_enums.py +164 -0
- pulumi_eks/_inputs.py +3445 -0
- pulumi_eks/_utilities.py +331 -0
- pulumi_eks/addon.py +272 -0
- pulumi_eks/cluster.py +1879 -0
- pulumi_eks/cluster_creation_role_provider.py +118 -0
- pulumi_eks/managed_node_group.py +1240 -0
- pulumi_eks/node_group.py +1161 -0
- pulumi_eks/node_group_security_group.py +183 -0
- pulumi_eks/node_group_v2.py +1212 -0
- pulumi_eks/outputs.py +1400 -0
- pulumi_eks/provider.py +77 -0
- pulumi_eks/pulumi-plugin.json +5 -0
- pulumi_eks/py.typed +0 -0
- pulumi_eks/vpc_cni_addon.py +719 -0
- pulumi_eks-4.3.0a1768463252.dist-info/METADATA +94 -0
- pulumi_eks-4.3.0a1768463252.dist-info/RECORD +20 -0
- pulumi_eks-4.3.0a1768463252.dist-info/WHEEL +5 -0
- pulumi_eks-4.3.0a1768463252.dist-info/top_level.txt +1 -0
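For orientation, the sketch below condenses the "Basic Managed Node Group" example from the `ManagedNodeGroup` docstring in `pulumi_eks/managed_node_group.py` (shown in full in the diff that follows). Resource names are illustrative and the VPC/subnet wiring is omitted; it assumes a cluster created with `skip_default_node_group=True` and a node IAM role with the standard EKS worker policies attached.

```python
import json
import pulumi_aws as aws
import pulumi_eks as eks

# Cluster without the default node group; VPC/subnet configuration omitted for brevity.
cluster = eks.Cluster("example-cluster", skip_default_node_group=True)

# IAM role assumed by the worker nodes.
node_role = aws.iam.Role("example-node-role", assume_role_policy=json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Action": "sts:AssumeRole",
        "Effect": "Allow",
        "Principal": {"Service": "ec2.amazonaws.com"},
    }],
}))
# AmazonEKS_CNI_Policy and AmazonEC2ContainerRegistryReadOnly attachments are also
# required; they follow the same pattern and are omitted here.
aws.iam.RolePolicyAttachment("example-worker-node-policy",
    role=node_role.name,
    policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")

# Managed node group; unset options fall back to the defaults documented in
# ManagedNodeGroupArgs (recommended EKS-optimized AMI, t3.medium, desired size 2).
node_group = eks.ManagedNodeGroup("example-node-group",
    cluster=cluster,
    node_role=node_role,
    scaling_config={
        "min_size": 1,
        "desired_size": 2,
        "max_size": 3,
    })
```

The EFA example later in the class docstring extends the same pattern with `gpu`, `enable_efa_support`, `taints`, and `nodeadm_extra_options`.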
@@ -0,0 +1,1240 @@
# coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-eks. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import builtins as _builtins
import warnings
import sys
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
if sys.version_info >= (3, 11):
    from typing import NotRequired, TypedDict, TypeAlias
else:
    from typing_extensions import NotRequired, TypedDict, TypeAlias
from . import _utilities
from ._enums import *
from ._inputs import *
from .cluster import Cluster
from .vpc_cni_addon import VpcCniAddon
import pulumi_aws
import pulumi_kubernetes

__all__ = ['ManagedNodeGroupArgs', 'ManagedNodeGroup']

@pulumi.input_type
class ManagedNodeGroupArgs:
    def __init__(__self__, *,
                 cluster: pulumi.Input[Union['Cluster', 'CoreDataArgs']],
                 ami_id: Optional[pulumi.Input[_builtins.str]] = None,
                 ami_type: Optional[pulumi.Input[_builtins.str]] = None,
                 bootstrap_extra_args: Optional[_builtins.str] = None,
                 bottlerocket_settings: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 capacity_type: Optional[pulumi.Input[_builtins.str]] = None,
                 cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
                 disk_size: Optional[pulumi.Input[_builtins.int]] = None,
                 enable_efa_support: Optional[_builtins.bool] = None,
                 enable_imd_sv2: Optional[_builtins.bool] = None,
                 force_update_version: Optional[pulumi.Input[_builtins.bool]] = None,
                 gpu: Optional[pulumi.Input[_builtins.bool]] = None,
                 ignore_scaling_changes: Optional[_builtins.bool] = None,
                 instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                 kubelet_extra_args: Optional[_builtins.str] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
                 launch_template: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] = None,
                 node_group_name: Optional[pulumi.Input[_builtins.str]] = None,
                 node_group_name_prefix: Optional[pulumi.Input[_builtins.str]] = None,
                 node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
                 node_role_arn: Optional[pulumi.Input[_builtins.str]] = None,
                 nodeadm_extra_options: Optional[pulumi.Input[Sequence[pulumi.Input['NodeadmOptionsArgs']]]] = None,
                 operating_system: Optional[pulumi.Input['OperatingSystem']] = None,
                 placement_group_availability_zone: Optional[pulumi.Input[_builtins.str]] = None,
                 release_version: Optional[pulumi.Input[_builtins.str]] = None,
                 remote_access: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] = None,
                 scaling_config: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']] = None,
                 subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
                 taints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]] = None,
                 user_data: Optional[pulumi.Input[_builtins.str]] = None,
                 version: Optional[pulumi.Input[_builtins.str]] = None):
        """
        The set of arguments for constructing a ManagedNodeGroup resource.
        :param pulumi.Input[Union['Cluster', 'CoreDataArgs']] cluster: The target EKS cluster.
        :param pulumi.Input[_builtins.str] ami_id: The AMI ID to use for the worker nodes.
               Defaults to the latest recommended EKS Optimized AMI from the AWS Systems Manager Parameter Store.

               Note: `amiId` is mutually exclusive with `gpu` and `amiType`.

               See for more details: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.
        :param pulumi.Input[_builtins.str] ami_type: Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`.
               Note: `amiType` and `amiId` are mutually exclusive.

               See the AWS documentation (https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid AMI Types. This provider will only perform drift detection if a configuration value is provided.
        :param _builtins.str bootstrap_extra_args: Additional args to pass directly to `/etc/eks/bootstrap.sh`. For details on available options, see: https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh. Note that the `--apiserver-endpoint`, `--b64-cluster-ca` and `--kubelet-extra-args` flags are included automatically based on other configuration parameters.

               Note that this field conflicts with `launchTemplate`.
        :param pulumi.Input[Mapping[str, Any]] bottlerocket_settings: The configuration settings for Bottlerocket OS.
               The settings will get merged with the base settings the provider uses to configure Bottlerocket.

               This includes:
               - settings.kubernetes.api-server
               - settings.kubernetes.cluster-certificate
               - settings.kubernetes.cluster-name
               - settings.kubernetes.cluster-dns-ip

               For an overview of the available settings, see https://bottlerocket.dev/en/os/1.20.x/api/settings/.
        :param pulumi.Input[_builtins.str] capacity_type: Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
        :param pulumi.Input[_builtins.str] cluster_name: Name of the EKS Cluster.
        :param pulumi.Input[_builtins.int] disk_size: Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
        :param _builtins.bool enable_efa_support: Determines whether to enable Elastic Fabric Adapter (EFA) support for the node group. If multiple different instance types are configured for the node group, the first one will be used to determine the network interfaces to use. Requires `placementGroupAvailabilityZone` to be set.
        :param _builtins.bool enable_imd_sv2: Enables the ability to use EC2 Instance Metadata Service v2, which provides a more secure way to access instance metadata. For more information, see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html.
               Defaults to `false`.

               Note that this field conflicts with `launchTemplate`. If you are providing a custom `launchTemplate`, you should enable this feature within the `launchTemplateMetadataOptions` of the supplied `launchTemplate`.
        :param pulumi.Input[_builtins.bool] force_update_version: Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
        :param pulumi.Input[_builtins.bool] gpu: Use the latest recommended EKS Optimized AMI with GPU support for the worker nodes.
               Defaults to false.

               Note: `gpu` and `amiId` are mutually exclusive.

               See for more details: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html.
        :param _builtins.bool ignore_scaling_changes: Whether to ignore changes to the desired size of the Auto Scaling Group. This is useful when using Cluster Autoscaler.

               See [EKS best practices](https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/) for more details.
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] instance_types: Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
        :param _builtins.str kubelet_extra_args: Extra args to pass to the Kubelet. Corresponds to the options passed in the `--kubeletExtraArgs` flag to `/etc/eks/bootstrap.sh`. For example, '--port=10251 --address=0.0.0.0'. To escape characters in the extra argsvalue, wrap the value in quotes. For example, `kubeletExtraArgs = '--allowed-unsafe-sysctls "net.core.somaxconn"'`.
               Note that this field conflicts with `launchTemplate`.
        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] labels: Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
        :param pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs'] launch_template: Launch Template settings.

               Note: This field is mutually exclusive with `kubeletExtraArgs` and `bootstrapExtraArgs`.
        :param pulumi.Input[_builtins.str] node_group_name: Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
        :param pulumi.Input[_builtins.str] node_group_name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
        :param pulumi.Input['pulumi_aws.iam.Role'] node_role: The IAM Role that provides permissions for the EKS Node Group.

               Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
        :param pulumi.Input[_builtins.str] node_role_arn: Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.

               Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
        :param pulumi.Input[Sequence[pulumi.Input['NodeadmOptionsArgs']]] nodeadm_extra_options: Extra nodeadm configuration sections to be added to the nodeadm user data. This can be shell scripts, nodeadm NodeConfig or any other user data compatible script. When configuring additional nodeadm NodeConfig sections, they'll be merged with the base settings the provider sets. You can overwrite base settings or provide additional settings this way.
               The base settings the provider sets are:
               - cluster.name
               - cluster.apiServerEndpoint
               - cluster.certificateAuthority
               - cluster.cidr

               Note: This is only applicable when using AL2023.
               See for more details:
               - https://awslabs.github.io/amazon-eks-ami/nodeadm/
               - https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
        :param pulumi.Input['OperatingSystem'] operating_system: The type of OS to use for the node group. Will be used to determine the right EKS optimized AMI to use based on the instance types and gpu configuration.
               Valid values are `RECOMMENDED`, `AL2`, `AL2023` and `Bottlerocket`.

               Defaults to the current recommended OS.
        :param pulumi.Input[_builtins.str] placement_group_availability_zone: The availability zone of the placement group for EFA support. Required if `enableEfaSupport` is true.
        :param pulumi.Input[_builtins.str] release_version: AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
        :param pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs'] remote_access: Remote access settings.
        :param pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs'] scaling_config: Scaling settings.

               Default scaling amounts of the node group autoscaling group are:
               - desiredSize: 2
               - minSize: 1
               - maxSize: 2
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] subnet_ids: Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).

               Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
               - core.subnetIds
               - core.privateIds
               - core.publicSubnetIds

               This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] tags: Key-value mapping of resource tags.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]] taints: The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
        :param pulumi.Input[_builtins.str] user_data: User specified code to run on node startup. This is expected to handle the full AWS EKS node bootstrapping. If omitted, the provider will configure the user data.

               See for more details: https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data.
        """
        pulumi.set(__self__, "cluster", cluster)
        if ami_id is not None:
            pulumi.set(__self__, "ami_id", ami_id)
        if ami_type is not None:
            pulumi.set(__self__, "ami_type", ami_type)
        if bootstrap_extra_args is not None:
            pulumi.set(__self__, "bootstrap_extra_args", bootstrap_extra_args)
        if bottlerocket_settings is not None:
            pulumi.set(__self__, "bottlerocket_settings", bottlerocket_settings)
        if capacity_type is not None:
            pulumi.set(__self__, "capacity_type", capacity_type)
        if cluster_name is not None:
            pulumi.set(__self__, "cluster_name", cluster_name)
        if disk_size is not None:
            pulumi.set(__self__, "disk_size", disk_size)
        if enable_efa_support is not None:
            pulumi.set(__self__, "enable_efa_support", enable_efa_support)
        if enable_imd_sv2 is not None:
            pulumi.set(__self__, "enable_imd_sv2", enable_imd_sv2)
        if force_update_version is not None:
            pulumi.set(__self__, "force_update_version", force_update_version)
        if gpu is not None:
            pulumi.set(__self__, "gpu", gpu)
        if ignore_scaling_changes is not None:
            pulumi.set(__self__, "ignore_scaling_changes", ignore_scaling_changes)
        if instance_types is not None:
            pulumi.set(__self__, "instance_types", instance_types)
        if kubelet_extra_args is not None:
            pulumi.set(__self__, "kubelet_extra_args", kubelet_extra_args)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if launch_template is not None:
            pulumi.set(__self__, "launch_template", launch_template)
        if node_group_name is not None:
            pulumi.set(__self__, "node_group_name", node_group_name)
        if node_group_name_prefix is not None:
            pulumi.set(__self__, "node_group_name_prefix", node_group_name_prefix)
        if node_role is not None:
            pulumi.set(__self__, "node_role", node_role)
        if node_role_arn is not None:
            pulumi.set(__self__, "node_role_arn", node_role_arn)
        if nodeadm_extra_options is not None:
            pulumi.set(__self__, "nodeadm_extra_options", nodeadm_extra_options)
        if operating_system is not None:
            pulumi.set(__self__, "operating_system", operating_system)
        if placement_group_availability_zone is not None:
            pulumi.set(__self__, "placement_group_availability_zone", placement_group_availability_zone)
        if release_version is not None:
            pulumi.set(__self__, "release_version", release_version)
        if remote_access is not None:
            pulumi.set(__self__, "remote_access", remote_access)
        if scaling_config is not None:
            pulumi.set(__self__, "scaling_config", scaling_config)
        if subnet_ids is not None:
            pulumi.set(__self__, "subnet_ids", subnet_ids)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if taints is not None:
            pulumi.set(__self__, "taints", taints)
        if user_data is not None:
            pulumi.set(__self__, "user_data", user_data)
        if version is not None:
            pulumi.set(__self__, "version", version)

221
|
+
@_builtins.property
|
|
222
|
+
@pulumi.getter
|
|
223
|
+
def cluster(self) -> pulumi.Input[Union['Cluster', 'CoreDataArgs']]:
|
|
224
|
+
"""
|
|
225
|
+
The target EKS cluster.
|
|
226
|
+
"""
|
|
227
|
+
return pulumi.get(self, "cluster")
|
|
228
|
+
|
|
229
|
+
@cluster.setter
|
|
230
|
+
def cluster(self, value: pulumi.Input[Union['Cluster', 'CoreDataArgs']]):
|
|
231
|
+
pulumi.set(self, "cluster", value)
|
|
232
|
+
|
|
233
|
+
@_builtins.property
|
|
234
|
+
@pulumi.getter(name="amiId")
|
|
235
|
+
def ami_id(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
236
|
+
"""
|
|
237
|
+
The AMI ID to use for the worker nodes.
|
|
238
|
+
Defaults to the latest recommended EKS Optimized AMI from the AWS Systems Manager Parameter Store.
|
|
239
|
+
|
|
240
|
+
Note: `amiId` is mutually exclusive with `gpu` and `amiType`.
|
|
241
|
+
|
|
242
|
+
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.
|
|
243
|
+
"""
|
|
244
|
+
return pulumi.get(self, "ami_id")
|
|
245
|
+
|
|
246
|
+
@ami_id.setter
|
|
247
|
+
def ami_id(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
248
|
+
pulumi.set(self, "ami_id", value)
|
|
249
|
+
|
|
250
|
+
@_builtins.property
|
|
251
|
+
@pulumi.getter(name="amiType")
|
|
252
|
+
def ami_type(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
253
|
+
"""
|
|
254
|
+
Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`.
|
|
255
|
+
Note: `amiType` and `amiId` are mutually exclusive.
|
|
256
|
+
|
|
257
|
+
See the AWS documentation (https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid AMI Types. This provider will only perform drift detection if a configuration value is provided.
|
|
258
|
+
"""
|
|
259
|
+
return pulumi.get(self, "ami_type")
|
|
260
|
+
|
|
261
|
+
@ami_type.setter
|
|
262
|
+
def ami_type(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
263
|
+
pulumi.set(self, "ami_type", value)
|
|
264
|
+
|
|
265
|
+
@_builtins.property
|
|
266
|
+
@pulumi.getter(name="bootstrapExtraArgs")
|
|
267
|
+
def bootstrap_extra_args(self) -> Optional[_builtins.str]:
|
|
268
|
+
"""
|
|
269
|
+
Additional args to pass directly to `/etc/eks/bootstrap.sh`. For details on available options, see: https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh. Note that the `--apiserver-endpoint`, `--b64-cluster-ca` and `--kubelet-extra-args` flags are included automatically based on other configuration parameters.
|
|
270
|
+
|
|
271
|
+
Note that this field conflicts with `launchTemplate`.
|
|
272
|
+
"""
|
|
273
|
+
return pulumi.get(self, "bootstrap_extra_args")
|
|
274
|
+
|
|
275
|
+
@bootstrap_extra_args.setter
|
|
276
|
+
def bootstrap_extra_args(self, value: Optional[_builtins.str]):
|
|
277
|
+
pulumi.set(self, "bootstrap_extra_args", value)
|
|
278
|
+
|
|
279
|
+
@_builtins.property
|
|
280
|
+
@pulumi.getter(name="bottlerocketSettings")
|
|
281
|
+
def bottlerocket_settings(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
|
|
282
|
+
"""
|
|
283
|
+
The configuration settings for Bottlerocket OS.
|
|
284
|
+
The settings will get merged with the base settings the provider uses to configure Bottlerocket.
|
|
285
|
+
|
|
286
|
+
This includes:
|
|
287
|
+
- settings.kubernetes.api-server
|
|
288
|
+
- settings.kubernetes.cluster-certificate
|
|
289
|
+
- settings.kubernetes.cluster-name
|
|
290
|
+
- settings.kubernetes.cluster-dns-ip
|
|
291
|
+
|
|
292
|
+
For an overview of the available settings, see https://bottlerocket.dev/en/os/1.20.x/api/settings/.
|
|
293
|
+
"""
|
|
294
|
+
return pulumi.get(self, "bottlerocket_settings")
|
|
295
|
+
|
|
296
|
+
@bottlerocket_settings.setter
|
|
297
|
+
def bottlerocket_settings(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
|
|
298
|
+
pulumi.set(self, "bottlerocket_settings", value)
|
|
299
|
+
|
|
300
|
+
@_builtins.property
|
|
301
|
+
@pulumi.getter(name="capacityType")
|
|
302
|
+
def capacity_type(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
303
|
+
"""
|
|
304
|
+
Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
|
|
305
|
+
"""
|
|
306
|
+
return pulumi.get(self, "capacity_type")
|
|
307
|
+
|
|
308
|
+
@capacity_type.setter
|
|
309
|
+
def capacity_type(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
310
|
+
pulumi.set(self, "capacity_type", value)
|
|
311
|
+
|
|
312
|
+
@_builtins.property
|
|
313
|
+
@pulumi.getter(name="clusterName")
|
|
314
|
+
def cluster_name(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
315
|
+
"""
|
|
316
|
+
Name of the EKS Cluster.
|
|
317
|
+
"""
|
|
318
|
+
return pulumi.get(self, "cluster_name")
|
|
319
|
+
|
|
320
|
+
@cluster_name.setter
|
|
321
|
+
def cluster_name(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
322
|
+
pulumi.set(self, "cluster_name", value)
|
|
323
|
+
|
|
324
|
+
@_builtins.property
|
|
325
|
+
@pulumi.getter(name="diskSize")
|
|
326
|
+
def disk_size(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
327
|
+
"""
|
|
328
|
+
Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
|
|
329
|
+
"""
|
|
330
|
+
return pulumi.get(self, "disk_size")
|
|
331
|
+
|
|
332
|
+
@disk_size.setter
|
|
333
|
+
def disk_size(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
334
|
+
pulumi.set(self, "disk_size", value)
|
|
335
|
+
|
|
336
|
+
@_builtins.property
|
|
337
|
+
@pulumi.getter(name="enableEfaSupport")
|
|
338
|
+
def enable_efa_support(self) -> Optional[_builtins.bool]:
|
|
339
|
+
"""
|
|
340
|
+
Determines whether to enable Elastic Fabric Adapter (EFA) support for the node group. If multiple different instance types are configured for the node group, the first one will be used to determine the network interfaces to use. Requires `placementGroupAvailabilityZone` to be set.
|
|
341
|
+
"""
|
|
342
|
+
return pulumi.get(self, "enable_efa_support")
|
|
343
|
+
|
|
344
|
+
@enable_efa_support.setter
|
|
345
|
+
def enable_efa_support(self, value: Optional[_builtins.bool]):
|
|
346
|
+
pulumi.set(self, "enable_efa_support", value)
|
|
347
|
+
|
|
348
|
+
@_builtins.property
|
|
349
|
+
@pulumi.getter(name="enableIMDSv2")
|
|
350
|
+
def enable_imd_sv2(self) -> Optional[_builtins.bool]:
|
|
351
|
+
"""
|
|
352
|
+
Enables the ability to use EC2 Instance Metadata Service v2, which provides a more secure way to access instance metadata. For more information, see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html.
|
|
353
|
+
Defaults to `false`.
|
|
354
|
+
|
|
355
|
+
Note that this field conflicts with `launchTemplate`. If you are providing a custom `launchTemplate`, you should enable this feature within the `launchTemplateMetadataOptions` of the supplied `launchTemplate`.
|
|
356
|
+
"""
|
|
357
|
+
return pulumi.get(self, "enable_imd_sv2")
|
|
358
|
+
|
|
359
|
+
@enable_imd_sv2.setter
|
|
360
|
+
def enable_imd_sv2(self, value: Optional[_builtins.bool]):
|
|
361
|
+
pulumi.set(self, "enable_imd_sv2", value)
|
|
362
|
+
|
|
363
|
+
@_builtins.property
|
|
364
|
+
@pulumi.getter(name="forceUpdateVersion")
|
|
365
|
+
def force_update_version(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
366
|
+
"""
|
|
367
|
+
Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
|
|
368
|
+
"""
|
|
369
|
+
return pulumi.get(self, "force_update_version")
|
|
370
|
+
|
|
371
|
+
@force_update_version.setter
|
|
372
|
+
def force_update_version(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
373
|
+
pulumi.set(self, "force_update_version", value)
|
|
374
|
+
|
|
375
|
+
@_builtins.property
|
|
376
|
+
@pulumi.getter
|
|
377
|
+
def gpu(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
378
|
+
"""
|
|
379
|
+
Use the latest recommended EKS Optimized AMI with GPU support for the worker nodes.
|
|
380
|
+
Defaults to false.
|
|
381
|
+
|
|
382
|
+
Note: `gpu` and `amiId` are mutually exclusive.
|
|
383
|
+
|
|
384
|
+
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html.
|
|
385
|
+
"""
|
|
386
|
+
return pulumi.get(self, "gpu")
|
|
387
|
+
|
|
388
|
+
@gpu.setter
|
|
389
|
+
def gpu(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
390
|
+
pulumi.set(self, "gpu", value)
|
|
391
|
+
|
|
392
|
+
@_builtins.property
|
|
393
|
+
@pulumi.getter(name="ignoreScalingChanges")
|
|
394
|
+
def ignore_scaling_changes(self) -> Optional[_builtins.bool]:
|
|
395
|
+
"""
|
|
396
|
+
Whether to ignore changes to the desired size of the Auto Scaling Group. This is useful when using Cluster Autoscaler.
|
|
397
|
+
|
|
398
|
+
See [EKS best practices](https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/) for more details.
|
|
399
|
+
"""
|
|
400
|
+
return pulumi.get(self, "ignore_scaling_changes")
|
|
401
|
+
|
|
402
|
+
@ignore_scaling_changes.setter
|
|
403
|
+
def ignore_scaling_changes(self, value: Optional[_builtins.bool]):
|
|
404
|
+
pulumi.set(self, "ignore_scaling_changes", value)
|
|
405
|
+
|
|
406
|
+
@_builtins.property
|
|
407
|
+
@pulumi.getter(name="instanceTypes")
|
|
408
|
+
def instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
|
|
409
|
+
"""
|
|
410
|
+
Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
|
|
411
|
+
"""
|
|
412
|
+
return pulumi.get(self, "instance_types")
|
|
413
|
+
|
|
414
|
+
@instance_types.setter
|
|
415
|
+
def instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
|
|
416
|
+
pulumi.set(self, "instance_types", value)
|
|
417
|
+
|
|
418
|
+
@_builtins.property
|
|
419
|
+
@pulumi.getter(name="kubeletExtraArgs")
|
|
420
|
+
def kubelet_extra_args(self) -> Optional[_builtins.str]:
|
|
421
|
+
"""
|
|
422
|
+
Extra args to pass to the Kubelet. Corresponds to the options passed in the `--kubeletExtraArgs` flag to `/etc/eks/bootstrap.sh`. For example, '--port=10251 --address=0.0.0.0'. To escape characters in the extra argsvalue, wrap the value in quotes. For example, `kubeletExtraArgs = '--allowed-unsafe-sysctls "net.core.somaxconn"'`.
|
|
423
|
+
Note that this field conflicts with `launchTemplate`.
|
|
424
|
+
"""
|
|
425
|
+
return pulumi.get(self, "kubelet_extra_args")
|
|
426
|
+
|
|
427
|
+
@kubelet_extra_args.setter
|
|
428
|
+
def kubelet_extra_args(self, value: Optional[_builtins.str]):
|
|
429
|
+
pulumi.set(self, "kubelet_extra_args", value)
|
|
430
|
+
|
|
431
|
+
@_builtins.property
|
|
432
|
+
@pulumi.getter
|
|
433
|
+
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
|
|
434
|
+
"""
|
|
435
|
+
Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
|
|
436
|
+
"""
|
|
437
|
+
return pulumi.get(self, "labels")
|
|
438
|
+
|
|
439
|
+
@labels.setter
|
|
440
|
+
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
|
|
441
|
+
pulumi.set(self, "labels", value)
|
|
442
|
+
|
|
443
|
+
@_builtins.property
|
|
444
|
+
@pulumi.getter(name="launchTemplate")
|
|
445
|
+
def launch_template(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]:
|
|
446
|
+
"""
|
|
447
|
+
Launch Template settings.
|
|
448
|
+
|
|
449
|
+
Note: This field is mutually exclusive with `kubeletExtraArgs` and `bootstrapExtraArgs`.
|
|
450
|
+
"""
|
|
451
|
+
return pulumi.get(self, "launch_template")
|
|
452
|
+
|
|
453
|
+
@launch_template.setter
|
|
454
|
+
def launch_template(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]):
|
|
455
|
+
pulumi.set(self, "launch_template", value)
|
|
456
|
+
|
|
457
|
+
@_builtins.property
|
|
458
|
+
@pulumi.getter(name="nodeGroupName")
|
|
459
|
+
def node_group_name(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
460
|
+
"""
|
|
461
|
+
Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
|
|
462
|
+
"""
|
|
463
|
+
return pulumi.get(self, "node_group_name")
|
|
464
|
+
|
|
465
|
+
@node_group_name.setter
|
|
466
|
+
def node_group_name(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
467
|
+
pulumi.set(self, "node_group_name", value)
|
|
468
|
+
|
|
469
|
+
@_builtins.property
|
|
470
|
+
@pulumi.getter(name="nodeGroupNamePrefix")
|
|
471
|
+
def node_group_name_prefix(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
472
|
+
"""
|
|
473
|
+
Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
|
|
474
|
+
"""
|
|
475
|
+
return pulumi.get(self, "node_group_name_prefix")
|
|
476
|
+
|
|
477
|
+
@node_group_name_prefix.setter
|
|
478
|
+
def node_group_name_prefix(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
479
|
+
pulumi.set(self, "node_group_name_prefix", value)
|
|
480
|
+
|
|
481
|
+
@_builtins.property
|
|
482
|
+
@pulumi.getter(name="nodeRole")
|
|
483
|
+
def node_role(self) -> Optional[pulumi.Input['pulumi_aws.iam.Role']]:
|
|
484
|
+
"""
|
|
485
|
+
The IAM Role that provides permissions for the EKS Node Group.
|
|
486
|
+
|
|
487
|
+
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
|
|
488
|
+
"""
|
|
489
|
+
return pulumi.get(self, "node_role")
|
|
490
|
+
|
|
491
|
+
@node_role.setter
|
|
492
|
+
def node_role(self, value: Optional[pulumi.Input['pulumi_aws.iam.Role']]):
|
|
493
|
+
pulumi.set(self, "node_role", value)
|
|
494
|
+
|
|
495
|
+
@_builtins.property
|
|
496
|
+
@pulumi.getter(name="nodeRoleArn")
|
|
497
|
+
def node_role_arn(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
498
|
+
"""
|
|
499
|
+
Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
|
|
500
|
+
|
|
501
|
+
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
|
|
502
|
+
"""
|
|
503
|
+
return pulumi.get(self, "node_role_arn")
|
|
504
|
+
|
|
505
|
+
@node_role_arn.setter
|
|
506
|
+
def node_role_arn(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
507
|
+
pulumi.set(self, "node_role_arn", value)
|
|
508
|
+
|
|
509
|
+
@_builtins.property
|
|
510
|
+
@pulumi.getter(name="nodeadmExtraOptions")
|
|
511
|
+
def nodeadm_extra_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodeadmOptionsArgs']]]]:
|
|
512
|
+
"""
|
|
513
|
+
Extra nodeadm configuration sections to be added to the nodeadm user data. This can be shell scripts, nodeadm NodeConfig or any other user data compatible script. When configuring additional nodeadm NodeConfig sections, they'll be merged with the base settings the provider sets. You can overwrite base settings or provide additional settings this way.
|
|
514
|
+
The base settings the provider sets are:
|
|
515
|
+
- cluster.name
|
|
516
|
+
- cluster.apiServerEndpoint
|
|
517
|
+
- cluster.certificateAuthority
|
|
518
|
+
- cluster.cidr
|
|
519
|
+
|
|
520
|
+
Note: This is only applicable when using AL2023.
|
|
521
|
+
See for more details:
|
|
522
|
+
- https://awslabs.github.io/amazon-eks-ami/nodeadm/
|
|
523
|
+
- https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
|
|
524
|
+
"""
|
|
525
|
+
return pulumi.get(self, "nodeadm_extra_options")
|
|
526
|
+
|
|
527
|
+
@nodeadm_extra_options.setter
|
|
528
|
+
def nodeadm_extra_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodeadmOptionsArgs']]]]):
|
|
529
|
+
pulumi.set(self, "nodeadm_extra_options", value)
|
|
530
|
+
|
|
531
|
+
@_builtins.property
|
|
532
|
+
@pulumi.getter(name="operatingSystem")
|
|
533
|
+
def operating_system(self) -> Optional[pulumi.Input['OperatingSystem']]:
|
|
534
|
+
"""
|
|
535
|
+
The type of OS to use for the node group. Will be used to determine the right EKS optimized AMI to use based on the instance types and gpu configuration.
|
|
536
|
+
Valid values are `RECOMMENDED`, `AL2`, `AL2023` and `Bottlerocket`.
|
|
537
|
+
|
|
538
|
+
Defaults to the current recommended OS.
|
|
539
|
+
"""
|
|
540
|
+
return pulumi.get(self, "operating_system")
|
|
541
|
+
|
|
542
|
+
@operating_system.setter
|
|
543
|
+
def operating_system(self, value: Optional[pulumi.Input['OperatingSystem']]):
|
|
544
|
+
pulumi.set(self, "operating_system", value)
|
|
545
|
+
|
|
546
|
+
@_builtins.property
|
|
547
|
+
@pulumi.getter(name="placementGroupAvailabilityZone")
|
|
548
|
+
def placement_group_availability_zone(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
549
|
+
"""
|
|
550
|
+
The availability zone of the placement group for EFA support. Required if `enableEfaSupport` is true.
|
|
551
|
+
"""
|
|
552
|
+
return pulumi.get(self, "placement_group_availability_zone")
|
|
553
|
+
|
|
554
|
+
@placement_group_availability_zone.setter
|
|
555
|
+
def placement_group_availability_zone(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
556
|
+
pulumi.set(self, "placement_group_availability_zone", value)
|
|
557
|
+
|
|
558
|
+
@_builtins.property
|
|
559
|
+
@pulumi.getter(name="releaseVersion")
|
|
560
|
+
def release_version(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
561
|
+
"""
|
|
562
|
+
AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
|
|
563
|
+
"""
|
|
564
|
+
return pulumi.get(self, "release_version")
|
|
565
|
+
|
|
566
|
+
@release_version.setter
|
|
567
|
+
def release_version(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
568
|
+
pulumi.set(self, "release_version", value)
|
|
569
|
+
|
|
570
|
+
@_builtins.property
|
|
571
|
+
@pulumi.getter(name="remoteAccess")
|
|
572
|
+
def remote_access(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]:
|
|
573
|
+
"""
|
|
574
|
+
Remote access settings.
|
|
575
|
+
"""
|
|
576
|
+
return pulumi.get(self, "remote_access")
|
|
577
|
+
|
|
578
|
+
@remote_access.setter
|
|
579
|
+
def remote_access(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]):
|
|
580
|
+
pulumi.set(self, "remote_access", value)
|
|
581
|
+
|
|
582
|
+
@_builtins.property
|
|
583
|
+
@pulumi.getter(name="scalingConfig")
|
|
584
|
+
def scaling_config(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]:
|
|
585
|
+
"""
|
|
586
|
+
Scaling settings.
|
|
587
|
+
|
|
588
|
+
Default scaling amounts of the node group autoscaling group are:
|
|
589
|
+
- desiredSize: 2
|
|
590
|
+
- minSize: 1
|
|
591
|
+
- maxSize: 2
|
|
592
|
+
"""
|
|
593
|
+
return pulumi.get(self, "scaling_config")
|
|
594
|
+
|
|
595
|
+
@scaling_config.setter
|
|
596
|
+
def scaling_config(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]):
|
|
597
|
+
pulumi.set(self, "scaling_config", value)
|
|
598
|
+
|
|
599
|
+
@_builtins.property
|
|
600
|
+
@pulumi.getter(name="subnetIds")
|
|
601
|
+
def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
|
|
602
|
+
"""
|
|
603
|
+
Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
|
|
604
|
+
|
|
605
|
+
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
|
|
606
|
+
- core.subnetIds
|
|
607
|
+
- core.privateIds
|
|
608
|
+
- core.publicSubnetIds
|
|
609
|
+
|
|
610
|
+
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
|
|
611
|
+
"""
|
|
612
|
+
return pulumi.get(self, "subnet_ids")
|
|
613
|
+
|
|
614
|
+
@subnet_ids.setter
|
|
615
|
+
def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
|
|
616
|
+
pulumi.set(self, "subnet_ids", value)
|
|
617
|
+
|
|
618
|
+
@_builtins.property
|
|
619
|
+
@pulumi.getter
|
|
620
|
+
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
|
|
621
|
+
"""
|
|
622
|
+
Key-value mapping of resource tags.
|
|
623
|
+
"""
|
|
624
|
+
return pulumi.get(self, "tags")
|
|
625
|
+
|
|
626
|
+
@tags.setter
|
|
627
|
+
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
|
|
628
|
+
pulumi.set(self, "tags", value)
|
|
629
|
+
|
|
630
|
+
@_builtins.property
|
|
631
|
+
@pulumi.getter
|
|
632
|
+
def taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]:
|
|
633
|
+
"""
|
|
634
|
+
The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
|
|
635
|
+
"""
|
|
636
|
+
return pulumi.get(self, "taints")
|
|
637
|
+
|
|
638
|
+
@taints.setter
|
|
639
|
+
def taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]):
|
|
640
|
+
pulumi.set(self, "taints", value)
|
|
641
|
+
|
|
642
|
+
@_builtins.property
|
|
643
|
+
@pulumi.getter(name="userData")
|
|
644
|
+
def user_data(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
645
|
+
"""
|
|
646
|
+
User specified code to run on node startup. This is expected to handle the full AWS EKS node bootstrapping. If omitted, the provider will configure the user data.
|
|
647
|
+
|
|
648
|
+
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data.
|
|
649
|
+
"""
|
|
650
|
+
return pulumi.get(self, "user_data")
|
|
651
|
+
|
|
652
|
+
@user_data.setter
|
|
653
|
+
def user_data(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
654
|
+
pulumi.set(self, "user_data", value)
|
|
655
|
+
|
|
656
|
+
@_builtins.property
|
|
657
|
+
@pulumi.getter
|
|
658
|
+
def version(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
659
|
+
return pulumi.get(self, "version")
|
|
660
|
+
|
|
661
|
+
@version.setter
|
|
662
|
+
def version(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
663
|
+
pulumi.set(self, "version", value)
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
@pulumi.type_token("eks:index:ManagedNodeGroup")
|
|
667
|
+
class ManagedNodeGroup(pulumi.ComponentResource):
|
|
668
|
+
@overload
|
|
669
|
+
def __init__(__self__,
|
|
670
|
+
resource_name: str,
|
|
671
|
+
opts: Optional[pulumi.ResourceOptions] = None,
|
|
672
|
+
ami_id: Optional[pulumi.Input[_builtins.str]] = None,
|
|
673
|
+
ami_type: Optional[pulumi.Input[_builtins.str]] = None,
|
|
674
|
+
bootstrap_extra_args: Optional[_builtins.str] = None,
|
|
675
|
+
bottlerocket_settings: Optional[pulumi.Input[Mapping[str, Any]]] = None,
|
|
676
|
+
capacity_type: Optional[pulumi.Input[_builtins.str]] = None,
|
|
677
|
+
cluster: Optional[pulumi.Input[Union['Cluster', Union['CoreDataArgs', 'CoreDataArgsDict']]]] = None,
|
|
678
|
+
cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
|
|
679
|
+
disk_size: Optional[pulumi.Input[_builtins.int]] = None,
|
|
680
|
+
enable_efa_support: Optional[_builtins.bool] = None,
|
|
681
|
+
enable_imd_sv2: Optional[_builtins.bool] = None,
|
|
682
|
+
force_update_version: Optional[pulumi.Input[_builtins.bool]] = None,
|
|
683
|
+
gpu: Optional[pulumi.Input[_builtins.bool]] = None,
|
|
684
|
+
ignore_scaling_changes: Optional[_builtins.bool] = None,
|
|
685
|
+
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
|
|
686
|
+
kubelet_extra_args: Optional[_builtins.str] = None,
|
|
687
|
+
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
|
|
688
|
+
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
|
|
689
|
+
node_group_name: Optional[pulumi.Input[_builtins.str]] = None,
|
|
690
|
+
node_group_name_prefix: Optional[pulumi.Input[_builtins.str]] = None,
|
|
691
|
+
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
|
|
692
|
+
node_role_arn: Optional[pulumi.Input[_builtins.str]] = None,
|
|
693
|
+
nodeadm_extra_options: Optional[pulumi.Input[Sequence[pulumi.Input[Union['NodeadmOptionsArgs', 'NodeadmOptionsArgsDict']]]]] = None,
|
|
694
|
+
operating_system: Optional[pulumi.Input['OperatingSystem']] = None,
|
|
695
|
+
placement_group_availability_zone: Optional[pulumi.Input[_builtins.str]] = None,
|
|
696
|
+
release_version: Optional[pulumi.Input[_builtins.str]] = None,
|
|
697
|
+
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
|
|
698
|
+
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
|
|
699
|
+
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
|
|
700
|
+
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
|
|
701
|
+
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
|
|
702
|
+
user_data: Optional[pulumi.Input[_builtins.str]] = None,
|
|
703
|
+
version: Optional[pulumi.Input[_builtins.str]] = None,
|
|
704
|
+
__props__=None):
|
|
705
|
+
"""
|
|
706
|
+
Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).
|
|
707
|
+
|
|
708
|
+
## Example Usage
|
|
709
|
+
### Basic Managed Node Group
|
|
710
|
+
This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured.
|
|
711
|
+
|
|
712
|
+
```python
|
|
713
|
+
import pulumi
|
|
714
|
+
import json
|
|
715
|
+
import pulumi_aws as aws
|
|
716
|
+
import pulumi_awsx as awsx
|
|
717
|
+
import pulumi_eks as eks
|
|
718
|
+
|
|
719
|
+
eks_vpc = awsx.ec2.Vpc("eks-vpc",
|
|
720
|
+
enable_dns_hostnames=True,
|
|
721
|
+
cidr_block="10.0.0.0/16")
|
|
722
|
+
eks_cluster = eks.Cluster("eks-cluster",
|
|
723
|
+
vpc_id=eks_vpc.vpc_id,
|
|
724
|
+
authentication_mode=eks.AuthenticationMode.API,
|
|
725
|
+
public_subnet_ids=eks_vpc.public_subnet_ids,
|
|
726
|
+
private_subnet_ids=eks_vpc.private_subnet_ids,
|
|
727
|
+
skip_default_node_group=True)
|
|
728
|
+
node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({
|
|
729
|
+
"Version": "2012-10-17",
|
|
730
|
+
"Statement": [{
|
|
731
|
+
"Action": "sts:AssumeRole",
|
|
732
|
+
"Effect": "Allow",
|
|
733
|
+
"Sid": "",
|
|
734
|
+
"Principal": {
|
|
735
|
+
"Service": "ec2.amazonaws.com",
|
|
736
|
+
},
|
|
737
|
+
}],
|
|
738
|
+
}))
|
|
739
|
+
worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy",
|
|
740
|
+
role=node_role.name,
|
|
741
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
|
|
742
|
+
cni_policy = aws.iam.RolePolicyAttachment("cni-policy",
|
|
743
|
+
role=node_role.name,
|
|
744
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
|
|
745
|
+
registry_policy = aws.iam.RolePolicyAttachment("registry-policy",
|
|
746
|
+
role=node_role.name,
|
|
747
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
|
|
748
|
+
node_group = eks.ManagedNodeGroup("node-group",
|
|
749
|
+
cluster=eks_cluster,
|
|
750
|
+
node_role=node_role)
|
|
751
|
+
|
|
752
|
+
```
|
|
753
|
+
### Enabling EFA Support
|
|
754
|
+
|
|
755
|
+
Enabling EFA support for a node group will do the following:
|
|
756
|
+
- All EFA interfaces supported by the instance will be exposed on the launch template used by the node group
|
|
757
|
+
- A `clustered` placement group will be created and passed to the launch template
|
|
758
|
+
- Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type
|
|
759
|
+
|
|
760
|
+
The GPU optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration you will need to install the drivers and libraries manually and bake a custom AMI.
|
|
761
|
+
|
|
762
|
+
You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers.
|
|
763
|
+
Your application container will need to have the necessary libraries and runtimes in order to leverage the EFA interfaces (e.g. libfabric).
|
|
764
|
+
|
|
765
|
+
```python
|
|
766
|
+
import pulumi
|
|
767
|
+
import json
|
|
768
|
+
import pulumi_aws as aws
|
|
769
|
+
import pulumi_awsx as awsx
|
|
770
|
+
import pulumi_eks as eks
|
|
771
|
+
import pulumi_kubernetes as kubernetes
|
|
772
|
+
|
|
773
|
+
eks_vpc = awsx.ec2.Vpc("eks-vpc",
|
|
774
|
+
enable_dns_hostnames=True,
|
|
775
|
+
cidr_block="10.0.0.0/16")
|
|
776
|
+
eks_cluster = eks.Cluster("eks-cluster",
|
|
777
|
+
vpc_id=eks_vpc.vpc_id,
|
|
778
|
+
authentication_mode=eks.AuthenticationMode.API,
|
|
779
|
+
public_subnet_ids=eks_vpc.public_subnet_ids,
|
|
780
|
+
private_subnet_ids=eks_vpc.private_subnet_ids,
|
|
781
|
+
skip_default_node_group=True)
|
|
782
|
+
k8_s_provider = kubernetes.Provider("k8sProvider", kubeconfig=eks_cluster.kubeconfig)
|
|
783
|
+
node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({
|
|
784
|
+
"Version": "2012-10-17",
|
|
785
|
+
"Statement": [{
|
|
786
|
+
"Action": "sts:AssumeRole",
|
|
787
|
+
"Effect": "Allow",
|
|
788
|
+
"Sid": "",
|
|
789
|
+
"Principal": {
|
|
790
|
+
"Service": "ec2.amazonaws.com",
|
|
791
|
+
},
|
|
792
|
+
}],
|
|
793
|
+
}))
|
|
794
|
+
worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy",
|
|
795
|
+
role=node_role.name,
|
|
796
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
|
|
797
|
+
cni_policy = aws.iam.RolePolicyAttachment("cni-policy",
|
|
798
|
+
role=node_role.name,
|
|
799
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
|
|
800
|
+
registry_policy = aws.iam.RolePolicyAttachment("registry-policy",
|
|
801
|
+
role=node_role.name,
|
|
802
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
|
|
803
|
+
|
|
804
|
+
# The node group for running system pods (e.g. coredns, etc.)
|
|
805
|
+
system_node_group = eks.ManagedNodeGroup("system-node-group",
|
|
806
|
+
cluster=eks_cluster,
|
|
807
|
+
node_role=node_role)
|
|
808
|
+
|
|
809
|
+
# The EFA device plugin for exposing EFA interfaces as extended resources
|
|
810
|
+
device_plugin = kubernetes.helm.v3.Release("device-plugin",
|
|
811
|
+
version="0.5.7",
|
|
812
|
+
repository_opts={
|
|
813
|
+
"repo": "https://aws.github.io/eks-charts",
|
|
814
|
+
},
|
|
815
|
+
chart="aws-efa-k8s-device-plugin",
|
|
816
|
+
namespace="kube-system",
|
|
817
|
+
atomic=True,
|
|
818
|
+
values={
|
|
819
|
+
"tolerations": [{
|
|
820
|
+
"key": "efa-enabled",
|
|
821
|
+
"operator": "Exists",
|
|
822
|
+
"effect": "NoExecute",
|
|
823
|
+
}],
|
|
824
|
+
},
|
|
825
|
+
opts = pulumi.ResourceOptions(provider=k8_s_provider))
|
|
826
|
+
|
|
827
|
+
# The node group for running EFA enabled workloads
|
|
828
|
+
efa_node_group = eks.ManagedNodeGroup("efa-node-group",
|
|
829
|
+
cluster=eks_cluster,
|
|
830
|
+
node_role=node_role,
|
|
831
|
+
instance_types=["g6.8xlarge"],
|
|
832
|
+
gpu=True,
|
|
833
|
+
scaling_config={
|
|
834
|
+
"min_size": 2,
|
|
835
|
+
"desired_size": 2,
|
|
836
|
+
"max_size": 4,
|
|
837
|
+
},
|
|
838
|
+
enable_efa_support=True,
|
|
839
|
+
placement_group_availability_zone="us-west-2b",
|
|
840
|
+
|
|
841
|
+
# Taint the nodes so that only pods with the efa-enabled label can be scheduled on them
|
|
842
|
+
taints=[{
|
|
843
|
+
"key": "efa-enabled",
|
|
844
|
+
"value": "true",
|
|
845
|
+
"effect": "NO_EXECUTE",
|
|
846
|
+
}],
|
|
847
|
+
|
|
848
|
+
# Instances with GPUs usually have nvme instance store volumes, so we can mount them in RAID-0 for kubelet and containerd
|
|
849
|
+
# These are faster than the regular EBS volumes
|
|
850
|
+
nodeadm_extra_options=[{
|
|
851
|
+
"content_type": "application/node.eks.aws",
|
|
852
|
+
"content": \"\"\"apiVersion: node.eks.aws/v1alpha1
|
|
853
|
+
kind: NodeConfig
|
|
854
|
+
spec:
|
|
855
|
+
instance:
|
|
856
|
+
localStorage:
|
|
857
|
+
strategy: RAID0
|
|
858
|
+
\"\"\",
|
|
859
|
+
}])
|
|
860
|
+
|
|
861
|
+
```
|
|
862
|
+
|
|
863
|
+
:param str resource_name: The name of the resource.
|
|
864
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
|
865
|
+
:param pulumi.Input[_builtins.str] ami_id: The AMI ID to use for the worker nodes.
|
|
866
|
+
Defaults to the latest recommended EKS Optimized AMI from the AWS Systems Manager Parameter Store.
|
|
867
|
+
|
|
868
|
+
Note: `amiId` is mutually exclusive with `gpu` and `amiType`.
|
|
869
|
+
|
|
870
|
+
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.
|
|
871
|
+
:param pulumi.Input[_builtins.str] ami_type: Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`.
|
|
872
|
+
Note: `amiType` and `amiId` are mutually exclusive.
|
|
873
|
+
|
|
874
|
+
See the AWS documentation (https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid AMI Types. This provider will only perform drift detection if a configuration value is provided.
|
|
875
|
+
:param _builtins.str bootstrap_extra_args: Additional args to pass directly to `/etc/eks/bootstrap.sh`. For details on available options, see: https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh. Note that the `--apiserver-endpoint`, `--b64-cluster-ca` and `--kubelet-extra-args` flags are included automatically based on other configuration parameters.
|
|
876
|
+
|
|
877
|
+
Note that this field conflicts with `launchTemplate`.
|
|
878
|
+
:param pulumi.Input[Mapping[str, Any]] bottlerocket_settings: The configuration settings for Bottlerocket OS.
|
|
879
|
+
The settings will get merged with the base settings the provider uses to configure Bottlerocket.
|
|
880
|
+
|
|
881
|
+
This includes:
|
|
882
|
+
- settings.kubernetes.api-server
|
|
883
|
+
- settings.kubernetes.cluster-certificate
|
|
884
|
+
- settings.kubernetes.cluster-name
|
|
885
|
+
- settings.kubernetes.cluster-dns-ip
|
|
886
|
+
|
|
887
|
+
For an overview of the available settings, see https://bottlerocket.dev/en/os/1.20.x/api/settings/.
|
|
888
|
+
:param pulumi.Input[_builtins.str] capacity_type: Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
|
|
889
|
+
:param pulumi.Input[Union['Cluster', Union['CoreDataArgs', 'CoreDataArgsDict']]] cluster: The target EKS cluster.
|
|
890
|
+
:param pulumi.Input[_builtins.str] cluster_name: Name of the EKS Cluster.
|
|
891
|
+
:param pulumi.Input[_builtins.int] disk_size: Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
|
|
892
|
+
:param _builtins.bool enable_efa_support: Determines whether to enable Elastic Fabric Adapter (EFA) support for the node group. If multiple different instance types are configured for the node group, the first one will be used to determine the network interfaces to use. Requires `placementGroupAvailabilityZone` to be set.
|
|
893
|
+
:param _builtins.bool enable_imd_sv2: Enables the ability to use EC2 Instance Metadata Service v2, which provides a more secure way to access instance metadata. For more information, see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html.
|
|
894
|
+
Defaults to `false`.
|
|
895
|
+
|
|
896
|
+
Note that this field conflicts with `launchTemplate`. If you are providing a custom `launchTemplate`, you should enable this feature within the `launchTemplateMetadataOptions` of the supplied `launchTemplate`.
|
|
897
|
+
:param pulumi.Input[_builtins.bool] force_update_version: Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
|
|
898
|
+
:param pulumi.Input[_builtins.bool] gpu: Use the latest recommended EKS Optimized AMI with GPU support for the worker nodes.
|
|
899
|
+
Defaults to false.
|
|
900
|
+
|
|
901
|
+
Note: `gpu` and `amiId` are mutually exclusive.
|
|
902
|
+
|
|
903
|
+
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html.
|
|
904
|
+
:param _builtins.bool ignore_scaling_changes: Whether to ignore changes to the desired size of the Auto Scaling Group. This is useful when using Cluster Autoscaler.
|
|
905
|
+
|
|
906
|
+
See [EKS best practices](https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/) for more details.
|
|
907
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] instance_types: Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
|
|
908
|
+
:param _builtins.str kubelet_extra_args: Extra args to pass to the Kubelet. Corresponds to the options passed in the `--kubeletExtraArgs` flag to `/etc/eks/bootstrap.sh`. For example, '--port=10251 --address=0.0.0.0'. To escape characters in the extra argsvalue, wrap the value in quotes. For example, `kubeletExtraArgs = '--allowed-unsafe-sysctls "net.core.somaxconn"'`.
|
|
909
|
+
Note that this field conflicts with `launchTemplate`.
|
|
910
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] labels: Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
|
|
911
|
+
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] launch_template: Launch Template settings.
|
|
912
|
+
|
|
913
|
+
Note: This field is mutually exclusive with `kubeletExtraArgs` and `bootstrapExtraArgs`.
|
|
914
|
+
:param pulumi.Input[_builtins.str] node_group_name: Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
|
|
915
|
+
:param pulumi.Input[_builtins.str] node_group_name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
|
|
916
|
+
:param pulumi.Input['pulumi_aws.iam.Role'] node_role: The IAM Role that provides permissions for the EKS Node Group.
|
|
917
|
+
|
|
918
|
+
Note: `nodeRole` and `nodeRoleArn` are mutually exclusive; exactly one of them must be provided.
|
|
919
|
+
:param pulumi.Input[_builtins.str] node_role_arn: Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
|
|
920
|
+
|
|
921
|
+
Note: `nodeRoleArn` and `nodeRole` are mutually exclusive; exactly one of them must be provided.
|
|
922
|
+
:param pulumi.Input[Sequence[pulumi.Input[Union['NodeadmOptionsArgs', 'NodeadmOptionsArgsDict']]]] nodeadm_extra_options: Extra nodeadm configuration sections to be added to the nodeadm user data. These can be shell scripts, nodeadm NodeConfig documents, or any other user-data-compatible scripts. Additional nodeadm NodeConfig sections are merged with the base settings the provider sets, so you can overwrite the base settings or provide additional settings this way.
|
|
923
|
+
The base settings the provider sets are:
|
|
924
|
+
- cluster.name
|
|
925
|
+
- cluster.apiServerEndpoint
|
|
926
|
+
- cluster.certificateAuthority
|
|
927
|
+
- cluster.cidr
|
|
928
|
+
|
|
929
|
+
Note: This is only applicable when using AL2023.
|
|
930
|
+
See for more details:
|
|
931
|
+
- https://awslabs.github.io/amazon-eks-ami/nodeadm/
|
|
932
|
+
- https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
|
|
933
|
+
:param pulumi.Input['OperatingSystem'] operating_system: The type of OS to use for the node group. Used to determine the right EKS-optimized AMI based on the instance types and GPU configuration.
|
|
934
|
+
Valid values are `RECOMMENDED`, `AL2`, `AL2023` and `Bottlerocket`.
|
|
935
|
+
|
|
936
|
+
Defaults to the current recommended OS.
|
|
937
|
+
:param pulumi.Input[_builtins.str] placement_group_availability_zone: The availability zone of the placement group for EFA support. Required if `enableEfaSupport` is true.
|
|
938
|
+
:param pulumi.Input[_builtins.str] release_version: AMI version of the EKS Node Group. Defaults to the latest AMI version for the cluster's Kubernetes version.
|
|
939
|
+
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] remote_access: Remote access settings.
|
|
940
|
+
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']] scaling_config: Scaling settings.
|
|
941
|
+
|
|
942
|
+
The default scaling settings of the node group's Auto Scaling Group are:
|
|
943
|
+
- desiredSize: 2
|
|
944
|
+
- minSize: 1
|
|
945
|
+
- maxSize: 2
|
|
946
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] subnet_ids: Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
|
|
947
|
+
|
|
948
|
+
If the `subnetIds` arg is not set, the default subnet IDs are chosen from the following list, in order:
|
|
949
|
+
- core.subnetIds
|
|
950
|
+
- core.privateIds
|
|
951
|
+
- core.publicSubnetIds
|
|
952
|
+
|
|
953
|
+
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
|
|
954
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] tags: Key-value mapping of resource tags.
|
|
955
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]] taints: The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
|
|
956
|
+
:param pulumi.Input[_builtins.str] user_data: User-specified code to run on node startup. This is expected to handle the full AWS EKS node bootstrapping. If omitted, the provider will configure the user data.
|
|
957
|
+
|
|
958
|
+
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data.
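
A minimal sketch of the alternative described for `enableIMDSv2` above: instead of setting that flag, supply a custom launch template that enforces IMDSv2 through its metadata options. The `eks_cluster` and `node_role` resources are assumed to exist (see the Example Usage for this resource), and the metadata option values are illustrative.

```python
import pulumi_aws as aws
import pulumi_eks as eks

# Hypothetical launch template that requires IMDSv2 tokens for instance metadata access.
imdsv2_launch_template = aws.ec2.LaunchTemplate("imdsv2-launch-template",
    metadata_options={
        "http_tokens": "required",
        "http_put_response_hop_limit": 2,
    })

imdsv2_node_group = eks.ManagedNodeGroup("imdsv2-node-group",
    cluster=eks_cluster,
    node_role=node_role,
    launch_template={
        "id": imdsv2_launch_template.id,
        "version": imdsv2_launch_template.latest_version.apply(lambda v: str(v)),
    })
```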
|
|
959
|
+
"""
|
|
960
|
+
...
|
|
961
|
+
@overload
|
|
962
|
+
def __init__(__self__,
|
|
963
|
+
resource_name: str,
|
|
964
|
+
args: ManagedNodeGroupArgs,
|
|
965
|
+
opts: Optional[pulumi.ResourceOptions] = None):
|
|
966
|
+
"""
|
|
967
|
+
Manages an EKS Node Group, which can provision and optionally update an Auto Scaling Group of Kubernetes worker nodes compatible with EKS. Additional documentation about this functionality can be found in the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).
|
|
968
|
+
|
|
969
|
+
## Example Usage
|
|
970
|
+
### Basic Managed Node Group
|
|
971
|
+
This example demonstrates creating a managed node group with typical defaults. The node group uses the latest EKS-optimized Amazon Linux AMI, creates 2 nodes, and runs on t3.medium instances. Instance security groups are automatically configured.
|
|
972
|
+
|
|
973
|
+
```python
|
|
974
|
+
import pulumi
|
|
975
|
+
import json
|
|
976
|
+
import pulumi_aws as aws
|
|
977
|
+
import pulumi_awsx as awsx
|
|
978
|
+
import pulumi_eks as eks
|
|
979
|
+
|
|
980
|
+
eks_vpc = awsx.ec2.Vpc("eks-vpc",
|
|
981
|
+
enable_dns_hostnames=True,
|
|
982
|
+
cidr_block="10.0.0.0/16")
|
|
983
|
+
eks_cluster = eks.Cluster("eks-cluster",
|
|
984
|
+
vpc_id=eks_vpc.vpc_id,
|
|
985
|
+
authentication_mode=eks.AuthenticationMode.API,
|
|
986
|
+
public_subnet_ids=eks_vpc.public_subnet_ids,
|
|
987
|
+
private_subnet_ids=eks_vpc.private_subnet_ids,
|
|
988
|
+
skip_default_node_group=True)
|
|
989
|
+
node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({
|
|
990
|
+
"Version": "2012-10-17",
|
|
991
|
+
"Statement": [{
|
|
992
|
+
"Action": "sts:AssumeRole",
|
|
993
|
+
"Effect": "Allow",
|
|
994
|
+
"Sid": "",
|
|
995
|
+
"Principal": {
|
|
996
|
+
"Service": "ec2.amazonaws.com",
|
|
997
|
+
},
|
|
998
|
+
}],
|
|
999
|
+
}))
|
|
1000
|
+
worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy",
|
|
1001
|
+
role=node_role.name,
|
|
1002
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
|
|
1003
|
+
cni_policy = aws.iam.RolePolicyAttachment("cni-policy",
|
|
1004
|
+
role=node_role.name,
|
|
1005
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
|
|
1006
|
+
registry_policy = aws.iam.RolePolicyAttachment("registry-policy",
|
|
1007
|
+
role=node_role.name,
|
|
1008
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
|
|
1009
|
+
node_group = eks.ManagedNodeGroup("node-group",
|
|
1010
|
+
cluster=eks_cluster,
|
|
1011
|
+
node_role=node_role)
|
|
1012
|
+
|
|
1013
|
+
```
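
The defaults above can be overridden per node group. A minimal sketch, reusing `eks_cluster` and `node_role` from the basic example; the instance type, scaling limits, labels, and disk size are illustrative values, not recommendations.

```python
custom_node_group = eks.ManagedNodeGroup("custom-node-group",
    cluster=eks_cluster,
    node_role=node_role,
    instance_types=["t3.large"],
    scaling_config={
        "min_size": 1,
        "desired_size": 3,
        "max_size": 6,
    },
    labels={
        "workload": "general",
    },
    disk_size=50)
```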
|
|
1014
|
+
### Enabling EFA Support
|
|
1015
|
+
|
|
1016
|
+
Enabling EFA support for a node group will do the following:
|
|
1017
|
+
- All EFA interfaces supported by the instance will be exposed on the launch template used by the node group
|
|
1018
|
+
- A `clustered` placement group will be created and passed to the launch template
|
|
1019
|
+
- Checks will be performed to ensure that the instance type supports EFA and that the specified AZ is supported by the chosen instance type
|
|
1020
|
+
|
|
1021
|
+
The GPU-optimized AMIs include all necessary drivers and libraries to support EFA. If you're choosing an instance type without GPU acceleration, you will need to install the drivers and libraries manually and bake a custom AMI.
|
|
1022
|
+
|
|
1023
|
+
You can use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request these interfaces to be mounted to their containers.
|
|
1024
|
+
Your application container will need the necessary libraries and runtimes (e.g. libfabric) in order to leverage the EFA interfaces.
|
|
1025
|
+
|
|
1026
|
+
```python
|
|
1027
|
+
import pulumi
|
|
1028
|
+
import json
|
|
1029
|
+
import pulumi_aws as aws
|
|
1030
|
+
import pulumi_awsx as awsx
|
|
1031
|
+
import pulumi_eks as eks
|
|
1032
|
+
import pulumi_kubernetes as kubernetes
|
|
1033
|
+
|
|
1034
|
+
eks_vpc = awsx.ec2.Vpc("eks-vpc",
|
|
1035
|
+
enable_dns_hostnames=True,
|
|
1036
|
+
cidr_block="10.0.0.0/16")
|
|
1037
|
+
eks_cluster = eks.Cluster("eks-cluster",
|
|
1038
|
+
vpc_id=eks_vpc.vpc_id,
|
|
1039
|
+
authentication_mode=eks.AuthenticationMode.API,
|
|
1040
|
+
public_subnet_ids=eks_vpc.public_subnet_ids,
|
|
1041
|
+
private_subnet_ids=eks_vpc.private_subnet_ids,
|
|
1042
|
+
skip_default_node_group=True)
|
|
1043
|
+
k8_s_provider = kubernetes.Provider("k8sProvider", kubeconfig=eks_cluster.kubeconfig)
|
|
1044
|
+
node_role = aws.iam.Role("node-role", assume_role_policy=json.dumps({
|
|
1045
|
+
"Version": "2012-10-17",
|
|
1046
|
+
"Statement": [{
|
|
1047
|
+
"Action": "sts:AssumeRole",
|
|
1048
|
+
"Effect": "Allow",
|
|
1049
|
+
"Sid": "",
|
|
1050
|
+
"Principal": {
|
|
1051
|
+
"Service": "ec2.amazonaws.com",
|
|
1052
|
+
},
|
|
1053
|
+
}],
|
|
1054
|
+
}))
|
|
1055
|
+
worker_node_policy = aws.iam.RolePolicyAttachment("worker-node-policy",
|
|
1056
|
+
role=node_role.name,
|
|
1057
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy")
|
|
1058
|
+
cni_policy = aws.iam.RolePolicyAttachment("cni-policy",
|
|
1059
|
+
role=node_role.name,
|
|
1060
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy")
|
|
1061
|
+
registry_policy = aws.iam.RolePolicyAttachment("registry-policy",
|
|
1062
|
+
role=node_role.name,
|
|
1063
|
+
policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly")
|
|
1064
|
+
|
|
1065
|
+
# The node group for running system pods (e.g. coredns, etc.)
|
|
1066
|
+
system_node_group = eks.ManagedNodeGroup("system-node-group",
|
|
1067
|
+
cluster=eks_cluster,
|
|
1068
|
+
node_role=node_role)
|
|
1069
|
+
|
|
1070
|
+
# The EFA device plugin for exposing EFA interfaces as extended resources
|
|
1071
|
+
device_plugin = kubernetes.helm.v3.Release("device-plugin",
|
|
1072
|
+
version="0.5.7",
|
|
1073
|
+
repository_opts={
|
|
1074
|
+
"repo": "https://aws.github.io/eks-charts",
|
|
1075
|
+
},
|
|
1076
|
+
chart="aws-efa-k8s-device-plugin",
|
|
1077
|
+
namespace="kube-system",
|
|
1078
|
+
atomic=True,
|
|
1079
|
+
values={
|
|
1080
|
+
"tolerations": [{
|
|
1081
|
+
"key": "efa-enabled",
|
|
1082
|
+
"operator": "Exists",
|
|
1083
|
+
"effect": "NoExecute",
|
|
1084
|
+
}],
|
|
1085
|
+
},
|
|
1086
|
+
opts = pulumi.ResourceOptions(provider=k8_s_provider))
|
|
1087
|
+
|
|
1088
|
+
# The node group for running EFA enabled workloads
|
|
1089
|
+
efa_node_group = eks.ManagedNodeGroup("efa-node-group",
|
|
1090
|
+
cluster=eks_cluster,
|
|
1091
|
+
node_role=node_role,
|
|
1092
|
+
instance_types=["g6.8xlarge"],
|
|
1093
|
+
gpu=True,
|
|
1094
|
+
scaling_config={
|
|
1095
|
+
"min_size": 2,
|
|
1096
|
+
"desired_size": 2,
|
|
1097
|
+
"max_size": 4,
|
|
1098
|
+
},
|
|
1099
|
+
enable_efa_support=True,
|
|
1100
|
+
placement_group_availability_zone="us-west-2b",
|
|
1101
|
+
|
|
1102
|
+
# Taint the nodes so that only pods that tolerate the efa-enabled taint can be scheduled on them
|
|
1103
|
+
taints=[{
|
|
1104
|
+
"key": "efa-enabled",
|
|
1105
|
+
"value": "true",
|
|
1106
|
+
"effect": "NO_EXECUTE",
|
|
1107
|
+
}],
|
|
1108
|
+
|
|
1109
|
+
# Instances with GPUs usually have NVMe instance store volumes, so we can mount them in RAID-0 for kubelet and containerd
|
|
1110
|
+
# These are faster than the regular EBS volumes
|
|
1111
|
+
nodeadm_extra_options=[{
|
|
1112
|
+
"content_type": "application/node.eks.aws",
|
|
1113
|
+
"content": \"\"\"apiVersion: node.eks.aws/v1alpha1
|
|
1114
|
+
kind: NodeConfig
|
|
1115
|
+
spec:
|
|
1116
|
+
instance:
|
|
1117
|
+
localStorage:
|
|
1118
|
+
strategy: RAID0
|
|
1119
|
+
\"\"\",
|
|
1120
|
+
}])
|
|
1121
|
+
|
|
1122
|
+
```
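
Once the device plugin is installed, pods request the EFA interfaces as an extended resource and must tolerate the taint applied above. A minimal sketch of such a pod, building on the example; the extended resource name `vpc.amazonaws.com/efa` is the one the aws-efa-k8s-device-plugin typically registers, and the image and command are illustrative.

```python
import pulumi
import pulumi_kubernetes as kubernetes

efa_test_pod = kubernetes.core.v1.Pod("efa-test-pod",
    metadata={"namespace": "default"},
    spec={
        "tolerations": [{
            "key": "efa-enabled",
            "operator": "Exists",
            "effect": "NoExecute",
        }],
        "containers": [{
            "name": "efa-test",
            "image": "public.ecr.aws/amazonlinux/amazonlinux:2023",
            "command": ["sleep", "infinity"],
            "resources": {
                "limits": {
                    "vpc.amazonaws.com/efa": "1",
                },
            },
        }],
    },
    opts=pulumi.ResourceOptions(provider=k8_s_provider))
```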
|
|
1123
|
+
|
|
1124
|
+
:param str resource_name: The name of the resource.
|
|
1125
|
+
:param ManagedNodeGroupArgs args: The arguments to use to populate this resource's properties.
|
|
1126
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
|
1127
|
+
"""
|
|
1128
|
+
...
|
|
1129
|
+
def __init__(__self__, resource_name: str, *args, **kwargs):
|
|
1130
|
+
resource_args, opts = _utilities.get_resource_args_opts(ManagedNodeGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
|
|
1131
|
+
if resource_args is not None:
|
|
1132
|
+
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
|
|
1133
|
+
else:
|
|
1134
|
+
__self__._internal_init(resource_name, *args, **kwargs)
|
|
1135
|
+
|
|
1136
|
+
def _internal_init(__self__,
|
|
1137
|
+
resource_name: str,
|
|
1138
|
+
opts: Optional[pulumi.ResourceOptions] = None,
|
|
1139
|
+
ami_id: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1140
|
+
ami_type: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1141
|
+
bootstrap_extra_args: Optional[_builtins.str] = None,
|
|
1142
|
+
bottlerocket_settings: Optional[pulumi.Input[Mapping[str, Any]]] = None,
|
|
1143
|
+
capacity_type: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1144
|
+
cluster: Optional[pulumi.Input[Union['Cluster', Union['CoreDataArgs', 'CoreDataArgsDict']]]] = None,
|
|
1145
|
+
cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1146
|
+
disk_size: Optional[pulumi.Input[_builtins.int]] = None,
|
|
1147
|
+
enable_efa_support: Optional[_builtins.bool] = None,
|
|
1148
|
+
enable_imd_sv2: Optional[_builtins.bool] = None,
|
|
1149
|
+
force_update_version: Optional[pulumi.Input[_builtins.bool]] = None,
|
|
1150
|
+
gpu: Optional[pulumi.Input[_builtins.bool]] = None,
|
|
1151
|
+
ignore_scaling_changes: Optional[_builtins.bool] = None,
|
|
1152
|
+
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
|
|
1153
|
+
kubelet_extra_args: Optional[_builtins.str] = None,
|
|
1154
|
+
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
|
|
1155
|
+
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
|
|
1156
|
+
node_group_name: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1157
|
+
node_group_name_prefix: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1158
|
+
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
|
|
1159
|
+
node_role_arn: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1160
|
+
nodeadm_extra_options: Optional[pulumi.Input[Sequence[pulumi.Input[Union['NodeadmOptionsArgs', 'NodeadmOptionsArgsDict']]]]] = None,
|
|
1161
|
+
operating_system: Optional[pulumi.Input['OperatingSystem']] = None,
|
|
1162
|
+
placement_group_availability_zone: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1163
|
+
release_version: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1164
|
+
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
|
|
1165
|
+
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
|
|
1166
|
+
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
|
|
1167
|
+
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
|
|
1168
|
+
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
|
|
1169
|
+
user_data: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1170
|
+
version: Optional[pulumi.Input[_builtins.str]] = None,
|
|
1171
|
+
__props__=None):
|
|
1172
|
+
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
|
|
1173
|
+
if not isinstance(opts, pulumi.ResourceOptions):
|
|
1174
|
+
raise TypeError('Expected resource options to be a ResourceOptions instance')
|
|
1175
|
+
if opts.id is not None:
|
|
1176
|
+
raise ValueError('ComponentResource classes do not support opts.id')
|
|
1177
|
+
else:
|
|
1178
|
+
if __props__ is not None:
|
|
1179
|
+
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
|
|
1180
|
+
__props__ = ManagedNodeGroupArgs.__new__(ManagedNodeGroupArgs)
|
|
1181
|
+
|
|
1182
|
+
__props__.__dict__["ami_id"] = ami_id
|
|
1183
|
+
__props__.__dict__["ami_type"] = ami_type
|
|
1184
|
+
__props__.__dict__["bootstrap_extra_args"] = bootstrap_extra_args
|
|
1185
|
+
__props__.__dict__["bottlerocket_settings"] = bottlerocket_settings
|
|
1186
|
+
__props__.__dict__["capacity_type"] = capacity_type
|
|
1187
|
+
if cluster is None and not opts.urn:
|
|
1188
|
+
raise TypeError("Missing required property 'cluster'")
|
|
1189
|
+
__props__.__dict__["cluster"] = cluster
|
|
1190
|
+
__props__.__dict__["cluster_name"] = cluster_name
|
|
1191
|
+
__props__.__dict__["disk_size"] = disk_size
|
|
1192
|
+
__props__.__dict__["enable_efa_support"] = enable_efa_support
|
|
1193
|
+
__props__.__dict__["enable_imd_sv2"] = enable_imd_sv2
|
|
1194
|
+
__props__.__dict__["force_update_version"] = force_update_version
|
|
1195
|
+
__props__.__dict__["gpu"] = gpu
|
|
1196
|
+
__props__.__dict__["ignore_scaling_changes"] = ignore_scaling_changes
|
|
1197
|
+
__props__.__dict__["instance_types"] = instance_types
|
|
1198
|
+
__props__.__dict__["kubelet_extra_args"] = kubelet_extra_args
|
|
1199
|
+
__props__.__dict__["labels"] = labels
|
|
1200
|
+
__props__.__dict__["launch_template"] = launch_template
|
|
1201
|
+
__props__.__dict__["node_group_name"] = node_group_name
|
|
1202
|
+
__props__.__dict__["node_group_name_prefix"] = node_group_name_prefix
|
|
1203
|
+
__props__.__dict__["node_role"] = node_role
|
|
1204
|
+
__props__.__dict__["node_role_arn"] = node_role_arn
|
|
1205
|
+
__props__.__dict__["nodeadm_extra_options"] = nodeadm_extra_options
|
|
1206
|
+
__props__.__dict__["operating_system"] = operating_system
|
|
1207
|
+
__props__.__dict__["placement_group_availability_zone"] = placement_group_availability_zone
|
|
1208
|
+
__props__.__dict__["release_version"] = release_version
|
|
1209
|
+
__props__.__dict__["remote_access"] = remote_access
|
|
1210
|
+
__props__.__dict__["scaling_config"] = scaling_config
|
|
1211
|
+
__props__.__dict__["subnet_ids"] = subnet_ids
|
|
1212
|
+
__props__.__dict__["tags"] = tags
|
|
1213
|
+
__props__.__dict__["taints"] = taints
|
|
1214
|
+
__props__.__dict__["user_data"] = user_data
|
|
1215
|
+
__props__.__dict__["version"] = version
|
|
1216
|
+
__props__.__dict__["node_group"] = None
|
|
1217
|
+
__props__.__dict__["placement_group_name"] = None
|
|
1218
|
+
super(ManagedNodeGroup, __self__).__init__(
|
|
1219
|
+
'eks:index:ManagedNodeGroup',
|
|
1220
|
+
resource_name,
|
|
1221
|
+
__props__,
|
|
1222
|
+
opts,
|
|
1223
|
+
remote=True)
|
|
1224
|
+
|
|
1225
|
+
@_builtins.property
|
|
1226
|
+
@pulumi.getter(name="nodeGroup")
|
|
1227
|
+
def node_group(self) -> pulumi.Output['pulumi_aws.eks.NodeGroup']:
|
|
1228
|
+
"""
|
|
1229
|
+
The AWS managed node group.
|
|
1230
|
+
"""
|
|
1231
|
+
return pulumi.get(self, "node_group")
|
|
1232
|
+
|
|
1233
|
+
@_builtins.property
|
|
1234
|
+
@pulumi.getter(name="placementGroupName")
|
|
1235
|
+
def placement_group_name(self) -> pulumi.Output[_builtins.str]:
|
|
1236
|
+
"""
|
|
1237
|
+
The name of the placement group created for the managed node group.
|
|
1238
|
+
"""
|
|
1239
|
+
return pulumi.get(self, "placement_group_name")
|
|
1240
|
+
|
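
The component's outputs can be consumed like any other Pulumi outputs. A minimal sketch, assuming a `node_group` component like the one created in the examples above; the export names are arbitrary.

```python
import pulumi

# The underlying aws.eks.NodeGroup resource is exposed via `node_group`;
# `placement_group_name` is only populated when EFA support creates a placement group.
pulumi.export("nodeGroupName", node_group.node_group.apply(lambda ng: ng.node_group_name))
pulumi.export("placementGroupName", node_group.placement_group_name)
```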