eks_cli 0.1.0

data/README.md ADDED
@@ -0,0 +1,52 @@
+ # EKS-CLI
+
+ EKS cluster bootstrap with batteries included
+
+ ## Usage
+
+ ```
+ $ gem install eks_cli
+ $ eks bootstrap us-west-2 --cluster-name=My-EKS-Cluster
+ $ eks create-cluster-vpc --cluster-name=My-EKS-Cluster
+ $ eks create-eks-cluster --cluster-name=My-EKS-Cluster
+ $ eks create-cluster-security-group --cluster-name My-EKS-Cluster --open-ports=22
+ $ eks create-nodegroup --cluster-name My-EKS-Cluster --group-name nodes --ssh-key-name my-ssh-key --min 1 --max 3
+ $ eks create-nodegroup --cluster-name My-EKS-Cluster --group-name other-nodes --ssh-key-name my-ssh-key --min 3 --max 3 --instance-type m5.2xlarge
+ $ eks create-nodegroup --all --cluster-name My-EKS-Cluster --yes
+ ```
+
+ ## Extra Stuff
+
+ ### Setting IAM policies to be attached to EKS nodes
+
+ `$ eks set-iam-policies --cluster-name=My-EKS-Cluster --policies=AmazonS3FullAccess AmazonDynamoDBFullAccess`
+
+ Ensures the listed policies are attached to every nodegroup's instance role once the nodegroup is created.
+
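+ For reference, a minimal sketch of what attaching these policies to a nodegroup's instance role could look like with `aws-sdk-iam`; the region and role name are illustrative, not the gem's internals:
+
+ ```ruby
+ # Illustrative only: attach the configured managed policies to a nodegroup's
+ # instance role after its CloudFormation stack has been created.
+ require 'aws-sdk-iam'
+
+ iam = Aws::IAM::Client.new(region: 'us-west-2')
+ node_role_name = 'My-EKS-Cluster-nodes-NodeInstanceRole' # hypothetical role name
+
+ %w[AmazonS3FullAccess AmazonDynamoDBFullAccess].each do |policy|
+   iam.attach_role_policy(role_name: node_role_name,
+                          policy_arn: "arn:aws:iam::aws:policy/#{policy}")
+ end
+ ```
+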
+ ### Routing Route53 hostnames to a Kubernetes service
+
+ `$ eks update-dns my-cool-service.my-company.com cool-service --route53-hosted-zone-id=XXXXX --elb-hosted-zone-id=XXXXXX --cluster-name=My-EKS-Cluster`
+
+ Takes the ELB endpoint of the `cool-service` Kubernetes service and sets it as an alias record for `my-cool-service.my-company.com`.
+
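+ A rough sketch of the underlying Route53 call with `aws-sdk-route53`; the region, zone IDs and ELB DNS name are placeholders:
+
+ ```ruby
+ # Illustrative only: UPSERT an alias record pointing the hostname at the
+ # service's ELB. The values below are placeholders, not the gem's internals.
+ require 'aws-sdk-route53'
+
+ r53 = Aws::Route53::Client.new(region: 'us-west-2')
+ r53.change_resource_record_sets(
+   hosted_zone_id: 'XXXXX',                                   # --route53-hosted-zone-id
+   change_batch: {
+     changes: [{
+       action: 'UPSERT',
+       resource_record_set: {
+         name: 'my-cool-service.my-company.com',
+         type: 'A',
+         alias_target: {
+           hosted_zone_id: 'XXXXXX',                          # --elb-hosted-zone-id
+           dns_name: 'abc123.us-west-2.elb.amazonaws.com',    # taken from the k8s service
+           evaluate_target_health: false
+         }
+       }
+     }]
+   }
+ )
+ ```
+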
+ ### Enabling GPU
+
+ `$ eks enable-gpu --cluster-name EKS-Staging`
+
+ Installs the NVIDIA device plugin DaemonSet required to expose the nodes' GPUs to your pods.
+
+ *Assumptions*:
+
+ 1. You have a nodegroup using the [EKS GPU-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html)
+ 2. This nodegroup uses a GPU instance type (p2.* / p3.* families)
+
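+ To verify the GPUs are actually exposed once the plugin is running, a small `kubeclient` check can help; the cluster endpoint and token below are placeholders:
+
+ ```ruby
+ # Illustrative only: print each node's allocatable nvidia.com/gpu count.
+ require 'kubeclient'
+
+ client = Kubeclient::Client.new('https://<cluster-endpoint>/api', 'v1',
+                                 auth_options: { bearer_token: '<token>' })
+
+ client.get_nodes.each do |node|
+   gpus = node.status.allocatable['nvidia.com/gpu']
+   puts "#{node.metadata.name}: #{gpus || 0} GPU(s)"
+ end
+ ```
+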
+ ### Adding Dockerhub Secrets
+
+ `$ eks set-docker-registry-credentials <dockerhub-user> <dockerhub-password> <dockerhub-email> --cluster-name My-EKS-Cluster`
+
+ Adds your Dockerhub credentials as a Kubernetes secret and attaches it to the default service account's `imagePullSecrets`.
+
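+ Roughly, this boils down to the following Kubernetes calls; the secret name, endpoint and token here are assumptions for illustration, not necessarily what the gem does internally:
+
+ ```ruby
+ # Illustrative only: create a docker-registry secret and reference it from the
+ # default service account's imagePullSecrets.
+ require 'base64'
+ require 'json'
+ require 'kubeclient'
+
+ client = Kubeclient::Client.new('https://<cluster-endpoint>/api', 'v1',
+                                 auth_options: { bearer_token: '<token>' })
+
+ dockercfg = { auths: { 'https://index.docker.io/v1/' => {
+   username: '<dockerhub-user>', password: '<dockerhub-password>', email: '<dockerhub-email>',
+   auth: Base64.strict_encode64('<dockerhub-user>:<dockerhub-password>') } } }
+
+ secret = Kubeclient::Resource.new(
+   metadata: { name: 'dockerhub-creds', namespace: 'default' },  # assumed secret name
+   type: 'kubernetes.io/dockerconfigjson',
+   data: { '.dockerconfigjson' => Base64.strict_encode64(dockercfg.to_json) }
+ )
+ client.create_secret(secret)
+
+ client.patch_service_account('default',
+   { imagePullSecrets: [{ name: 'dockerhub-creds' }] }, 'default')
+ ```
+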
+ ### Creating Default Storage Class
+
+ `$ eks create-default-storage-class --cluster-name My-EKS-Cluster`
+
+ Creates a gp2 (EBS) storage class named `gp2` and marks it as the cluster's default storage class.
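+
+ Under the hood this is just a `StorageClass` object like the manifest bundled with the gem (shown later in this listing). A rough `kubeclient` sketch against the `storage.k8s.io/v1` API group, with placeholder endpoint and auth:
+
+ ```ruby
+ # Illustrative only: create a default gp2 StorageClass.
+ require 'kubeclient'
+
+ storage = Kubeclient::Client.new('https://<cluster-endpoint>/apis/storage.k8s.io', 'v1',
+                                  auth_options: { bearer_token: '<token>' })
+
+ sc = Kubeclient::Resource.new(
+   metadata: { name: 'gp2',
+               annotations: { 'storageclass.kubernetes.io/is-default-class' => 'true' } },
+   provisioner: 'kubernetes.io/aws-ebs',
+   parameters: { type: 'gp2' },
+   reclaimPolicy: 'Retain'
+ )
+ storage.create_storage_class(sc)
+ ```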
data/bin/eks ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/env ruby
+ require 'eks_cli'
+ EksCli::Cli.start(ARGV)
data/eks_cli.gemspec ADDED
@@ -0,0 +1,31 @@
+ # coding: utf-8
+ lib = File.expand_path("../lib", __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+
+ Gem::Specification.new do |s|
+   s.name        = 'eks_cli'
+   s.version     = '0.1.0'
+   s.date        = '2018-11-18'
+   s.summary     = "Make EKS great again!"
+   s.description = "A utility to create and manage EKS (Kubernetes) clusters on Amazon Web Services"
+   s.authors     = ["Erez Rabih"]
+   s.email       = 'erez.rabih@gmail.com'
+   s.homepage    = 'https://github.com/nanit/eks_cli'
+   s.license     = 'MIT'
+   s.files       = `git ls-files -z`.split("\x0").reject do |f|
+     f.match(%r{^(test|spec|features)/})
+   end
+   s.bindir        = "bin"
+   s.executables   = ["eks"]
+   s.require_paths = ["lib"]
+   s.add_dependency 'thor'
+   s.add_dependency 'aws-sdk-iam'
+   s.add_dependency 'aws-sdk-eks'
+   s.add_dependency 'aws-sdk-ec2'
+   s.add_dependency 'aws-sdk-cloudformation'
+   s.add_dependency 'aws-sdk-route53'
+   s.add_dependency 'activesupport'
+   s.add_dependency 'kubeclient'
+ end
+
@@ -0,0 +1,10 @@
+ kind: StorageClass
+ apiVersion: storage.k8s.io/v1
+ metadata:
+   name: gp2
+   annotations:
+     storageclass.kubernetes.io/is-default-class: "true"
+ provisioner: kubernetes.io/aws-ebs
+ parameters:
+   type: gp2
+ reclaimPolicy: Retain
@@ -0,0 +1,305 @@
1
+ ---
2
+ AWSTemplateFormatVersion: '2010-09-09'
3
+ Description: 'Amazon EKS - Node Group - Released 2018-08-30'
4
+
5
+ Parameters:
6
+
7
+ KeyName:
8
+ Description: The EC2 Key Pair to allow SSH access to the instances
9
+ Type: AWS::EC2::KeyPair::KeyName
10
+
11
+ NodeImageId:
12
+ Type: AWS::EC2::Image::Id
13
+ Description: AMI id for the node instances.
14
+
15
+ NodeInstanceType:
16
+ Description: EC2 instance type for the node instances
17
+ Type: String
18
+ Default: t2.medium
19
+ AllowedValues:
20
+ - t2.small
21
+ - t2.medium
22
+ - t2.large
23
+ - t2.xlarge
24
+ - t2.2xlarge
25
+ - m3.medium
26
+ - m3.large
27
+ - m3.xlarge
28
+ - m3.2xlarge
29
+ - m4.large
30
+ - m4.xlarge
31
+ - m4.2xlarge
32
+ - m4.4xlarge
33
+ - m4.10xlarge
34
+ - m5.large
35
+ - m5.xlarge
36
+ - m5.2xlarge
37
+ - m5.4xlarge
38
+ - m5.12xlarge
39
+ - m5.24xlarge
40
+ - c4.large
41
+ - c4.xlarge
42
+ - c4.2xlarge
43
+ - c4.4xlarge
44
+ - c4.8xlarge
45
+ - c5.large
46
+ - c5.xlarge
47
+ - c5.2xlarge
48
+ - c5.4xlarge
49
+ - c5.9xlarge
50
+ - c5.18xlarge
51
+ - i3.large
52
+ - i3.xlarge
53
+ - i3.2xlarge
54
+ - i3.4xlarge
55
+ - i3.8xlarge
56
+ - i3.16xlarge
57
+ - r3.xlarge
58
+ - r3.2xlarge
59
+ - r3.4xlarge
60
+ - r3.8xlarge
61
+ - r4.large
62
+ - r4.xlarge
63
+ - r4.2xlarge
64
+ - r4.4xlarge
65
+ - r4.8xlarge
66
+ - r4.16xlarge
67
+ - x1.16xlarge
68
+ - x1.32xlarge
69
+ - p2.xlarge
70
+ - p2.8xlarge
71
+ - p2.16xlarge
72
+ - p3.2xlarge
73
+ - p3.8xlarge
74
+ - p3.16xlarge
75
+ ConstraintDescription: Must be a valid EC2 instance type
76
+
77
+ NodeAutoScalingGroupMinSize:
78
+ Type: Number
79
+ Description: Minimum size of Node Group ASG.
80
+ Default: 1
81
+
82
+ NodeAutoScalingGroupMaxSize:
83
+ Type: Number
84
+ Description: Maximum size of Node Group ASG.
85
+ Default: 3
86
+
87
+ NodeVolumeSize:
88
+ Type: Number
89
+ Description: Node volume size
90
+ Default: 20
91
+
92
+ ClusterName:
93
+ Description: The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster.
94
+ Type: String
95
+
96
+ BootstrapArguments:
97
+ Description: Arguments to pass to the bootstrap script. See files/bootstrap.sh in https://github.com/awslabs/amazon-eks-ami
98
+ Default: ""
99
+ Type: String
100
+
101
+ NodeGroupName:
102
+ Description: Unique identifier for the Node Group.
103
+ Type: String
104
+
105
+ ClusterControlPlaneSecurityGroup:
106
+ Description: The security group of the cluster control plane.
107
+ Type: AWS::EC2::SecurityGroup::Id
108
+
109
+ VpcId:
110
+ Description: The VPC of the worker instances
111
+ Type: AWS::EC2::VPC::Id
112
+
113
+ Subnets:
114
+ Description: The subnets where workers can be created.
115
+ Type: List<AWS::EC2::Subnet::Id>
116
+
117
+ ClusterSecurityGroup:
118
+ Description: Security group ID for in-cluster communication between node groups
119
+ Type: AWS::EC2::SecurityGroup::Id
120
+
121
+ Metadata:
122
+ AWS::CloudFormation::Interface:
123
+ ParameterGroups:
124
+ -
125
+ Label:
126
+ default: "EKS Cluster"
127
+ Parameters:
128
+ - ClusterName
129
+ - ClusterControlPlaneSecurityGroup
130
+ -
131
+ Label:
132
+ default: "Worker Node Configuration"
133
+ Parameters:
134
+ - NodeGroupName
135
+ - NodeAutoScalingGroupMinSize
136
+ - NodeAutoScalingGroupMaxSize
137
+ - NodeInstanceType
138
+ - NodeImageId
139
+ - NodeVolumeSize
140
+ - KeyName
141
+ - BootstrapArguments
142
+ -
143
+ Label:
144
+ default: "Worker Network Configuration"
145
+ Parameters:
146
+ - VpcId
147
+ - Subnets
148
+
149
+ Resources:
150
+
151
+ NodeInstanceProfile:
152
+ Type: AWS::IAM::InstanceProfile
153
+ Properties:
154
+ Path: "/"
155
+ Roles:
156
+ - !Ref NodeInstanceRole
157
+
158
+ NodeInstanceRole:
159
+ Type: AWS::IAM::Role
160
+ Properties:
161
+ AssumeRolePolicyDocument:
162
+ Version: '2012-10-17'
163
+ Statement:
164
+ - Effect: Allow
165
+ Principal:
166
+ Service:
167
+ - ec2.amazonaws.com
168
+ Action:
169
+ - sts:AssumeRole
170
+ Path: "/"
171
+ ManagedPolicyArns:
172
+ - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
173
+ - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
174
+ - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
175
+
176
+ NodeSecurityGroup:
177
+ Type: AWS::EC2::SecurityGroup
178
+ Properties:
179
+ GroupDescription: Security group for all nodes in the cluster
180
+ VpcId:
181
+ !Ref VpcId
182
+ Tags:
183
+ - Key: !Sub "kubernetes.io/cluster/${ClusterName}"
184
+ Value: 'owned'
185
+
186
+ NodeSecurityGroupIngress:
187
+ Type: AWS::EC2::SecurityGroupIngress
188
+ DependsOn: NodeSecurityGroup
189
+ Properties:
190
+ Description: Allow node to communicate with each other
191
+ GroupId: !Ref NodeSecurityGroup
192
+ SourceSecurityGroupId: !Ref NodeSecurityGroup
193
+ IpProtocol: '-1'
194
+ FromPort: 0
195
+ ToPort: 65535
196
+
197
+ NodeSecurityGroupFromControlPlaneIngress:
198
+ Type: AWS::EC2::SecurityGroupIngress
199
+ DependsOn: NodeSecurityGroup
200
+ Properties:
201
+ Description: Allow worker Kubelets and pods to receive communication from the cluster control plane
202
+ GroupId: !Ref NodeSecurityGroup
203
+ SourceSecurityGroupId: !Ref ClusterControlPlaneSecurityGroup
204
+ IpProtocol: tcp
205
+ FromPort: 1025
206
+ ToPort: 65535
207
+
208
+ ControlPlaneEgressToNodeSecurityGroup:
209
+ Type: AWS::EC2::SecurityGroupEgress
210
+ DependsOn: NodeSecurityGroup
211
+ Properties:
212
+ Description: Allow the cluster control plane to communicate with worker Kubelet and pods
213
+ GroupId: !Ref ClusterControlPlaneSecurityGroup
214
+ DestinationSecurityGroupId: !Ref NodeSecurityGroup
215
+ IpProtocol: tcp
216
+ FromPort: 1025
217
+ ToPort: 65535
218
+
219
+ NodeSecurityGroupFromControlPlaneOn443Ingress:
220
+ Type: AWS::EC2::SecurityGroupIngress
221
+ DependsOn: NodeSecurityGroup
222
+ Properties:
223
+ Description: Allow pods running extension API servers on port 443 to receive communication from cluster control plane
224
+ GroupId: !Ref NodeSecurityGroup
225
+ SourceSecurityGroupId: !Ref ClusterControlPlaneSecurityGroup
226
+ IpProtocol: tcp
227
+ FromPort: 443
228
+ ToPort: 443
229
+
230
+ ControlPlaneEgressToNodeSecurityGroupOn443:
231
+ Type: AWS::EC2::SecurityGroupEgress
232
+ DependsOn: NodeSecurityGroup
233
+ Properties:
234
+ Description: Allow the cluster control plane to communicate with pods running extension API servers on port 443
235
+ GroupId: !Ref ClusterControlPlaneSecurityGroup
236
+ DestinationSecurityGroupId: !Ref NodeSecurityGroup
237
+ IpProtocol: tcp
238
+ FromPort: 443
239
+ ToPort: 443
240
+
241
+ ClusterControlPlaneSecurityGroupIngress:
242
+ Type: AWS::EC2::SecurityGroupIngress
243
+ DependsOn: NodeSecurityGroup
244
+ Properties:
245
+ Description: Allow pods to communicate with the cluster API Server
246
+ GroupId: !Ref ClusterControlPlaneSecurityGroup
247
+ SourceSecurityGroupId: !Ref NodeSecurityGroup
248
+ IpProtocol: tcp
249
+ ToPort: 443
250
+ FromPort: 443
251
+
252
+ NodeGroup:
253
+ Type: AWS::AutoScaling::AutoScalingGroup
254
+ Properties:
255
+ DesiredCapacity: !Ref NodeAutoScalingGroupMaxSize
256
+ LaunchConfigurationName: !Ref NodeLaunchConfig
257
+ MinSize: !Ref NodeAutoScalingGroupMinSize
258
+ MaxSize: !Ref NodeAutoScalingGroupMaxSize
259
+ VPCZoneIdentifier:
260
+ !Ref Subnets
261
+ Tags:
262
+ - Key: Name
263
+ Value: !Sub "${ClusterName}-${NodeGroupName}-Node"
264
+ PropagateAtLaunch: 'true'
265
+ - Key: !Sub 'kubernetes.io/cluster/${ClusterName}'
266
+ Value: 'owned'
267
+ PropagateAtLaunch: 'true'
268
+ UpdatePolicy:
269
+ AutoScalingRollingUpdate:
270
+ MinInstancesInService: '1'
271
+ MaxBatchSize: '1'
272
+
273
+ NodeLaunchConfig:
274
+ Type: AWS::AutoScaling::LaunchConfiguration
275
+ Properties:
276
+ AssociatePublicIpAddress: 'true'
277
+ IamInstanceProfile: !Ref NodeInstanceProfile
278
+ ImageId: !Ref NodeImageId
279
+ InstanceType: !Ref NodeInstanceType
280
+ KeyName: !Ref KeyName
281
+ SecurityGroups:
282
+ - !Ref NodeSecurityGroup
283
+ - !Ref ClusterSecurityGroup
284
+ BlockDeviceMappings:
285
+ - DeviceName: /dev/xvda
286
+ Ebs:
287
+ VolumeSize: !Ref NodeVolumeSize
288
+ VolumeType: gp2
289
+ DeleteOnTermination: true
290
+ UserData:
291
+ Fn::Base64:
292
+ !Sub |
293
+ #!/bin/bash
294
+ set -o xtrace
295
+ /etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}
296
+ /opt/aws/bin/cfn-signal --exit-code $? \
297
+ --stack ${AWS::StackName} \
298
+ --resource NodeGroup \
299
+ --region ${AWS::Region}
300
+
301
+ Outputs:
302
+ NodeInstanceRole:
303
+ Description: The node instance role
304
+ Value: !GetAtt NodeInstanceRole.Arn
305
+
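The nodegroup template above is what `eks create-nodegroup` feeds into CloudFormation. A rough sketch of how the CLI options could map onto a stack via `aws-sdk-cloudformation`; the stack name, template path and values are illustrative, and only a subset of the template's parameters is shown (NodeImageId, VpcId, Subnets and the security groups would also be required):

```ruby
# Illustrative only: create a nodegroup stack from the bundled template,
# mapping CLI options onto the template's Parameters.
require 'aws-sdk-cloudformation'

cf = Aws::CloudFormation::Client.new(region: 'us-west-2')

cf.create_stack(
  stack_name: 'My-EKS-Cluster-nodegroup-nodes',          # hypothetical stack name
  template_body: File.read('nodegroup_cf_stack.yaml'),   # hypothetical template path
  capabilities: ['CAPABILITY_IAM'],                      # the template creates IAM resources
  parameters: [
    { parameter_key: 'ClusterName',                 parameter_value: 'My-EKS-Cluster' },
    { parameter_key: 'NodeGroupName',               parameter_value: 'nodes' },
    { parameter_key: 'KeyName',                     parameter_value: 'my-ssh-key' },
    { parameter_key: 'NodeInstanceType',            parameter_value: 'm5.xlarge' },
    { parameter_key: 'NodeAutoScalingGroupMinSize', parameter_value: '1' },
    { parameter_key: 'NodeAutoScalingGroupMaxSize', parameter_value: '3' }
  ]
)

# Wait until the stack (and its Auto Scaling group) is up.
cf.wait_until(:stack_create_complete, stack_name: 'My-EKS-Cluster-nodegroup-nodes')
```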
@@ -0,0 +1,38 @@
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+   name: nvidia-device-plugin-daemonset
+   namespace: kube-system
+ spec:
+   selector:
+     matchLabels:
+       name: nvidia-device-plugin-ds
+   template:
+     metadata:
+       # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+       # reserves resources for critical add-on pods so that they can be rescheduled after
+       # a failure. This annotation works in tandem with the toleration below.
+       annotations:
+         scheduler.alpha.kubernetes.io/critical-pod: ""
+       labels:
+         name: nvidia-device-plugin-ds
+     spec:
+       tolerations:
+         # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+         # This, along with the annotation above marks this pod as a critical add-on.
+         - key: CriticalAddonsOnly
+           operator: Exists
+       containers:
+         - image: nvidia/k8s-device-plugin:1.10
+           name: nvidia-device-plugin-ctr
+           securityContext:
+             allowPrivilegeEscalation: false
+             capabilities:
+               drop: ["ALL"]
+           volumeMounts:
+             - name: device-plugin
+               mountPath: /var/lib/kubelet/device-plugins
+       volumes:
+         - name: device-plugin
+           hostPath:
+             path: /var/lib/kubelet/device-plugins
@@ -0,0 +1,161 @@
+ require 'thor'
+
+ autoload :JSON, 'json'
+
+ module EksCli
+
+   autoload :Config, 'config'
+   autoload :NodeGroup, 'nodegroup'
+   module CloudFormation
+     autoload :Stack, 'cloudformation/stack'
+     autoload :VPC, 'cloudformation/vpc'
+   end
+   module EKS
+     autoload :Cluster, 'eks/cluster'
+   end
+   module K8s
+     autoload :Auth, 'k8s/auth'
+     autoload :Client, 'k8s/client'
+   end
+   module EC2
+     autoload :SecurityGroup, 'ec2/security_group'
+   end
+   module IAM
+     autoload :Client, 'iam/client'
+   end
+   module Route53
+     autoload :Client, 'route53/client'
+   end
+   module VPC
+     autoload :Client, 'vpc/client'
+   end
+
+   class Cli < Thor
+
+     class_option :cluster_name, required: true, aliases: :c
+
+     desc "bootstrap REGION", "bootstrap cluster configuration"
+     def bootstrap(region)
+       role = IAM::Client.new(cluster_name).create_eks_role
+       Config[cluster_name].bootstrap({region: region, eks_role_arn: role.arn})
+     end
+
+     desc "show-config", "print cluster configuration"
+     option :group_name, desc: "group name to show configuration for"
+     def show_config
+       if options[:group_name]
+         puts JSON.pretty_generate(Config[cluster_name].for_group(options[:group_name]))
+       else
+         puts JSON.pretty_generate(Config[cluster_name].read_from_disk)
+       end
+     end
+
+     desc "create-cluster-vpc", "creates a vpc according to aws cloudformation template"
+     def create_cluster_vpc
+       cfg = CloudFormation::VPC.create(cluster_name)
+       Config[cluster_name].write(cfg)
+     end
+
+     desc "create-eks-cluster", "create EKS cluster on AWS"
+     def create_eks_cluster
+       cluster = EKS::Cluster.new(cluster_name).create
+       cluster.await
+       Config[cluster_name].write({cluster_arn: cluster.arn})
+       cluster.update_kubeconfig
+     end
+
+     desc "enable-gpu", "installs nvidia plugin as a daemonset on the cluster"
+     def enable_gpu
+       K8s::Client.new(cluster_name).enable_gpu
+     end
+
+     desc "set-docker-registry-credentials USERNAME PASSWORD EMAIL", "sets docker registry credentials"
+     def set_docker_registry_credentials(username, password, email)
+       K8s::Client.new(cluster_name).set_docker_registry_credentials(username, password, email)
+     end
+
+     desc "create-default-storage-class", "creates default storage class on a new k8s cluster"
+     def create_default_storage_class
+       K8s::Client.new(cluster_name).create_default_storage_class
+     end
+
+     desc "create-nodegroup", "creates all nodegroups on environment"
+     option :all, type: :boolean, default: false, desc: "create all nodegroups. must be used in conjunction with --yes"
+     option :group_name, type: :string, desc: "create a specific nodegroup. can't be used with --all"
+     option :ami, desc: "AMI for the nodegroup"
+     option :instance_type, desc: "EC2 instance type (m5.xlarge etc...)"
+     option :num_subnets, type: :numeric, desc: "Number of subnets (AZs) to spread the nodegroup across"
+     option :ssh_key_name, desc: "Name of the default SSH key for the nodes"
+     option :taints, desc: "Kubernetes taints to put on the nodes for example \"dedicated=critical:NoSchedule\""
+     option :min, type: :numeric, desc: "Minimum number of nodes on the nodegroup"
+     option :max, type: :numeric, desc: "Maximum number of nodes on the nodegroup"
+     option :yes, type: :boolean, default: false, desc: "Perform nodegroup creation"
+     def create_nodegroup
+       Config[cluster_name].update_nodegroup(options) unless options[:all]
+       if options[:yes]
+         cf_stacks = nodegroups.map {|ng| ng.create(wait_for_completion: false)}
+         CloudFormation::Stack.await(cf_stacks)
+         cf_stacks.each {|s| IAM::Client.new(cluster_name).attach_node_policies(s.node_instance_role_name)}
+         K8s::Auth.new(cluster_name).update
+       end
+     end
+
+     desc "delete-nodegroup", "deletes cloudformation stack for nodegroup"
+     option :all, type: :boolean, default: false, desc: "delete all nodegroups. can't be used with --name"
+     option :name, type: :string, desc: "delete a specific nodegroup. can't be used with --all"
+     def delete_nodegroup
+       nodegroups.each(&:delete)
+     end
+
+     desc "update-auth", "update aws auth configmap to allow all nodegroups to connect to control plane"
+     def update_auth
+       K8s::Auth.new(cluster_name).update
+     end
+
+     desc "detach-iam-policies", "detaches added policies to nodegroup IAM Role"
+     option :all, type: :boolean, default: false, desc: "detach from all nodegroups. can't be used with --name"
+     option :name, type: :string, desc: "detach from a specific nodegroup. can't be used with --all"
+     def detach_iam_policies
+       nodegroups.each(&:detach_iam_policies)
+     end
+
+     desc "set-iam-policies", "sets IAM policies to be attached to created nodegroups"
+     option :policies, type: :array, required: true, desc: "IAM policies ARNs"
+     def set_iam_policies
+       Config[cluster_name].set_iam_policies(options[:policies])
+     end
+
+     desc "create-cluster-security-group", "creates a SG for cluster communication"
+     option :open_ports, type: :array, default: [], desc: "open ports on cluster nodes"
+     def create_cluster_security_group
+       open_ports = options[:open_ports].map(&:to_i)
+       gid = EC2::SecurityGroup.new(cluster_name, open_ports).create
+       Config[cluster_name].write({nodes_sg_id: gid})
+     end
+
+     desc "update-dns HOSTNAME K8S_SERVICE_NAME", "alters route53 CNAME records to point to k8s service ELBs"
+     option :route53_hosted_zone_id, required: true, desc: "hosted zone ID for the cname record on route53"
+     option :elb_hosted_zone_id, required: true, desc: "hosted zone ID for the ELB on ec2"
+     option :namespace, default: "default", desc: "the k8s namespace of the service"
+     def update_dns(hostname, k8s_service_name)
+       Route53::Client.new(cluster_name).update_dns(hostname, k8s_service_name, options[:namespace], options[:route53_hosted_zone_id], options[:elb_hosted_zone_id])
+     end
+
+     desc "set-inter-vpc-networking TO_VPC_ID TO_SG_ID", "creates a vpc peering connection, sets route tables and allows network access on SG"
+     def set_inter_vpc_networking(to_vpc_id, to_sg_id)
+       VPC::Client.new(cluster_name).set_inter_vpc_networking(to_vpc_id, to_sg_id)
+     end
+
+     no_commands do
+       def cluster_name; options[:cluster_name]; end
+
+       def all_nodegroups; Config[cluster_name]["groups"].keys; end
+
+       def nodegroups
+         ng = options[:group_name] ? [options[:group_name]] : all_nodegroups
+         ng.map {|n| NodeGroup.new(cluster_name, n)}
+       end
+     end
+
+   end
+ end
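The Thor class above is what `bin/eks` dispatches to, so the commands shown in the README can also be driven in-process; a tiny example using the README's sample values:

```ruby
# Illustrative only: invoke the CLI programmatically instead of via the `eks` binary.
require 'eks_cli'

EksCli::Cli.start(%w[
  create-nodegroup
  --cluster-name My-EKS-Cluster
  --group-name nodes
  --ssh-key-name my-ssh-key
  --min 1 --max 3
  --yes
])
```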
@@ -0,0 +1,11 @@
+ require 'aws-sdk-cloudformation'
+ require 'config'
+ module EksCli
+   module CloudFormation
+     class Client
+       def self.get(cluster_name)
+         @client ||= Aws::CloudFormation::Client.new(region: Config[cluster_name]["region"])
+       end
+     end
+   end
+ end