eks_cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,113 @@
+ require 'cloudformation/client'
+ require 'log'
+
+ module EksCli
+   module CloudFormation
+     class Stack
+
+       def self.create(cluster_name, config)
+         Log.info "creating cloudformation stack #{config[:stack_name]}"
+         begin
+           stack_id = client(cluster_name).create_stack(config).stack_id
+         rescue Aws::CloudFormation::Errors::AlreadyExistsException => e
+           Log.warn "stack #{config[:stack_name]} already exists"
+           stack_id = Aws::CloudFormation::Stack.new(config[:stack_name], client: client(cluster_name)).stack_id
+         end
+         new(cluster_name, stack_id)
+       end
+
+       def self.await(stacks)
+         while pending(stacks) > 0 do
+           Log.info "#{pending(stacks)} stacks out of #{stacks.count} are still being created"
+           sleep 10
+         end
+         stacks
+       end
+
+       def self.find(cluster_name, name)
+         new(cluster_name, Aws::CloudFormation::Stack.new(name, client: client(cluster_name)).stack_id)
+       end
+
+       def initialize(cluster_name, stack_id)
+         @cluster_name = cluster_name
+         @id = stack_id
+       end
+
+       def delete
+         Log.info "deleting stack #{id} from cloudformation"
+         client.delete_stack(stack_name: id)
+       end
+
+       def id; @id; end
+
+       def pending?
+         status == "CREATE_IN_PROGRESS"
+       end
+
+       def eks_worker?
+         !worker_tag.empty?
+       end
+
+       def node_instance_role_arn
+         output("NodeInstanceRole")
+       end
+
+       def node_instance_role_name
+         node_instance_role_arn.split("/")[1]
+       end
+
+       def status
+         stack(reload: true).stack_status
+       end
+
+       def reload
+         stack(reload: true)
+         self
+       end
+
+       def output(key)
+         stack.outputs.select {|a| a.output_key == key}.first.output_value
+       end
+
+       def outputs
+         stack.outputs
+       end
+
+       private
+
+       def self.pending(stacks)
+         stacks.select(&:pending?).count
+       end
+
+       def self.client(cluster_name)
+         CloudFormation::Client.get(cluster_name)
+       end
+
+       def client
+         self.class.client(@cluster_name)
+       end
+
+       def stack(reload: false)
+         if reload
+           @stack = fetch
+         else
+           @stack ||= fetch
+         end
+       end
+
+       def fetch
+         client.describe_stacks(stack_name: @id).stacks.first
+       end
+
+       def worker_tag
+         stack.tags.select {|t| worker_tag?(t)}
+       end
+
+       def worker_tag?(tag)
+         tag.key == "eks-nodegroup"
+       end
+
+     end
+
+   end
+ end
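
For orientation, a minimal usage sketch of this Stack class (illustrative only, not part of the package contents; it assumes the gem's classes are already loaded, that the cluster's config files under ~/.eks/<cluster> hold a region, and uses a made-up cluster name and template URL):

    # Hypothetical CloudFormation config; Stack.create forwards this hash
    # straight to the AWS SDK's create_stack call.
    config = {stack_name: "EKS-VPC-demo",
              template_url: "https://example.com/eks-vpc.yaml",
              tags: [{key: "eks-cluster", value: "demo"}]}

    stack = EksCli::CloudFormation::Stack.create("demo", config)
    EksCli::CloudFormation::Stack.await([stack])  # polls every 10s while CREATE_IN_PROGRESS
    puts stack.reload.status
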
@@ -0,0 +1,33 @@
+ require 'cloudformation/stack'
+ require 'log'
+
+ module EksCli
+   module CloudFormation
+     class VPC
+
+       def self.create(cluster_name)
+         Log.info "creating VPC stack for #{cluster_name}"
+         s = Stack.create(cluster_name, config(cluster_name))
+         Stack.await([s])
+         s.reload
+         puts "Outputs are:
+           SecurityGroups: #{s.output("SecurityGroups")}
+           VpcId: #{s.output("VpcId")}
+           SubnetIds: #{s.output("SubnetIds")}
+         "
+         {control_plane_sg_id: s.output("SecurityGroups"),
+          vpc_id: s.output("VpcId"),
+          subnets: s.output("SubnetIds").split(",")}
+       end
+
+       private
+
+       def self.config(cluster_name)
+         {stack_name: "EKS-VPC-#{cluster_name}",
+          template_url: "https://amazon-eks.s3-us-west-2.amazonaws.com/cloudformation/2018-08-30/amazon-eks-vpc-sample.yaml",
+          tags: [{key: "eks-cluster", value: cluster_name.to_s}]}
+       end
+
+     end
+   end
+ end
@@ -0,0 +1,117 @@
+ require 'json'
+ require_relative 'log'
+ require 'active_support/core_ext/hash'
+ require 'fileutils'
+ module EksCli
+   class Config
+     class << self
+       def [](cluster_name)
+         new(cluster_name)
+       end
+
+     end
+
+     def initialize(cluster_name)
+       @cluster_name = cluster_name
+     end
+
+     def read_from_disk
+       base = read(config_path)
+       base["cluster_name"] = @cluster_name
+       base = base.merge(read(state_path)).merge(read(groups_path))
+       base
+     end
+
+     def [](k)
+       read_from_disk[k]
+     end
+
+     def for_group(group_name)
+       all = read_from_disk
+       group = group_defaults
+         .merge(all["groups"][group_name])
+         .merge(all.slice("cluster_name", "control_plane_sg_id", "nodes_sg_id", "vpc_id"))
+       group["subnets"] = all["subnets"][0..(group["num_subnets"]-1)].join(",")
+       group
+     end
+
+     def write(attrs, to = :state)
+       to_path = resolve_config_file(to)
+       Log.info "updating configuration file #{to_path}:\n#{attrs}"
+       attrs = attrs.inject({}) {|h,(k,v)| h[k.to_s] = v; h}
+       current = read(to_path)
+       updated = current.deep_merge(attrs)
+       write_to_file(updated, to_path)
+     end
+
+     def bootstrap(attrs)
+       write_to_file(attrs, config_path)
+       write_to_file({}, state_path)
+       write_to_file({}, groups_path)
+       Log.info "written configuration files to:\n#{config_path}\n#{state_path}\n#{groups_path}"
+     end
+
+     def set_iam_policies(policies)
+       write({iam_policies: policies}, :groups)
+     end
+
+     def update_nodegroup(options)
+       options = options.slice(:ami, :group_name, :instance_type, :num_subnets, :ssh_key_name, :taints, :min, :max)
+       write({groups: { options[:group_name] => options }}, :groups)
+     end
+
+     private
+
+     def resolve_config_file(sym)
+       case sym
+       when :state
+         state_path
+       when :config
+         config_path
+       when :groups
+         groups_path
+       else raise "no such config #{sym}"
+       end
+     end
+
+     def write_to_file(attrs, path)
+       File.open(path, 'w') {|file| file.write(attrs.to_json)}
+     end
+
+     def read(path)
+       f = File.read(path)
+       JSON.parse(f)
+     end
+
+     def groups_path
+       with_config_dir { |dir| "#{dir}/groups.json" }
+     end
+
+     def state_path
+       with_config_dir { |dir| "#{dir}/state.json" }
+     end
+
+     def config_path
+       with_config_dir { |dir| "#{dir}/config.json" }
+     end
+
+     def dir
+       "#{ENV['HOME']}/.eks/#{@cluster_name}"
+     end
+
+     def with_config_dir
+       FileUtils.mkdir_p(dir)
+       yield dir
+     end
+
+     def group_defaults
+       {"group_name" => "Workers",
+        "ami" => "ami-0a54c984b9f908c81",
+        "instance_type" => "m5.xlarge",
+        "max" => 1,
+        "min" => 1,
+        "num_subnets" => 3,
+        "volume_size" => 100}
+     end
+   end
+ end
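
A short sketch of how the configuration files are typically written and read back (illustrative; assumes the gem's classes are loaded, and the cluster name and attribute values are made up). bootstrap seeds ~/.eks/<cluster>/config.json and empties state.json and groups.json, update_nodegroup writes into groups.json, and for_group merges both with group_defaults:

    cfg = EksCli::Config["demo"]
    cfg.bootstrap("region" => "us-west-2",
                  "subnets" => ["subnet-a", "subnet-b", "subnet-c"])
    cfg.update_nodegroup(group_name: "Workers", instance_type: "m5.large", min: 1, max: 3)
    puts cfg.for_group("Workers")["instance_type"]  # => "m5.large"
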
@@ -0,0 +1,53 @@
+ require 'aws-sdk-ec2'
+ require 'log'
+ require 'config'
+ module EksCli
+   module EC2
+     class SecurityGroup
+
+       def initialize(cluster_name, open_ports)
+         @cluster_name = cluster_name
+         @open_ports = open_ports
+       end
+
+       def create
+         Log.info "creating security group for in-cluster communication for #{@cluster_name}"
+         gid = client.create_security_group(description: "Security group for in-cluster communication on #{@cluster_name}",
+                                            group_name: "#{@cluster_name}-SG",
+                                            vpc_id: vpc_id).group_id
+
+         Log.info "created security group #{gid}, setting ingress/egress rules"
+
+         client.authorize_security_group_ingress(group_id: gid,
+                                                 ip_permissions: [{from_port: -1,
+                                                                   ip_protocol: "-1",
+                                                                   to_port: -1,
+                                                                   user_id_group_pairs: [{description: "in-cluster communication for #{@cluster_name}",
+                                                                                          group_id: gid}]}])
+
+         @open_ports.each do |port|
+
+           client.authorize_security_group_ingress(group_id: gid,
+                                                   ip_permissions: [{from_port: port,
+                                                                     to_port: port,
+                                                                     ip_protocol: "tcp",
+                                                                     ip_ranges: [{cidr_ip: "0.0.0.0/0",
+                                                                                  description: "EKS cluster allow access on port #{port}"}]}])
+         end
+
+         Log.info "done"
+         gid
+       end
+
+       private
+
+       def vpc_id
+         Config[@cluster_name]["vpc_id"]
+       end
+
+       def client
+         @client ||= Aws::EC2::Client.new(region: Config[@cluster_name]["region"])
+       end
+     end
+   end
+ end
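
As a sketch (made-up cluster name and ports; assumes region and vpc_id are already in the cluster's configuration), the class is instantiated with the ports to open and create returns the new group id:

    sg_id = EksCli::EC2::SecurityGroup.new("demo", [22, 443]).create
    puts sg_id  # e.g. "sg-..."
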
@@ -0,0 +1,11 @@
+ require 'aws-sdk-eks'
+ require 'config'
+ module EksCli
+   module EKS
+     class Client
+       def self.get(cluster_name)
+         @client ||= Aws::EKS::Client.new(region: Config[cluster_name]["region"])
+       end
+     end
+   end
+ end
@@ -0,0 +1,61 @@
+ require 'aws-sdk-eks'
+ require 'eks/client'
+ require 'config'
+ require 'log'
+
+ module EksCli
+   module EKS
+     class Cluster
+
+       def initialize(cluster_name)
+         @cluster_name = cluster_name
+       end
+
+       def create
+         Log.info "creating cluster #{@cluster_name}"
+         Log.debug config
+         resp = client.create_cluster(config)
+         Log.info "response: #{resp.cluster}"
+         self
+       end
+
+       def config
+         {name: @cluster_name,
+          role_arn: Config[@cluster_name]["eks_role_arn"],
+          resources_vpc_config: {
+            subnet_ids: Config[@cluster_name]["subnets"],
+            security_group_ids: [Config[@cluster_name]["control_plane_sg_id"]]}}
+       end
+
+       def await
+         while status == "CREATING" do
+           Log.info "waiting for cluster #{@cluster_name} to finish creation (#{status})"
+           sleep 10
+         end
+         Log.info "cluster #{@cluster_name} created with status #{status}"
+       end
+
+       def status
+         cluster.status
+       end
+
+       def cluster
+         client.describe_cluster(name: @cluster_name).cluster
+       end
+
+       def arn
+         cluster.arn
+       end
+
+       def client
+         Client.get(@cluster_name)
+       end
+
+       def update_kubeconfig
+         Log.info "updating kubeconfig for cluster #{@cluster_name}"
+         Log.info `aws eks update-kubeconfig --name=#{@cluster_name} --region=#{Config[@cluster_name]["region"]}`
+       end
+
+     end
+   end
+ end
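
A rough sketch tying the cluster calls together (illustrative only; assumes the gem's classes are loaded and the configuration above already holds region, eks_role_arn, subnets and control_plane_sg_id):

    cluster = EksCli::EKS::Cluster.new("demo").create
    cluster.await               # polls describe_cluster every 10s while status is "CREATING"
    cluster.update_kubeconfig   # shells out to `aws eks update-kubeconfig`
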
@@ -0,0 +1,84 @@
+ require 'aws-sdk-iam'
+ require 'config'
+ module EksCli
+   module IAM
+     class Client
+
+       EKS_CLUSTER_POLICIES = ["AmazonEKSClusterPolicy", "AmazonEKSServicePolicy"]
+       ASSUME_ROLE = {
+         "Version" => "2012-10-17",
+         "Statement" => [
+           {
+             "Effect" => "Allow",
+             "Principal" => {
+               "Service" => "eks.amazonaws.com"
+             },
+             "Action" => "sts:AssumeRole"
+           }
+         ]
+       }
+
+       def initialize(cluster_name)
+         @cluster_name = cluster_name
+       end
+
+       def client
+         @client ||= Aws::IAM::Client.new(region: config["region"])
+       end
+
+       def config
+         @config ||= Config[@cluster_name]
+       end
+
+       def create_eks_role
+         Log.info "creating IAM cluster role for #{@cluster_name}"
+         begin
+           role = client.get_role(role_name: role_name).role
+         rescue Aws::IAM::Errors::NoSuchEntity => e
+           role = client.create_role(role_name: role_name,
+                                     description: "created by eks cli for #{@cluster_name}",
+                                     assume_role_policy_document: ASSUME_ROLE.to_json).role
+           attach_policies(role.role_name, EKS_CLUSTER_POLICIES)
+         end
+         Log.info "created role #{role}"
+         role
+       end
+
+       def attach_node_policies(role_name)
+         attach_policies(role_name, node_policies)
+       end
+
+       def detach_node_policies(role_name)
+         detach_policies(role_name, node_policies)
+       end
+
+       def attach_policies(role_name, policies)
+         Log.info "attaching IAM policies to #{role_name}"
+         policies.each do |p|
+           client.attach_role_policy(policy_arn: arn(p),
+                                     role_name: role_name)
+         end
+       end
+
+       def detach_policies(role_name, policies)
+         Log.info "detaching IAM policies to #{role_name}"
+         policies.each do |p|
+           client.detach_role_policy(policy_arn: arn(p),
+                                     role_name: role_name)
+         end
+       end
+
+       def node_policies
+         config["iam_policies"]
+       end
+
+       def arn(p)
+         "arn:aws:iam::aws:policy/#{p}"
+       end
+
+       def role_name
+         "#{@cluster_name}-EKS-Role"
+       end
+     end
+   end
+ end
@@ -0,0 +1,53 @@
+ require 'k8s/configmap_builder'
+ require 'k8s/client'
+ require 'cloudformation/client'
+ require 'cloudformation/stack'
+ require 'log'
+
+ module EksCli
+   module K8s
+     class Auth
+
+       def initialize(cluster_name)
+         @cluster_name = cluster_name
+       end
+
+       def update
+         Log.info "updating auth configmap on kubernetes"
+         begin
+           k8s_client.get_config_map("aws-auth", "kube-system")
+           k8s_client.update_config_map(configmap)
+         rescue KubeException => e
+           Log.debug "exception updating configmap: #{e}"
+           k8s_client.create_config_map(configmap)
+         end
+         Log.info "done"
+       end
+
+       private
+
+       def k8s_client
+         @k8s_client ||= K8s::Client.new(@cluster_name)
+       end
+
+       def client
+         CloudFormation::Client.get(@cluster_name)
+       end
+
+       def arns
+         client
+           .list_stacks(stack_status_filter: ["CREATE_COMPLETE"])
+           .stack_summaries
+           .map(&:stack_id)
+           .map {|id| CloudFormation::Stack.new(@cluster_name, id)}
+           .select {|stack| stack.eks_worker?}
+           .map {|stack| stack.node_instance_role_arn}
+       end
+
+       def configmap
+         ConfigmapBuilder.build(arns)
+       end
+
+     end
+   end
+ end
@@ -0,0 +1,95 @@
+ require 'yaml'
+ require 'kubeclient'
+ require_relative '../log'
+ require_relative '../config'
+
+ module EksCli
+   module K8s
+     class Client
+
+       def initialize(cluster_name)
+         @cluster_name = cluster_name
+       end
+
+       def get_elb(service_name, ns = "default")
+         self.get_service(service_name, ns).status.loadBalancer.ingress.first.hostname
+       end
+
+       def enable_gpu
+         self.create_daemon_set(resource_from_yaml("nvidia_device_plugin.yaml"))
+       end
+
+       def set_docker_registry_credentials(user, password, email)
+         Log.info "setting docker registry credentials"
+         Log.info `kubectl config use-context #{config["cluster_arn"]} &&
+           kubectl create secret docker-registry registrykey --docker-server=https://index.docker.io/v1/ --docker-username=#{user} --docker-password=#{password} --docker-email=#{email} &&
+           kubectl --namespace=kube-system create secret docker-registry registrykey --docker-server=https://index.docker.io/v1/ --docker-username=#{user} --docker-password=#{password} --docker-email=#{email}`
+
+         Log.info client.patch_service_account("default", {imagePullSecrets: [{name: "registrykey"}]}, "default")
+         Log.info client.patch_service_account("default", {imagePullSecrets: [{name: "registrykey"}]}, "kube-system")
+       end
+
+       def create_default_storage_class
+         Log.info "creating default storage class"
+         Log.info self.create_storage_class(resource_from_yaml("default_storage_class.yaml"))
+       end
+
+       private
+
+       def resource_from_yaml(filename)
+         yaml = YAML.load_file(File.join($root_dir, "/assets/#{filename}"))
+         Kubeclient::Resource.new(yaml)
+       end
+
+       def method_missing(method, *args, &block)
+         if v1_client.respond_to?(method)
+           v1_client.send(method, *args, &block)
+         elsif apps_client.respond_to?(method)
+           apps_client.send(method, *args, &block)
+         elsif storage_client.respond_to?(method)
+           storage_client.send(method, *args, &block)
+         else
+           raise "unknown method #{method}"
+         end
+       end
+
+       def apps_client
+         @apps_client ||= client("/apis/apps")
+       end
+
+       def v1_client
+         @v1_client ||= client
+       end
+
+       def storage_client
+         @storage_client ||= client("/apis/storage.k8s.io")
+       end
+
+       def client(suffix = "")
+         Kubeclient::Client.new(
+           [context.api_endpoint, suffix].join,
+           context.api_version,
+           ssl_options: context.ssl_options,
+           auth_options: {bearer_token: token})
+       end
+
+       def config
+         @config ||= Config[@cluster_name]
+       end
+
+       def token
+         JSON.parse(`aws-iam-authenticator token -i #{config["cluster_name"]}`)["status"]["token"]
+       end
+
+       def kube_config
+         @kube_config ||= Kubeclient::Config.read("#{ENV['HOME']}/.kube/config")
+       end
+
+       def context
+         kube_config.context(config["cluster_arn"])
+       end
+
+
+     end
+   end
+ end
@@ -0,0 +1,30 @@
+ require 'yaml'
+ require 'kubeclient'
+
+ module EksCli
+   module K8s
+     class ConfigmapBuilder
+       class << self
+         def build(arns)
+           cm = Kubeclient::Resource.new
+           cm.metadata={}
+           cm.metadata.name = "aws-auth"
+           cm.metadata.namespace = "kube-system"
+           cm.data = {}
+           cm.data.mapRoles = map_roles(arns)
+           cm
+         end
+
+         def map_roles(arns)
+           arns.map {|a| map_role(a)}.to_yaml.sub("---\n","")
+         end
+
+         def map_role(stack_arn)
+           {"rolearn" => stack_arn,
+            "username" => "system:node:{{EC2PrivateDNSName}}",
+            "groups" => ["system:bootstrappers", "system:nodes"]}
+         end
+       end
+     end
+   end
+ end
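
For reference, a sketch of the mapRoles payload that map_roles produces for a single worker role ARN (illustrative; the ARN is a placeholder, and the output shown in the comment is approximate):

    puts EksCli::K8s::ConfigmapBuilder.map_roles(["arn:aws:iam::111111111111:role/example-NodeInstanceRole"])
    # - rolearn: arn:aws:iam::111111111111:role/example-NodeInstanceRole
    #   username: system:node:{{EC2PrivateDNSName}}
    #   groups:
    #   - system:bootstrappers
    #   - system:nodes
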
@@ -0,0 +1,26 @@
+ require 'logger'
+ module EksCli
+   class Log
+     def self.info(str)
+       self.logger.info str
+     end
+
+     def self.error(str)
+       self.logger.error str
+     end
+
+     def self.debug(str)
+       self.logger.debug str
+     end
+
+     def self.warn(str)
+       self.logger.warn str
+     end
+
+     private
+
+     def self.logger
+       @logger ||= Logger.new(STDOUT)
+     end
+   end
+ end