hetzner-k3s 0.6.2 → 0.6.3
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/README.md +5 -5
- data/lib/hetzner/infra/client.rb +4 -4
- data/lib/hetzner/infra/firewall.rb +1 -2
- data/lib/hetzner/infra/load_balancer.rb +1 -1
- data/lib/hetzner/infra/server.rb +28 -24
- data/lib/hetzner/k3s/cluster.rb +62 -526
- data/lib/hetzner/k3s/version.rb +1 -1
- data/lib/hetzner/kubernetes/client.rb +475 -0
- data/lib/hetzner/utils.rb +4 -4
- metadata +2 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2bbaf7bd5387cc92f0725308a42f982df7b47596d5d1a3d50693cf3b6a741359
+  data.tar.gz: 0617b66a6ca8299c5c5ada69c586a532e915657c857c6f6821cd81d29c17a774
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a457da09e05ac40da2d0b40de65193f16ecd42ae926f6d59271ca9fedc77587faf2bb90b4c4a62e326e509076b0f44e84062fb6c72536e5e8c1ce4b716d56813
+  data.tar.gz: '0628cf98daa772f50251257b626ca148420d1a8853618652c9fd7dd6750694393d9902ea684c786029e0db3bcf8d01729aa217e9fc3c070067fbbe6afb395dd8'
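Both digest sets cover the two archives that make up the gem: `metadata.gz` (the gemspec) and `data.tar.gz` (the file tree). A minimal Ruby sketch for checking a downloaded copy against the published SHA256 values; the local file names assume the archives were extracted from `hetzner-k3s-0.6.3.gem` (a plain tar archive), so treat them as placeholders:

```ruby
require 'digest'

# Published SHA256 digests from checksums.yaml (0.6.3).
EXPECTED = {
  'metadata.gz' => '2bbaf7bd5387cc92f0725308a42f982df7b47596d5d1a3d50693cf3b6a741359',
  'data.tar.gz' => '0617b66a6ca8299c5c5ada69c586a532e915657c857c6f6821cd81d29c17a774'
}.freeze

EXPECTED.each do |file, expected|
  actual = Digest::SHA256.file(file).hexdigest # hypothetical local paths
  puts "#{file}: #{actual == expected ? 'OK' : 'MISMATCH'}"
end
```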
data/Gemfile.lock
CHANGED
data/README.md
CHANGED
@@ -58,13 +58,13 @@ Before using the tool, be sure to have kubectl installed as it's required to ins
 #### With Homebrew
 
 ```bash
-brew install vitobotta/tap/
+brew install vitobotta/tap/hetzner_k3s
 ```
 
 #### Binary installation (Intel)
 
 ```bash
-wget https://github.com/vitobotta/hetzner-k3s/releases/download/v0.6.
+wget https://github.com/vitobotta/hetzner-k3s/releases/download/v0.6.3/hetzner-k3s-mac-amd64
 chmod +x hetzner-k3s-mac-x64
 sudo mv hetzner-k3s-mac-x64 /usr/local/bin/hetzner-k3s
 ```
@@ -72,7 +72,7 @@ sudo mv hetzner-k3s-mac-x64 /usr/local/bin/hetzner-k3s
 
 #### Binary installation (Apple Silicon/M1)
 
 ```bash
-wget https://github.com/vitobotta/hetzner-k3s/releases/download/v0.6.
+wget https://github.com/vitobotta/hetzner-k3s/releases/download/v0.6.3/hetzner-k3s-mac-arm64
 chmod +x hetzner-k3s-mac-arm
 sudo mv hetzner-k3s-mac-arm /usr/local/bin/hetzner-k3s
 ```
@@ -82,7 +82,7 @@ NOTE: currently the ARM version still requires [Rosetta](https://support.apple.c
 
 ### Linux
 
 ```bash
-wget https://github.com/vitobotta/hetzner-k3s/releases/download/v0.6.
+wget https://github.com/vitobotta/hetzner-k3s/releases/download/v0.6.3/hetzner-k3s-linux-x86_64
 chmod +x hetzner-k3s-linux-x86_64
 sudo mv hetzner-k3s-linux-x86_64 /usr/local/bin/hetzner-k3s
 ```
@@ -107,7 +107,7 @@ Alternatively, if you don't want to set up a Ruby runtime but have Docker instal
 
 docker run --rm -it \
 -v ${PWD}:/cluster \
 -v ${HOME}/.ssh:/tmp/.ssh \
-vitobotta/hetzner-k3s:v0.6.
+vitobotta/hetzner-k3s:v0.6.3 \
 create-cluster \
 --config-file /cluster/test.yaml
 ```
data/lib/hetzner/infra/client.rb
CHANGED
@@ -6,10 +6,6 @@ module Hetzner
 
     attr_reader :token
 
-    def initialize(token:)
-      @token = token
-    end
-
     def get(path)
       make_request do
         JSON.parse HTTParty.get(BASE_URI + path, headers: headers).body
@@ -30,6 +26,10 @@ module Hetzner
 
     private
 
+    def initialize(token:)
+      @token = token
+    end
+
     def headers
       {
         'Authorization' => "Bearer #{@token}",
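A side note on the `initialize` move above: in Ruby, `initialize` is already private by default and `new` invokes it regardless of where it is declared, so relocating it under `private` only groups it with the other internals; construction is unchanged. A sketch, assuming the class is `Hetzner::Client` as the file path suggests and using a placeholder token:

```ruby
client = Hetzner::Client.new(token: 'placeholder-hcloud-token') # still works after the move
client.token # => "placeholder-hcloud-token", via the public attr_reader :token
```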
data/lib/hetzner/infra/firewall.rb
CHANGED
@@ -33,8 +33,7 @@ module Hetzner
       puts 'Deleting firewall...'
 
       servers.each do |server|
-        hetzner_client.post("/firewalls/#{firewall['id']}/actions/remove_from_resources",
-                            remove_targets_config(server['id']))
+        hetzner_client.post("/firewalls/#{firewall['id']}/actions/remove_from_resources", remove_targets_config(server['id']))
       end
 
       hetzner_client.delete('/firewalls', firewall['id'])
data/lib/hetzner/infra/load_balancer.rb
CHANGED
@@ -33,8 +33,8 @@ module Hetzner
       puts 'Deleting API load balancer...' unless high_availability
 
       hetzner_client.post("/load_balancers/#{load_balancer['id']}/actions/remove_target", remove_targets_config)
-
       hetzner_client.delete('/load_balancers', load_balancer['id'])
+
       puts '...API load balancer deleted.' unless high_availability
     elsif high_availability
       puts 'API load balancer no longer exists, skipping.'
data/lib/hetzner/infra/server.rb
CHANGED
@@ -59,30 +59,6 @@ module Hetzner
     end
 
     def user_data
-      packages = %w[fail2ban wireguard]
-      packages += additional_packages if additional_packages
-      packages = "'#{packages.join("', '")}'"
-
-      post_create_commands = [
-        'crontab -l > /etc/cron_bkp',
-        'echo "@reboot echo true > /etc/ready" >> /etc/cron_bkp',
-        'crontab /etc/cron_bkp',
-        'sed -i \'s/[#]*PermitRootLogin yes/PermitRootLogin prohibit-password/g\' /etc/ssh/sshd_config',
-        'sed -i \'s/[#]*PasswordAuthentication yes/PasswordAuthentication no/g\' /etc/ssh/sshd_config',
-        'systemctl restart sshd',
-        'systemctl stop systemd-resolved',
-        'systemctl disable systemd-resolved',
-        'rm /etc/resolv.conf',
-        'echo \'nameserver 1.1.1.1\' > /etc/resolv.conf',
-        'echo \'nameserver 1.0.0.1\' >> /etc/resolv.conf'
-      ]
-
-      post_create_commands += additional_post_create_commands if additional_post_create_commands
-
-      post_create_commands += ['shutdown -r now'] if post_create_commands.grep(/shutdown|reboot/).grep_v(/@reboot/).empty?
-
-      post_create_commands = " - #{post_create_commands.join("\n - ")}"
-
       <<~YAML
         #cloud-config
         packages: [#{packages}]
@@ -125,5 +101,33 @@ module Hetzner
 
       JSON.parse(response_body)['server']
     end
+
+    def post_create_commands
+      commands = [
+        'crontab -l > /etc/cron_bkp',
+        'echo "@reboot echo true > /etc/ready" >> /etc/cron_bkp',
+        'crontab /etc/cron_bkp',
+        'sed -i \'s/[#]*PermitRootLogin yes/PermitRootLogin prohibit-password/g\' /etc/ssh/sshd_config',
+        'sed -i \'s/[#]*PasswordAuthentication yes/PasswordAuthentication no/g\' /etc/ssh/sshd_config',
+        'systemctl restart sshd',
+        'systemctl stop systemd-resolved',
+        'systemctl disable systemd-resolved',
+        'rm /etc/resolv.conf',
+        'echo \'nameserver 1.1.1.1\' > /etc/resolv.conf',
+        'echo \'nameserver 1.0.0.1\' >> /etc/resolv.conf'
+      ]
+
+      commands += additional_post_create_commands if additional_post_create_commands
+
+      commands << 'shutdown -r now' if commands.grep(/shutdown|reboot/).grep_v(/@reboot/).empty?
+
+      " - #{commands.join("\n - ")}"
+    end
+
+    def packages
+      packages = %w[fail2ban wireguard]
+      packages += additional_packages if additional_packages
+      "'#{packages.join("', '")}'"
+    end
   end
 end
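The refactor above is a plain extract-method cleanup: `user_data` no longer builds the package list and command list inline, it just interpolates the new private helpers into the cloud-init template. A condensed sketch of the resulting shape; the `runcmd:` key is an assumption, since the full template is outside this hunk:

```ruby
def user_data
  <<~YAML
    #cloud-config
    packages: [#{packages}]
    runcmd:
    #{post_create_commands}
  YAML
end
```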
data/lib/hetzner/k3s/cluster.rb
CHANGED
@@ -14,6 +14,8 @@ require_relative '../infra/server'
 require_relative '../infra/load_balancer'
 require_relative '../infra/placement_group'
 
+require_relative '../kubernetes/client'
+
 require_relative '../utils'
 
 class Cluster
@@ -25,38 +27,18 @@ class Cluster
 
   def create
     @cluster_name = configuration['cluster_name']
-    @kubeconfig_path = File.expand_path(configuration['kubeconfig_path'])
-    @public_ssh_key_path = File.expand_path(configuration['public_ssh_key_path'])
-    private_ssh_key_path = configuration['private_ssh_key_path']
-    @private_ssh_key_path = private_ssh_key_path && File.expand_path(private_ssh_key_path)
-    @k3s_version = configuration['k3s_version']
     @masters_config = configuration['masters']
     @worker_node_pools = find_worker_node_pools(configuration)
     @masters_location = configuration['location']
-    @verify_host_key = configuration.fetch('verify_host_key', false)
     @servers = []
     @ssh_networks = configuration['ssh_allowed_networks']
     @api_networks = configuration['api_allowed_networks']
-    @
-    @
-    @kube_scheduler_args = configuration.fetch('kube_scheduler_args', [])
-    @kube_controller_manager_args = configuration.fetch('kube_controller_manager_args', [])
-    @kube_cloud_controller_manager_args = configuration.fetch('kube_cloud_controller_manager_args', [])
-    @kubelet_args = configuration.fetch('kubelet_args', [])
-    @kube_proxy_args = configuration.fetch('kube_proxy_args', [])
+    @private_ssh_key_path = File.expand_path(configuration['private_ssh_key_path'])
+    @public_ssh_key_path = File.expand_path(configuration['public_ssh_key_path'])
 
     create_resources
 
-
-
-    sleep 10
-
-    label_nodes
-    taint_nodes
-
-    deploy_cloud_controller_manager
-    deploy_csi_driver
-    deploy_system_upgrade_controller
+    kubernetes_client.deploy(masters: masters, workers: workers, master_definitions: master_definitions_for_create, worker_definitions: workers_definitions_for_marking)
   end
 
   def delete
@@ -75,378 +57,26 @@ class Cluster
     @new_k3s_version = new_k3s_version
     @config_file = config_file
 
-
+    kubernetes_client.upgrade
   end
 
   private
 
   attr_accessor :servers
 
-  attr_reader :configuration, :cluster_name, :kubeconfig_path,
+  attr_reader :configuration, :cluster_name, :kubeconfig_path,
              :masters_config, :worker_node_pools,
-              :masters_location, :public_ssh_key_path,
+              :masters_location, :private_ssh_key_path, :public_ssh_key_path,
              :hetzner_token, :new_k3s_version,
-              :config_file, :
-              :
-              :kube_controller_manager_args, :kube_cloud_controller_manager_args,
-              :kubelet_args, :kube_proxy_args, :api_networks
+              :config_file, :ssh_networks,
+              :api_networks
 
   def find_worker_node_pools(configuration)
     configuration.fetch('worker_node_pools', [])
   end
 
-  def
-
-    JSON.parse(response).first['name']
-  end
-
-  def create_resources
-    create_servers
-    create_load_balancer if masters.size > 1
-  end
-
-  def delete_placement_groups
-    Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete
-
-    worker_node_pools.each do |pool|
-      pool_name = pool['name']
-      Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).delete
-    end
-  end
-
-  def delete_resources
-    Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(high_availability: (masters.size > 1))
-
-    Hetzner::Firewall.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(all_servers)
-
-    Hetzner::Network.new(hetzner_client: hetzner_client, cluster_name: cluster_name, existing_network: existing_network).delete
-
-    Hetzner::SSHKey.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(public_ssh_key_path: public_ssh_key_path)
-
-    delete_placement_groups
-    delete_servers
-  end
-
-  def upgrade_cluster
-    worker_upgrade_concurrency = workers.size - 1
-    worker_upgrade_concurrency = 1 if worker_upgrade_concurrency.zero?
-
-    cmd = <<~BASH
-      kubectl apply -f - <<-EOF
-      apiVersion: upgrade.cattle.io/v1
-      kind: Plan
-      metadata:
-        name: k3s-server
-        namespace: system-upgrade
-        labels:
-          k3s-upgrade: server
-      spec:
-        concurrency: 1
-        version: #{new_k3s_version}
-        nodeSelector:
-          matchExpressions:
-            - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
-        serviceAccountName: system-upgrade
-        tolerations:
-        - key: "CriticalAddonsOnly"
-          operator: "Equal"
-          value: "true"
-          effect: "NoExecute"
-        cordon: true
-        upgrade:
-          image: rancher/k3s-upgrade
-      EOF
-    BASH
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    cmd = <<~BASH
-      kubectl apply -f - <<-EOF
-      apiVersion: upgrade.cattle.io/v1
-      kind: Plan
-      metadata:
-        name: k3s-agent
-        namespace: system-upgrade
-        labels:
-          k3s-upgrade: agent
-      spec:
-        concurrency: #{worker_upgrade_concurrency}
-        version: #{new_k3s_version}
-        nodeSelector:
-          matchExpressions:
-            - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
-        serviceAccountName: system-upgrade
-        prepare:
-          image: rancher/k3s-upgrade
-          args: ["prepare", "k3s-server"]
-        cordon: true
-        upgrade:
-          image: rancher/k3s-upgrade
-      EOF
-    BASH
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    puts 'Upgrade will now start. Run `watch kubectl get nodes` to see the nodes being upgraded. This should take a few minutes for a small cluster.'
-    puts 'The API server may be briefly unavailable during the upgrade of the controlplane.'
-
-    updated_configuration = configuration.raw
-    updated_configuration['k3s_version'] = new_k3s_version
-
-    File.write(config_file, updated_configuration.to_yaml)
-  end
-
-  def master_script(master)
-    server = master == first_master ? ' --cluster-init ' : " --server https://#{api_server_ip}:6443 "
-    flannel_interface = find_flannel_interface(master)
-
-    available_k3s_releases = Hetzner::Configuration.available_releases
-    wireguard_native_min_version_index = available_k3s_releases.find_index('v1.23.6+k3s1')
-    selected_version_index = available_k3s_releases.find_index(k3s_version)
-
-    flannel_wireguard = if enable_encryption
-      if selected_version_index >= wireguard_native_min_version_index
-        ' --flannel-backend=wireguard-native '
-      else
-        ' --flannel-backend=wireguard '
-      end
-    else
-      ' '
-    end
-
-    extra_args = "#{kube_api_server_args_list} #{kube_scheduler_args_list} #{kube_controller_manager_args_list} #{kube_cloud_controller_manager_args_list} #{kubelet_args_list} #{kube_proxy_args_list}"
-    taint = schedule_workloads_on_masters? ? ' ' : ' --node-taint CriticalAddonsOnly=true:NoExecute '
-
-    <<~SCRIPT
-      curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="#{k3s_version}" K3S_TOKEN="#{k3s_token}" INSTALL_K3S_EXEC="server \
-      --disable-cloud-controller \
-      --disable servicelb \
-      --disable traefik \
-      --disable local-storage \
-      --disable metrics-server \
-      --write-kubeconfig-mode=644 \
-      --node-name="$(hostname -f)" \
-      --cluster-cidr=10.244.0.0/16 \
-      --etcd-expose-metrics=true \
-      #{flannel_wireguard} \
-      --kube-controller-manager-arg="bind-address=0.0.0.0" \
-      --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
-      --kube-scheduler-arg="bind-address=0.0.0.0" \
-      #{taint} #{extra_args} \
-      --kubelet-arg="cloud-provider=external" \
-      --advertise-address=$(hostname -I | awk '{print $2}') \
-      --node-ip=$(hostname -I | awk '{print $2}') \
-      --node-external-ip=$(hostname -I | awk '{print $1}') \
-      --flannel-iface=#{flannel_interface} \
-      #{server} #{tls_sans}" sh -
-    SCRIPT
-  end
-
-  def worker_script(worker)
-    flannel_interface = find_flannel_interface(worker)
-
-    <<~BASH
-      curl -sfL https://get.k3s.io | K3S_TOKEN="#{k3s_token}" INSTALL_K3S_VERSION="#{k3s_version}" K3S_URL=https://#{first_master_private_ip}:6443 INSTALL_K3S_EXEC="agent \
-      --node-name="$(hostname -f)" \
-      --kubelet-arg="cloud-provider=external" \
-      --node-ip=$(hostname -I | awk '{print $2}') \
-      --node-external-ip=$(hostname -I | awk '{print $1}') \
-      --flannel-iface=#{flannel_interface}" sh -
-    BASH
-  end
-
-  def deploy_kubernetes
-    puts
-    puts "Deploying k3s to first master (#{first_master['name']})..."
-
-    ssh first_master, master_script(first_master), print_output: true
-
-    puts
-    puts '...k3s has been deployed to first master.'
-
-    save_kubeconfig
-
-    if masters.size > 1
-      threads = masters[1..].map do |master|
-        Thread.new do
-          puts
-          puts "Deploying k3s to master #{master['name']}..."
-
-          ssh master, master_script(master), print_output: true
-
-          puts
-          puts "...k3s has been deployed to master #{master['name']}."
-        end
-      end
-
-      threads.each(&:join) unless threads.empty?
-    end
-
-    threads = workers.map do |worker|
-      Thread.new do
-        puts
-        puts "Deploying k3s to worker (#{worker['name']})..."
-
-        ssh worker, worker_script(worker), print_output: true
-
-        puts
-        puts "...k3s has been deployed to worker (#{worker['name']})."
-      end
-    end
-
-    threads.each(&:join) unless threads.empty?
-  end
-
-  def label_nodes
-    check_kubectl
-
-    if master_definitions_for_create.first[:labels]
-      master_labels = master_definitions_for_create.first[:labels].map{ |k, v| "#{k}=#{v}" }.join(' ')
-      master_node_names = []
-
-      master_definitions_for_create.each do |master|
-        master_node_names << "#{configuration['cluster_name']}-#{master[:instance_type]}-#{master[:instance_id]}"
-      end
-
-      master_node_names = master_node_names.join(' ')
-
-      cmd = "kubectl label --overwrite nodes #{master_node_names} #{master_labels}"
-
-      run cmd, kubeconfig_path: kubeconfig_path
-    end
-
-    workers = []
-
-    worker_node_pools.each do |worker_node_pool|
-      workers += worker_node_pool_definitions(worker_node_pool)
-    end
-
-    return unless workers.any?
-
-    workers.each do |worker|
-      next unless worker[:labels]
-
-      worker_labels = worker[:labels].map{ |k, v| "#{k}=#{v}" }.join(' ')
-      worker_node_name = "#{configuration['cluster_name']}-#{worker[:instance_type]}-#{worker[:instance_id]}"
-
-      cmd = "kubectl label --overwrite nodes #{worker_node_name} #{worker_labels}"
-
-      run cmd, kubeconfig_path: kubeconfig_path
-    end
-  end
-
-  def taint_nodes
-    check_kubectl
-
-    if master_definitions_for_create.first[:taints]
-      master_taints = master_definitions_for_create.first[:taints].map{ |k, v| "#{k}=#{v}" }.join(' ')
-      master_node_names = []
-
-      master_definitions_for_create.each do |master|
-        master_node_names << "#{configuration['cluster_name']}-#{master[:instance_type]}-#{master[:instance_id]}"
-      end
-
-      master_node_names = master_node_names.join(' ')
-
-      cmd = "kubectl taint --overwrite nodes #{master_node_names} #{master_taints}"
-
-      run cmd, kubeconfig_path: kubeconfig_path
-    end
-
-    workers = []
-
-    worker_node_pools.each do |worker_node_pool|
-      workers += worker_node_pool_definitions(worker_node_pool)
-    end
-
-    return unless workers.any?
-
-    workers.each do |worker|
-      next unless worker[:taints]
-
-      worker_taints = worker[:taints].map{ |k, v| "#{k}=#{v}" }.join(' ')
-      worker_node_name = "#{configuration['cluster_name']}-#{worker[:instance_type]}-#{worker[:instance_id]}"
-
-      cmd = "kubectl taint --overwrite nodes #{worker_node_name} #{worker_taints}"
-
-      run cmd, kubeconfig_path: kubeconfig_path
-    end
-  end
-
-  def deploy_cloud_controller_manager
-    check_kubectl
-
-    puts
-    puts 'Deploying Hetzner Cloud Controller Manager...'
-
-    cmd = <<~BASH
-      kubectl apply -f - <<-EOF
-      apiVersion: "v1"
-      kind: "Secret"
-      metadata:
-        namespace: 'kube-system'
-        name: 'hcloud'
-      stringData:
-        network: "#{existing_network || cluster_name}"
-        token: "#{configuration.hetzner_token}"
-      EOF
-    BASH
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    cmd = 'kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml'
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    puts '...Cloud Controller Manager deployed'
-  end
-
-  def deploy_system_upgrade_controller
-    check_kubectl
-
-    puts
-    puts 'Deploying k3s System Upgrade Controller...'
-
-    cmd = 'kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/system-upgrade-controller.yaml'
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    puts '...k3s System Upgrade Controller deployed'
-  end
-
-  def deploy_csi_driver
-    check_kubectl
-
-    puts
-    puts 'Deploying Hetzner CSI Driver...'
-
-    cmd = <<~BASH
-      kubectl apply -f - <<-EOF
-      apiVersion: "v1"
-      kind: "Secret"
-      metadata:
-        namespace: 'kube-system'
-        name: 'hcloud-csi'
-      stringData:
-        token: "#{configuration.hetzner_token}"
-      EOF
-    BASH
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    cmd = 'kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml'
-
-    run cmd, kubeconfig_path: kubeconfig_path
-
-    puts '...CSI Driver deployed'
-  end
-
-  def find_flannel_interface(server)
-    if ssh(server, 'lscpu | grep Vendor') =~ /Intel/
-      'ens10'
-    else
-      'enp7s0'
-    end
+  def belongs_to_cluster?(server)
+    server.dig('labels', 'cluster') == cluster_name
   end
 
   def all_servers
@@ -463,74 +93,6 @@ class Cluster
     @workers = all_servers.select { |server| server['name'] =~ /worker\d+\Z/ }.sort { |a, b| a['name'] <=> b['name'] }
   end
 
-  def k3s_token
-    @k3s_token ||= begin
-      token = ssh(first_master, '{ TOKEN=$(< /var/lib/rancher/k3s/server/node-token); } 2> /dev/null; echo $TOKEN')
-
-      if token.empty?
-        SecureRandom.hex
-      else
-        token.split(':').last
-      end
-    end
-  end
-
-  def first_master_private_ip
-    @first_master_private_ip ||= first_master['private_net'][0]['ip']
-  end
-
-  def first_master
-    masters.first
-  end
-
-  def api_server_ip
-    return @api_server_ip if @api_server_ip
-
-    @api_server_ip = if masters.size > 1
-      load_balancer_name = "#{cluster_name}-api"
-      load_balancer = hetzner_client.get('/load_balancers')['load_balancers'].detect do |lb|
-        lb['name'] == load_balancer_name
-      end
-      load_balancer['public_net']['ipv4']['ip']
-    else
-      first_master_public_ip
-    end
-  end
-
-  def tls_sans
-    sans = " --tls-san=#{api_server_ip} "
-
-    masters.each do |master|
-      master_private_ip = master['private_net'][0]['ip']
-      sans += " --tls-san=#{master_private_ip} "
-    end
-
-    sans
-  end
-
-  def first_master_public_ip
-    @first_master_public_ip ||= first_master.dig('public_net', 'ipv4', 'ip')
-  end
-
-  def save_kubeconfig
-    kubeconfig = ssh(first_master, 'cat /etc/rancher/k3s/k3s.yaml')
-                 .gsub('127.0.0.1', api_server_ip)
-                 .gsub('default', cluster_name)
-
-    File.write(kubeconfig_path, kubeconfig)
-
-    FileUtils.chmod 'go-r', kubeconfig_path
-  end
-
-  def belongs_to_cluster?(server)
-    server.dig('labels', 'cluster') == cluster_name
-  end
-
-  def schedule_workloads_on_masters?
-    schedule_workloads_on_masters = configuration['schedule_workloads_on_masters']
-    schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
-  end
-
   def image
     configuration['image'] || 'ubuntu-20.04'
   end
@@ -543,34 +105,19 @@ class Cluster
     configuration['post_create_commands'] || []
   end
 
-  def check_kubectl
-    return if which('kubectl')
-
-    puts 'Please ensure kubectl is installed and in your PATH.'
-    exit 1
-  end
-
-  def placement_group_id(pool_name = nil)
-    @placement_groups ||= {}
-    @placement_groups[pool_name || '__masters__'] ||= Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).create
-  end
-
   def master_instance_type
     @master_instance_type ||= masters_config['instance_type']
   end
 
-  def master_labels
-    @master_labels ||= masters_config['labels']
-  end
-
-  def master_taints
-    @master_taints ||= masters_config['taints']
-  end
-
   def masters_count
     @masters_count ||= masters_config['instance_count']
   end
 
+  def placement_group_id(pool_name = nil)
+    @placement_groups ||= {}
+    @placement_groups[pool_name || '__masters__'] ||= Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).create
+  end
+
   def firewall_id
     @firewall_id ||= Hetzner::Firewall.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(high_availability: (masters_count > 1), ssh_networks: ssh_networks, api_networks: api_networks)
   end
@@ -598,8 +145,8 @@ class Cluster
       image: image,
       additional_packages: additional_packages,
      additional_post_create_commands: additional_post_create_commands,
-      labels:
-      taints:
+      labels: masters_config['labels'],
+      taints: masters_config['taints']
     }
   end
 
@@ -649,10 +196,6 @@ class Cluster
     definitions
   end
 
-  def create_load_balancer
-    Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(location: masters_location, network_id: network_id)
-  end
-
   def server_configs
     return @server_configs if @server_configs
 
@@ -665,6 +208,47 @@ class Cluster
     @server_configs
   end
 
+  def hetzner_client
+    configuration.hetzner_client
+  end
+
+  def kubernetes_client
+    @kubernetes_client ||= Kubernetes::Client.new(configuration: configuration)
+  end
+
+  def workers_definitions_for_marking
+    worker_node_pools.map do |worker_node_pool|
+      worker_node_pool_definitions(worker_node_pool)
+    end.flatten
+  end
+
+  def create_resources
+    create_servers
+    create_load_balancer if masters.size > 1
+  end
+
+  def delete_placement_groups
+    Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete
+
+    worker_node_pools.each do |pool|
+      pool_name = pool['name']
+      Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).delete
+    end
+  end
+
+  def delete_resources
+    Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(high_availability: (masters.size > 1))
+
+    Hetzner::Firewall.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(all_servers)
+
+    Hetzner::Network.new(hetzner_client: hetzner_client, cluster_name: cluster_name, existing_network: existing_network).delete
+
+    Hetzner::SSHKey.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(public_ssh_key_path: public_ssh_key_path)
+
+    delete_placement_groups
+    delete_servers
+  end
+
   def create_servers
     servers = []
 
@@ -701,56 +285,8 @@ class Cluster
     threads.each(&:join) unless threads.empty?
   end
 
-  def
-
-
-    kube_api_server_args.map do |arg|
-      " --kube-apiserver-arg=\"#{arg}\" "
-    end.join
-  end
-
-  def kube_scheduler_args_list
-    return '' if kube_scheduler_args.empty?
-
-    kube_scheduler_args.map do |arg|
-      " --kube-scheduler-arg=\"#{arg}\" "
-    end.join
-  end
-
-  def kube_controller_manager_args_list
-    return '' if kube_controller_manager_args.empty?
-
-    kube_controller_manager_args.map do |arg|
-      " --kube-controller-manager-arg=\"#{arg}\" "
-    end.join
-  end
-
-  def kube_cloud_controller_manager_args_list
-    return '' if kube_cloud_controller_manager_args.empty?
-
-    kube_cloud_controller_manager_args.map do |arg|
-      " --kube-cloud-controller-manager-arg=\"#{arg}\" "
-    end.join
-  end
-
-  def kubelet_args_list
-    return '' if kubelet_args.empty?
-
-    kubelet_args.map do |arg|
-      " --kubelet-arg=\"#{arg}\" "
-    end.join
-  end
-
-  def kube_proxy_args_list
-    return '' if kube_proxy_args.empty?
-
-    kube_api_server_args.map do |arg|
-      " --kube-proxy-arg=\"#{arg}\" "
-    end.join
-  end
-
-  def hetzner_client
-    configuration.hetzner_client
+  def create_load_balancer
+    Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(location: masters_location, network_id: network_id)
   end
 
   def existing_network
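The net effect of the cluster.rb changes: `Cluster` now only provisions Hetzner infrastructure, while everything Kubernetes-related (the k3s install, node labels and taints, add-on deployments, upgrades) happens behind the memoized `kubernetes_client`. The hand-off, as it appears in the diff:

```ruby
def kubernetes_client
  @kubernetes_client ||= Kubernetes::Client.new(configuration: configuration)
end

# called at the end of Cluster#create
kubernetes_client.deploy(
  masters: masters,
  workers: workers,
  master_definitions: master_definitions_for_create,
  worker_definitions: workers_definitions_for_marking
)
```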
data/lib/hetzner/k3s/version.rb
CHANGED
data/lib/hetzner/kubernetes/client.rb
ADDED
@@ -0,0 +1,475 @@
+# frozen_string_literal: true
+
+require_relative '../utils'
+
+module Kubernetes
+  class Client
+    include Utils
+
+    def initialize(configuration:)
+      @configuration = configuration
+    end
+
+    def deploy(masters:, workers:, master_definitions:, worker_definitions:)
+      @masters = masters
+      @workers = workers
+      @master_definitions = master_definitions
+      @worker_definitions = worker_definitions
+
+      @kube_api_server_args = configuration.fetch('kube_api_server_args', [])
+      @kube_scheduler_args = configuration.fetch('kube_scheduler_args', [])
+      @kube_controller_manager_args = configuration.fetch('kube_controller_manager_args', [])
+      @kube_cloud_controller_manager_args = configuration.fetch('kube_cloud_controller_manager_args', [])
+      @kubelet_args = configuration.fetch('kubelet_args', [])
+      @kube_proxy_args = configuration.fetch('kube_proxy_args', [])
+      @private_ssh_key_path = File.expand_path(configuration['private_ssh_key_path'])
+      @public_ssh_key_path = File.expand_path(configuration['public_ssh_key_path'])
+      @cluster_name = configuration['cluster_name']
+
+      set_up_k3s
+
+      update_nodes
+
+      post_setup_deployments
+    end
+
+    def upgrade
+      worker_upgrade_concurrency = workers.size - 1
+      worker_upgrade_concurrency = 1 if worker_upgrade_concurrency.zero?
+
+      cmd = <<~BASH
+        kubectl apply -f - <<-EOF
+        apiVersion: upgrade.cattle.io/v1
+        kind: Plan
+        metadata:
+          name: k3s-server
+          namespace: system-upgrade
+          labels:
+            k3s-upgrade: server
+        spec:
+          concurrency: 1
+          version: #{new_k3s_version}
+          nodeSelector:
+            matchExpressions:
+              - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
+          serviceAccountName: system-upgrade
+          tolerations:
+          - key: "CriticalAddonsOnly"
+            operator: "Equal"
+            value: "true"
+            effect: "NoExecute"
+          cordon: true
+          upgrade:
+            image: rancher/k3s-upgrade
+        EOF
+      BASH
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      cmd = <<~BASH
+        kubectl apply -f - <<-EOF
+        apiVersion: upgrade.cattle.io/v1
+        kind: Plan
+        metadata:
+          name: k3s-agent
+          namespace: system-upgrade
+          labels:
+            k3s-upgrade: agent
+        spec:
+          concurrency: #{worker_upgrade_concurrency}
+          version: #{new_k3s_version}
+          nodeSelector:
+            matchExpressions:
+              - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
+          serviceAccountName: system-upgrade
+          prepare:
+            image: rancher/k3s-upgrade
+            args: ["prepare", "k3s-server"]
+          cordon: true
+          upgrade:
+            image: rancher/k3s-upgrade
+        EOF
+      BASH
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      puts 'Upgrade will now start. Run `watch kubectl get nodes` to see the nodes being upgraded. This should take a few minutes for a small cluster.'
+      puts 'The API server may be briefly unavailable during the upgrade of the controlplane.'
+
+      updated_configuration = configuration.raw
+      updated_configuration['k3s_version'] = new_k3s_version
+
+      File.write(config_file, updated_configuration.to_yaml)
+    end
+
+    private
+
+    attr_reader :configuration, :masters, :workers, :kube_api_server_args, :kube_scheduler_args,
+                :kube_controller_manager_args, :kube_cloud_controller_manager_args, :kubelet_args, :kube_proxy_args,
+                :private_ssh_key_path, :public_ssh_key_path, :master_definitions, :worker_definitions, :cluster_name
+
+    def set_up_k3s
+      set_up_first_master
+      set_up_additional_masters
+      set_up_workers
+    end
+
+    def set_up_first_master
+      puts
+      puts "Deploying k3s to first master (#{first_master['name']})..."
+
+      ssh first_master, master_install_script(first_master), print_output: true
+
+      puts
+      puts 'Waiting for the control plane to be ready...'
+
+      sleep 10
+
+      puts
+      puts '...k3s has been deployed to first master.'
+
+      save_kubeconfig
+    end
+
+    def set_up_additional_masters
+      return unless masters.size > 1
+
+      threads = masters[1..].map do |master|
+        Thread.new do
+          puts
+          puts "Deploying k3s to master #{master['name']}..."
+
+          ssh master, master_install_script(master), print_output: true
+
+          puts
+          puts "...k3s has been deployed to master #{master['name']}."
+        end
+      end
+
+      threads.each(&:join) unless threads.empty?
+    end
+
+    def set_up_workers
+      threads = workers.map do |worker|
+        Thread.new do
+          puts
+          puts "Deploying k3s to worker (#{worker['name']})..."
+
+          ssh worker, worker_install_script(worker), print_output: true
+
+          puts
+          puts "...k3s has been deployed to worker (#{worker['name']})."
+        end
+      end
+
+      threads.each(&:join) unless threads.empty?
+    end
+
+    def post_setup_deployments
+      deploy_cloud_controller_manager
+      deploy_csi_driver
+      deploy_system_upgrade_controller
+    end
+
+    def update_nodes
+      mark_nodes mark_type: :labels
+      mark_nodes mark_type: :taints
+    end
+
+    def first_master
+      masters.first
+    end
+
+    def kube_api_server_args_list
+      return '' if kube_api_server_args.empty?
+
+      kube_api_server_args.map do |arg|
+        " --kube-apiserver-arg=\"#{arg}\" "
+      end.join
+    end
+
+    def kube_scheduler_args_list
+      return '' if kube_scheduler_args.empty?
+
+      kube_scheduler_args.map do |arg|
+        " --kube-scheduler-arg=\"#{arg}\" "
+      end.join
+    end
+
+    def kube_controller_manager_args_list
+      return '' if kube_controller_manager_args.empty?
+
+      kube_controller_manager_args.map do |arg|
+        " --kube-controller-manager-arg=\"#{arg}\" "
+      end.join
+    end
+
+    def kube_cloud_controller_manager_args_list
+      return '' if kube_cloud_controller_manager_args.empty?
+
+      kube_cloud_controller_manager_args.map do |arg|
+        " --kube-cloud-controller-manager-arg=\"#{arg}\" "
+      end.join
+    end
+
+    def kubelet_args_list
+      return '' if kubelet_args.empty?
+
+      kubelet_args.map do |arg|
+        " --kubelet-arg=\"#{arg}\" "
+      end.join
+    end
+
+    def kube_proxy_args_list
+      return '' if kube_proxy_args.empty?
+
+      kube_api_server_args.map do |arg|
+        " --kube-proxy-arg=\"#{arg}\" "
+      end.join
+    end
+
+    def api_server_ip
+      return @api_server_ip if @api_server_ip
+
+      @api_server_ip = if masters.size > 1
+        load_balancer_name = "#{cluster_name}-api"
+        load_balancer = hetzner_client.get('/load_balancers')['load_balancers'].detect do |lb|
+          lb['name'] == load_balancer_name
+        end
+        load_balancer['public_net']['ipv4']['ip']
+      else
+        first_master_public_ip
+      end
+    end
+
+    def master_install_script(master)
+      server = master == first_master ? ' --cluster-init ' : " --server https://#{api_server_ip}:6443 "
+      flannel_interface = find_flannel_interface(master)
+      enable_encryption = configuration.fetch('enable_encryption', false)
+      flannel_wireguard = if enable_encryption
+        if Gem::Version.new(k3s_version.scan(/\Av(.*)\+.*\Z/).flatten.first) >= Gem::Version.new('1.23.6')
+          ' --flannel-backend=wireguard-native '
+        else
+          ' --flannel-backend=wireguard '
+        end
+      else
+        ' '
+      end
+
+      extra_args = "#{kube_api_server_args_list} #{kube_scheduler_args_list} #{kube_controller_manager_args_list} #{kube_cloud_controller_manager_args_list} #{kubelet_args_list} #{kube_proxy_args_list}"
+      taint = schedule_workloads_on_masters? ? ' ' : ' --node-taint CriticalAddonsOnly=true:NoExecute '
+
+      <<~SCRIPT
+        curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="#{k3s_version}" K3S_TOKEN="#{k3s_token}" INSTALL_K3S_EXEC="server \
+        --disable-cloud-controller \
+        --disable servicelb \
+        --disable traefik \
+        --disable local-storage \
+        --disable metrics-server \
+        --write-kubeconfig-mode=644 \
+        --node-name="$(hostname -f)" \
+        --cluster-cidr=10.244.0.0/16 \
+        --etcd-expose-metrics=true \
+        #{flannel_wireguard} \
+        --kube-controller-manager-arg="bind-address=0.0.0.0" \
+        --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
+        --kube-scheduler-arg="bind-address=0.0.0.0" \
+        #{taint} #{extra_args} \
+        --kubelet-arg="cloud-provider=external" \
+        --advertise-address=$(hostname -I | awk '{print $2}') \
+        --node-ip=$(hostname -I | awk '{print $2}') \
+        --node-external-ip=$(hostname -I | awk '{print $1}') \
+        --flannel-iface=#{flannel_interface} \
+        #{server} #{tls_sans}" sh -
+      SCRIPT
+    end
+
+    def worker_install_script(worker)
+      flannel_interface = find_flannel_interface(worker)
+
+      <<~BASH
+        curl -sfL https://get.k3s.io | K3S_TOKEN="#{k3s_token}" INSTALL_K3S_VERSION="#{k3s_version}" K3S_URL=https://#{first_master_private_ip}:6443 INSTALL_K3S_EXEC="agent \
+        --node-name="$(hostname -f)" \
+        --kubelet-arg="cloud-provider=external" \
+        --node-ip=$(hostname -I | awk '{print $2}') \
+        --node-external-ip=$(hostname -I | awk '{print $1}') \
+        --flannel-iface=#{flannel_interface}" sh -
+      BASH
+    end
+
+    def find_flannel_interface(server)
+      if ssh(server, 'lscpu | grep Vendor') =~ /Intel/
+        'ens10'
+      else
+        'enp7s0'
+      end
+    end
+
+    def hetzner_client
+      configuration.hetzner_client
+    end
+
+    def first_master_public_ip
+      @first_master_public_ip ||= first_master.dig('public_net', 'ipv4', 'ip')
+    end
+
+    def save_kubeconfig
+      kubeconfig = ssh(first_master, 'cat /etc/rancher/k3s/k3s.yaml')
+                   .gsub('127.0.0.1', api_server_ip)
+                   .gsub('default', configuration['cluster_name'])
+
+      File.write(kubeconfig_path, kubeconfig)
+
+      FileUtils.chmod 'go-r', kubeconfig_path
+    end
+
+    def kubeconfig_path
+      @kubeconfig_path ||= File.expand_path(configuration['kubeconfig_path'])
+    end
+
+    def schedule_workloads_on_masters?
+      schedule_workloads_on_masters = configuration['schedule_workloads_on_masters']
+      schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
+    end
+
+    def k3s_version
+      @k3s_version ||= configuration['k3s_version']
+    end
+
+    def k3s_token
+      @k3s_token ||= begin
+        token = ssh(first_master, '{ TOKEN=$(< /var/lib/rancher/k3s/server/node-token); } 2> /dev/null; echo $TOKEN')
+
+        if token.empty?
+          SecureRandom.hex
+        else
+          token.split(':').last
+        end
+      end
+    end
+
+    def tls_sans
+      sans = " --tls-san=#{api_server_ip} "
+
+      masters.each do |master|
+        master_private_ip = master['private_net'][0]['ip']
+        sans += " --tls-san=#{master_private_ip} "
+      end
+
+      sans
+    end
+
+    def mark_nodes(mark_type:)
+      check_kubectl
+
+      action = mark_type == :labels ? 'label' : 'taint'
+
+      if master_definitions.first[mark_type]
+        master_labels = master_definitions.first[mark_type].map { |k, v| "#{k}=#{v}" }.join(' ')
+        master_node_names = []
+
+        master_definitions.each do |master|
+          master_node_names << "#{configuration['cluster_name']}-#{master[:instance_type]}-#{master[:instance_id]}"
+        end
+
+        master_node_names = master_node_names.join(' ')
+
+        cmd = "kubectl #{action} --overwrite nodes #{master_node_names} #{master_labels}"
+
+        run cmd, kubeconfig_path: kubeconfig_path
+      end
+
+      return unless worker_definitions.any?
+
+      worker_definitions.each do |worker|
+        next unless worker[mark_type]
+
+        worker_labels = worker[mark_type].map { |k, v| "#{k}=#{v}" }.join(' ')
+        worker_node_name = "#{configuration['cluster_name']}-#{worker[:instance_type]}-#{worker[:instance_id]}"
+
+        cmd = "kubectl #{action} --overwrite nodes #{worker_node_name} #{worker_labels}"
+
+        run cmd, kubeconfig_path: kubeconfig_path
+      end
+    end
+
+    def deploy_cloud_controller_manager
+      check_kubectl
+
+      puts
+      puts 'Deploying Hetzner Cloud Controller Manager...'
+
+      cmd = <<~BASH
+        kubectl apply -f - <<-EOF
+        apiVersion: "v1"
+        kind: "Secret"
+        metadata:
+          namespace: 'kube-system'
+          name: 'hcloud'
+        stringData:
+          network: "#{configuration['existing_network'] || cluster_name}"
+          token: "#{configuration.hetzner_token}"
+        EOF
+      BASH
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      cmd = 'kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml'
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      puts '...Cloud Controller Manager deployed'
+    end
+
+    def deploy_system_upgrade_controller
+      check_kubectl
+
+      puts
+      puts 'Deploying k3s System Upgrade Controller...'
+
+      cmd = 'kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/system-upgrade-controller.yaml'
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      puts '...k3s System Upgrade Controller deployed'
+    end
+
+    def deploy_csi_driver
+      check_kubectl
+
+      puts
+      puts 'Deploying Hetzner CSI Driver...'
+
+      cmd = <<~BASH
+        kubectl apply -f - <<-EOF
+        apiVersion: "v1"
+        kind: "Secret"
+        metadata:
+          namespace: 'kube-system'
+          name: 'hcloud-csi'
+        stringData:
+          token: "#{configuration.hetzner_token}"
+        EOF
+      BASH
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      cmd = 'kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml'
+
+      run cmd, kubeconfig_path: kubeconfig_path
+
+      puts '...CSI Driver deployed'
+    end
+
+    def check_kubectl
+      return if which('kubectl')
+
+      puts 'Please ensure kubectl is installed and in your PATH.'
+      exit 1
+    end
+
+    def first_master_private_ip
+      @first_master_private_ip ||= first_master['private_net'][0]['ip']
+    end
+  end
+end
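One quirk worth flagging in the new client (carried over verbatim from the old cluster.rb): `kube_proxy_args_list` guards on `kube_proxy_args.empty?` but then maps over `kube_api_server_args`, which looks like a copy-paste slip. A corrected sketch would map the same list it guards on:

```ruby
def kube_proxy_args_list
  return '' if kube_proxy_args.empty?

  kube_proxy_args.map do |arg|
    " --kube-proxy-arg=\"#{arg}\" "
  end.join
end
```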
data/lib/hetzner/utils.rb
CHANGED
@@ -85,10 +85,6 @@ module Utils
       end
     end
     output.chop
-  # rescue StandardError => e
-  #   p [e.class, e.message]
-  #   retries += 1
-  #   retry unless retries > 15 || e.message =~ /Bad file descriptor/
   rescue Timeout::Error, IOError, Errno::EBADF
     retries += 1
     retry unless retries > 15
@@ -109,4 +105,8 @@ module Utils
     MESSAGE
     exit 1
   end
+
+  def verify_host_key
+    @verify_host_key ||= configuration.fetch('verify_host_key', false)
+  end
 end
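A small note on the new `verify_host_key` helper: because it memoizes with `||=` and the expected values are booleans, a configured (or defaulted) `false` is re-fetched on every call. Harmless here, but the usual idiom for memoizing a value that may legitimately be `false` is:

```ruby
def verify_host_key
  return @verify_host_key if defined?(@verify_host_key)

  @verify_host_key = configuration.fetch('verify_host_key', false)
end
```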
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: hetzner-k3s
 version: !ruby/object:Gem::Version
-  version: 0.6.
+  version: 0.6.3
 platform: ruby
 authors:
 - Vito Botta
@@ -155,6 +155,7 @@ files:
 - lib/hetzner/k3s/cluster.rb
 - lib/hetzner/k3s/configuration.rb
 - lib/hetzner/k3s/version.rb
+- lib/hetzner/kubernetes/client.rb
 - lib/hetzner/utils.rb
 - spec/k3s_spec.rb
 - spec/spec_helper.rb