hetzner-k3s 0.4.0 → 0.4.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 6ee4a4ac2c31ebff805ee20edc3658ffe64be32e50b524ee4af3646e3ffc3a3c
-  data.tar.gz: 8cbc33a2a696b19c8e614932d1daa7fa9beddaf9d69dd8377d909cb382e40f87
+  metadata.gz: cb83104df3f0724108d93046e10e5889be57a54d941549d2b8f2400344448ce6
+  data.tar.gz: 2f3a5069910608a299b611bd7ccfdcae4e82ac1d9d0e98ad4b21542173297662
 SHA512:
-  metadata.gz: ff2ca466abbd198b3bc76c8854113d90033fb606e9f11152ecf6d079564ee4dcbdab359a5b17229770a7dc531a9674b211d079a2204596efe3ec5b67157bf82e
-  data.tar.gz: a6a16c64b0ada5c4d1a740894df09a9f41ed0110b06cfd5629b1971c64836a5fd38bceebb7898432b275cb677779606ecd780b917d4095eb161987d26c0eecc0
+  metadata.gz: 400792543d20abaa5a6b57b26bdabc1ab475d9f5e991ee1808c77c9027e17039036bd1e80b292aeb9982275e05b93ec58b9986bbf7c689cbf568b3f558d23f8c
+  data.tar.gz: 71ef14f3b9d8c86590a11afe260ce68e69bc6bda532328b4d7f3b4064192bf6a2df587ea9107b441102ed564bcb3dcb11f661926185d7ef54f93d3c0a7c90f44
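Both archives inside the gem changed, so all four digests were regenerated. If you want to check a downloaded gem against these values, they can be recomputed with Ruby's stdlib — a minimal sketch, assuming you have extracted the `.gem` (a plain tar archive) so that `metadata.gz` and `data.tar.gz` sit in the current directory:

```ruby
require "digest"

# Recompute the digests recorded in checksums.yaml from the two
# members of the extracted .gem archive.
%w[metadata.gz data.tar.gz].each do |member|
  bytes = File.binread(member)
  puts "#{member} SHA256: #{Digest::SHA256.hexdigest(bytes)}"
  puts "#{member} SHA512: #{Digest::SHA512.hexdigest(bytes)}"
end
```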
data/README.md CHANGED
@@ -38,7 +38,7 @@ This will install the `hetzner-k3s` executable in your PATH.
 Alternatively, if you don't want to set up a Ruby runtime but have Docker installed, you can use a container. Run the following from inside the directory where you have the config file for the cluster (described in the next section):
 
 ```bash
-docker run --rm -it -v ${PWD}:/cluster -v ${HOME}/.ssh:/tmp/.ssh vitobotta/hetzner-k3s:v0.3.8 create-cluster --config-file /cluster/test.yaml
+docker run --rm -it -v ${PWD}:/cluster -v ${HOME}/.ssh:/tmp/.ssh vitobotta/hetzner-k3s:v0.4.1 create-cluster --config-file /cluster/test.yaml
 ```
 
 Replace `test.yaml` with the name of your config file.
@@ -53,7 +53,8 @@ hetzner_token: <your token>
 cluster_name: test
 kubeconfig_path: "./kubeconfig"
 k3s_version: v1.21.3+k3s1
-ssh_key_path: "~/.ssh/id_rsa.pub"
+public_ssh_key_path: "~/.ssh/id_rsa.pub"
+private_ssh_key_path: "~/.ssh/id_rsa"
 ssh_allowed_networks:
   - 0.0.0.0/0
 verify_host_key: false
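The single `ssh_key_path` option is replaced by `public_ssh_key_path` plus an optional `private_ssh_key_path`. A minimal sketch of how these two keys are consumed, mirroring the `dig`/`File.expand_path` calls in `Cluster#create` further down (`test.yaml` is just the example filename from above):

```ruby
require "yaml"

configuration = YAML.load_file("test.yaml")

# Required: this key is uploaded to the Hetzner project.
public_ssh_key_path = File.expand_path(configuration.dig("public_ssh_key_path"))

# Optional: only expanded when present; otherwise Net::SSH falls back
# to its defaults (e.g. the ssh-agent and default identity files).
private_ssh_key_path = configuration.dig("private_ssh_key_path")
private_ssh_key_path = File.expand_path(private_ssh_key_path) if private_ssh_key_path
```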
@@ -239,11 +240,19 @@ I recommend that you create a separate Hetzner project for each cluster, because
 
 ## changelog
 
+- 0.4.1
+  - Allow optionally specifying the path of the private SSH key
+  - Set correct permissions for the kubeconfig file
+  - Retry fetching manifests a few times to allow for temporary network issues
+  - Allow optionally scheduling workloads on masters
+  - Allow clusters with no worker node pools if scheduling is enabled for the masters
+
 - 0.4.0
   - Ensure the masters are removed from the API load balancer before deleting the load balancer
   - Ensure the servers are removed from the firewall before deleting it
   - Allow using an environment variable to specify the Hetzner token
   - Allow restricting SSH access to the nodes to specific networks
+  - Do not open port 6443 on the nodes if a load balancer is created for an HA cluster
 
 - 0.3.9
   - Add command "version" to print the version of the tool in use
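The last two 0.4.1 entries work together: once workloads may be scheduled on the masters, a cluster no longer needs any worker node pools. A sketch of the resulting minimal config, assuming the `masters` keys from the project's example file — `schedule_workloads_on_masters` is the exact key read by the validator shown below:

```ruby
require "yaml"

# Hypothetical masters-only cluster: no worker_node_pools key at all.
config = YAML.safe_load(<<~YAML)
  cluster_name: test
  schedule_workloads_on_masters: true
  masters:
    instance_type: cpx21
    instance_count: 3
YAML

# Mirrors Cluster#find_worker_node_pools below: a missing key becomes [].
config.fetch("worker_node_pools", []) #=> []
```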
data/bin/build.sh ADDED
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+
+
+IMAGE="vitobotta/hetzner-k3s"
+
+docker build -t ${IMAGE}:v0.4.1 \
+  --platform=linux/amd64 \
+  --cache-from ${IMAGE}:v0.4.0 \
+  --build-arg BUILDKIT_INLINE_CACHE=1 .
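The script relies on BuildKit's inline cache: `BUILDKIT_INLINE_CACHE=1` embeds layer-cache metadata in the published image, which is what allows the next release's build to pass the previous tag to `--cache-from` and reuse unchanged layers straight from the registry (the build must run under BuildKit for this to take effect).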
@@ -5,15 +5,15 @@ module Hetzner
       @cluster_name = cluster_name
     end
 
-    def create(ssh_key_path:)
-      @ssh_key_path = ssh_key_path
+    def create(public_ssh_key_path:)
+      @public_ssh_key_path = public_ssh_key_path
 
       puts
 
-      if ssh_key = find_ssh_key
+      if (public_ssh_key = find_public_ssh_key)
         puts "SSH key already exists, skipping."
         puts
-        return ssh_key["id"]
+        return public_ssh_key["id"]
       end
 
       puts "Creating SSH key..."
@@ -26,13 +26,13 @@ module Hetzner
       JSON.parse(response)["ssh_key"]["id"]
     end
 
-    def delete(ssh_key_path:)
-      @ssh_key_path = ssh_key_path
+    def delete(public_ssh_key_path:)
+      @public_ssh_key_path = public_ssh_key_path
 
-      if ssh_key = find_ssh_key
-        if ssh_key["name"] == cluster_name
+      if (public_ssh_key = find_public_ssh_key)
+        if public_ssh_key["name"] == cluster_name
           puts "Deleting ssh_key..."
-          hetzner_client.delete("/ssh_keys", ssh_key["id"])
+          hetzner_client.delete("/ssh_keys", public_ssh_key["id"])
           puts "...ssh_key deleted."
         else
           puts "The SSH key existed before creating the cluster, so I won't delete it."
@@ -46,24 +46,24 @@ module Hetzner
 
     private
 
-    attr_reader :hetzner_client, :cluster_name, :ssh_key_path
+    attr_reader :hetzner_client, :cluster_name, :public_ssh_key_path
 
-    def public_key
-      @public_key ||= File.read(ssh_key_path).chop
+    def public_ssh_key
+      @public_ssh_key ||= File.read(public_ssh_key_path).chop
     end
 
     def ssh_key_config
       {
         name: cluster_name,
-        public_key: public_key
+        public_ssh_key: public_ssh_key
       }
     end
 
     def fingerprint
-      @fingerprint ||= ::SSHKey.fingerprint(public_key)
+      @fingerprint ||= ::SSHKey.fingerprint(public_ssh_key)
     end
 
-    def find_ssh_key
+    def find_public_ssh_key
       key = hetzner_client.get("/ssh_keys")["ssh_keys"].detect do |ssh_key|
         ssh_key["fingerprint"] == fingerprint
       end
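Renamed, the lookup logic reads more clearly: the local public key is fingerprinted and compared against the keys already registered in the Hetzner project, which is what makes re-running `create-cluster` idempotent. A condensed sketch of that flow with a stubbed API client standing in for `hetzner_client` (`SSHKey.fingerprint` is the same call used in the diff above):

```ruby
require "sshkey"

# Stub with the same response shape as GET /ssh_keys on the Hetzner Cloud API.
class FakeHetznerClient
  def get(_path)
    { "ssh_keys" => [{ "id" => 42, "name" => "test", "fingerprint" => "aa:bb:cc" }] }
  end
end

public_ssh_key = File.read(File.expand_path("~/.ssh/id_rsa.pub")).chop
fingerprint = SSHKey.fingerprint(public_ssh_key)

existing = FakeHetznerClient.new.get("/ssh_keys")["ssh_keys"].detect do |ssh_key|
  ssh_key["fingerprint"] == fingerprint
end

puts existing ? "SSH key already exists, skipping." : "Creating SSH key..."
```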
@@ -83,7 +83,8 @@ module Hetzner
 
         case action
         when :create
-          validate_ssh_key
+          validate_public_ssh_key
+          validate_private_ssh_key
           validate_ssh_allowed_networks
           validate_location
           validate_k3s_version
@@ -147,16 +148,25 @@ module Hetzner
       errors << "Invalid path for the kubeconfig"
     end
 
-    def validate_ssh_key
-      path = File.expand_path(configuration.dig("ssh_key_path"))
+    def validate_public_ssh_key
+      path = File.expand_path(configuration.dig("public_ssh_key_path"))
       errors << "Invalid Public SSH key path" and return unless File.exists? path
 
       key = File.read(path)
-      errors << "Public SSH key is invalid" unless ::SSHKey.valid_ssh_public_key? key
+      errors << "Public SSH key is invalid" unless ::SSHKey.valid_ssh_public_key?(key)
     rescue
       errors << "Invalid Public SSH key path"
     end
 
+    def validate_private_ssh_key
+      return unless (private_ssh_key_path = configuration.dig("private_ssh_key_path"))
+
+      path = File.expand_path(private_ssh_key_path)
+      errors << "Invalid Private SSH key path" and return unless File.exists?(path)
+    rescue
+      errors << "Invalid Private SSH key path"
+    end
+
     def validate_kubeconfig_path_must_exist
       path = File.expand_path configuration.dig("kubeconfig_path")
       errors << "kubeconfig path is invalid" and return unless File.exists? path
@@ -231,14 +241,22 @@ module Hetzner
       begin
         worker_node_pools = configuration.dig("worker_node_pools")
       rescue
-        errors << "Invalid node pools configuration"
+        unless schedule_workloads_on_masters?
+          errors << "Invalid node pools configuration"
+          return
+        end
+      end
+
+      if worker_node_pools.nil? && schedule_workloads_on_masters?
         return
       end
 
       if !worker_node_pools.is_a? Array
         errors << "Invalid node pools configuration"
       elsif worker_node_pools.size == 0
-        errors << "At least one node pool is required in order to schedule workloads"
+        unless schedule_workloads_on_masters?
+          errors << "At least one node pool is required in order to schedule workloads"
+        end
       elsif worker_node_pools.map{ |worker_node_pool| worker_node_pool["name"]}.uniq.size != worker_node_pools.size
         errors << "Each node pool must have an unique name"
       elsif server_types
@@ -248,6 +266,11 @@ module Hetzner
       end
     end
 
+    def schedule_workloads_on_masters?
+      schedule_workloads_on_masters = configuration.dig("schedule_workloads_on_masters")
+      schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
+    end
+
     def validate_new_k3s_version_must_be_more_recent
       return if options[:force] == "true"
       return unless kubernetes_client
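`schedule_workloads_on_masters?` normalises the config value to a strict boolean: any truthy value enables scheduling on the masters, while a missing or false value keeps the default behaviour (taint the masters and require worker pools).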
@@ -321,7 +344,7 @@ module Hetzner
     end
 
     def validate_verify_host_key
-      return unless [true, false].include?(configuration.fetch("ssh_key_path", false))
+      return unless [true, false].include?(configuration.fetch("public_ssh_key_path", false))
       errors << "Please set the verify_host_key option to either true or false"
     end
 
@@ -22,12 +22,15 @@ class Cluster
   end
 
   def create(configuration:)
+    @configuration = configuration
     @cluster_name = configuration.dig("cluster_name")
     @kubeconfig_path = File.expand_path(configuration.dig("kubeconfig_path"))
-    @ssh_key_path = File.expand_path(configuration.dig("ssh_key_path"))
+    @public_ssh_key_path = File.expand_path(configuration.dig("public_ssh_key_path"))
+    private_ssh_key_path = configuration.dig("private_ssh_key_path")
+    @private_ssh_key_path = File.expand_path(private_ssh_key_path) if private_ssh_key_path
     @k3s_version = configuration.dig("k3s_version")
     @masters_config = configuration.dig("masters")
-    @worker_node_pools = configuration.dig("worker_node_pools")
+    @worker_node_pools = find_worker_node_pools(configuration)
     @location = configuration.dig("location")
     @verify_host_key = configuration.fetch("verify_host_key", false)
     @servers = []
@@ -47,7 +50,7 @@ class Cluster
   def delete(configuration:)
     @cluster_name = configuration.dig("cluster_name")
     @kubeconfig_path = File.expand_path(configuration.dig("kubeconfig_path"))
-    @ssh_key_path = File.expand_path(configuration.dig("ssh_key_path"))
+    @public_ssh_key_path = File.expand_path(configuration.dig("public_ssh_key_path"))
 
     delete_resources
   end
@@ -64,13 +67,17 @@ class Cluster
 
   private
 
+  def find_worker_node_pools(configuration)
+    configuration.fetch("worker_node_pools", [])
+  end
+
   attr_accessor :servers
 
   attr_reader :hetzner_client, :cluster_name, :kubeconfig_path, :k3s_version,
               :masters_config, :worker_node_pools,
-              :location, :ssh_key_path, :kubernetes_client,
+              :location, :public_ssh_key_path, :kubernetes_client,
               :hetzner_token, :tls_sans, :new_k3s_version, :configuration,
-              :config_file, :verify_host_key, :networks
+              :config_file, :verify_host_key, :networks, :private_ssh_key_path, :configuration
 
 
   def latest_k3s_version
@@ -95,7 +102,7 @@ class Cluster
     ssh_key_id = Hetzner::SSHKey.new(
       hetzner_client: hetzner_client,
       cluster_name: cluster_name
-    ).create(ssh_key_path: ssh_key_path)
+    ).create(public_ssh_key_path: public_ssh_key_path)
 
     server_configs = []
 
@@ -169,7 +176,7 @@ class Cluster
     Hetzner::SSHKey.new(
       hetzner_client: hetzner_client,
       cluster_name: cluster_name
-    ).delete(ssh_key_path: ssh_key_path)
+    ).delete(public_ssh_key_path: public_ssh_key_path)
 
     threads = all_servers.map do |server|
       Thread.new do
@@ -207,6 +214,8 @@ class Cluster
     server = master == first_master ? " --cluster-init " : " --server https://#{first_master_private_ip}:6443 "
     flannel_interface = find_flannel_interface(master)
 
+    taint = schedule_workloads_on_masters? ? " " : " --node-taint CriticalAddonsOnly=true:NoExecute "
+
     <<~EOF
     curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="#{k3s_version}" K3S_TOKEN="#{k3s_token}" INSTALL_K3S_EXEC="server \
     --disable-cloud-controller \
@@ -223,7 +232,7 @@ class Cluster
     --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
     --kube-scheduler-arg="address=0.0.0.0" \
     --kube-scheduler-arg="bind-address=0.0.0.0" \
-    --node-taint CriticalAddonsOnly=true:NoExecute \
+    #{taint} \
     --kubelet-arg="cloud-provider=external" \
     --advertise-address=$(hostname -I | awk '{print $2}') \
     --node-ip=$(hostname -I | awk '{print $2}') \
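This is the heart of the new option: by default each master keeps the `CriticalAddonsOnly=true:NoExecute` taint, so only pods carrying a matching toleration (critical addons) may run there; with `schedule_workloads_on_masters` enabled the taint is simply omitted from the k3s install flags and the masters accept regular workloads — which is also why worker node pools become optional.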
@@ -313,7 +322,7 @@ class Cluster
     end
 
 
-    manifest = HTTP.follow.get("https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml").body
+    manifest = fetch_manifest("https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml")
 
     File.write("/tmp/cloud-controller-manager.yaml", manifest)
 
@@ -338,6 +347,13 @@ class Cluster
     retry
   end
 
+  def fetch_manifest(url)
+    retries ||= 1
+    HTTP.follow.get(url).body
+  rescue
+    retry if (retries += 1) <= 10
+  end
+
   def deploy_system_upgrade_controller
     puts
     puts "Deploying k3s System Upgrade Controller..."
@@ -442,7 +458,13 @@ class Cluster
     public_ip = server.dig("public_net", "ipv4", "ip")
     output = ""
 
-    Net::SSH.start(public_ip, "root", verify_host_key: (verify_host_key ? :always : :never)) do |session|
+    params = { verify_host_key: (verify_host_key ? :always : :never) }
+
+    if private_ssh_key_path
+      params[:keys] = [private_ssh_key_path]
+    end
+
+    Net::SSH.start(public_ip, "root", params) do |session|
       session.exec!(command) do |channel, stream, data|
         output << data
         puts data if print_output
@@ -453,6 +475,10 @@ class Cluster
     retry unless e.message =~ /Too many authentication failures/
   rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED, Errno::ENETUNREACH, Errno::EHOSTUNREACH
     retry
+  rescue Net::SSH::AuthenticationFailed
+    puts
+    puts "Cannot continue: SSH authentication failed. Please ensure that the private SSH key is correct."
+    exit 1
   rescue Net::SSH::HostKeyMismatch
     puts
     puts "Cannot continue: Unable to SSH into server with IP #{public_ip} because the existing fingerprint in the known_hosts file does not match that of the actual host key."
@@ -542,6 +568,8 @@ class Cluster
       gsub("default", cluster_name)
 
     File.write(kubeconfig_path, kubeconfig)
+
+    FileUtils.chmod "go-r", kubeconfig_path
   end
 
   def ugrade_plan_manifest_path
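`FileUtils.chmod` accepts symbolic mode strings, so `"go-r"` removes read permission for group and other, leaving the kubeconfig readable only by its owner — the same effect as running `chmod go-r` in a shell, and enough to satisfy tools that warn about group- or world-readable kubeconfig files.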
@@ -605,4 +633,9 @@ class Cluster
     server.dig("labels", "cluster") == cluster_name
   end
 
+  def schedule_workloads_on_masters?
+    schedule_workloads_on_masters = configuration.dig("schedule_workloads_on_masters")
+    schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
+  end
+
 end
@@ -1,5 +1,5 @@
 module Hetzner
   module K3s
-    VERSION = "0.4.0"
+    VERSION = "0.4.1"
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: hetzner-k3s
 version: !ruby/object:Gem::Version
-  version: 0.4.0
+  version: 0.4.1
 platform: ruby
 authors:
 - Vito Botta
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2021-08-24 00:00:00.000000000 Z
+date: 2021-10-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor
@@ -127,6 +127,7 @@ files:
 - LICENSE.txt
 - README.md
 - Rakefile
+- bin/build.sh
 - bin/console
 - bin/setup
 - cluster_config.yaml.example