hetzner-k3s 0.6.1 → 0.6.3

@@ -14,6 +14,8 @@ require_relative '../infra/server'
  require_relative '../infra/load_balancer'
  require_relative '../infra/placement_group'

+ require_relative '../kubernetes/client'
+
  require_relative '../utils'

  class Cluster
@@ -25,35 +27,18 @@ class Cluster

  def create
  @cluster_name = configuration['cluster_name']
- @kubeconfig_path = File.expand_path(configuration['kubeconfig_path'])
- @public_ssh_key_path = File.expand_path(configuration['public_ssh_key_path'])
- private_ssh_key_path = configuration['private_ssh_key_path']
- @private_ssh_key_path = private_ssh_key_path && File.expand_path(private_ssh_key_path)
- @k3s_version = configuration['k3s_version']
  @masters_config = configuration['masters']
  @worker_node_pools = find_worker_node_pools(configuration)
  @masters_location = configuration['location']
- @verify_host_key = configuration.fetch('verify_host_key', false)
  @servers = []
  @ssh_networks = configuration['ssh_allowed_networks']
  @api_networks = configuration['api_allowed_networks']
- @enable_encryption = configuration.fetch('enable_encryption', false)
- @kube_api_server_args = configuration.fetch('kube_api_server_args', [])
- @kube_scheduler_args = configuration.fetch('kube_scheduler_args', [])
- @kube_controller_manager_args = configuration.fetch('kube_controller_manager_args', [])
- @kube_cloud_controller_manager_args = configuration.fetch('kube_cloud_controller_manager_args', [])
- @kubelet_args = configuration.fetch('kubelet_args', [])
- @kube_proxy_args = configuration.fetch('kube_proxy_args', [])
+ @private_ssh_key_path = File.expand_path(configuration['private_ssh_key_path'])
+ @public_ssh_key_path = File.expand_path(configuration['public_ssh_key_path'])

  create_resources

- deploy_kubernetes
-
- sleep 10
-
- deploy_cloud_controller_manager
- deploy_csi_driver
- deploy_system_upgrade_controller
+ kubernetes_client.deploy(masters: masters, workers: workers, master_definitions: master_definitions_for_create, worker_definitions: workers_definitions_for_marking)
  end

  def delete
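
Note: create and upgrade now delegate to the Kubernetes client required above as '../kubernetes/client'. Below is a minimal sketch of the interface those calls assume, inferred only from the signatures visible in this diff; the class layout and comments are illustrative, not the gem's actual implementation.

    module Kubernetes
      class Client
        def initialize(configuration:)
          @configuration = configuration
        end

        # Installs k3s on the created servers and marks worker nodes using the
        # labels/taints carried by the definitions. Keyword arguments match the
        # call in Cluster#create; the body is omitted here.
        def deploy(masters:, workers:, master_definitions:, worker_definitions:)
          # ...
        end

        # Rolls the cluster to the new k3s version, matching the call in
        # Cluster#upgrade.
        def upgrade
          # ...
        end
      end
    end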
@@ -72,302 +57,26 @@ class Cluster
  @new_k3s_version = new_k3s_version
  @config_file = config_file

- upgrade_cluster
+ kubernetes_client.upgrade
  end

  private

  attr_accessor :servers

- attr_reader :configuration, :cluster_name, :kubeconfig_path, :k3s_version,
+ attr_reader :configuration, :cluster_name, :kubeconfig_path,
  :masters_config, :worker_node_pools,
- :masters_location, :public_ssh_key_path,
+ :masters_location, :private_ssh_key_path, :public_ssh_key_path,
  :hetzner_token, :new_k3s_version,
- :config_file, :verify_host_key, :ssh_networks, :private_ssh_key_path,
- :enable_encryption, :kube_api_server_args, :kube_scheduler_args,
- :kube_controller_manager_args, :kube_cloud_controller_manager_args,
- :kubelet_args, :kube_proxy_args, :api_networks
+ :config_file, :ssh_networks,
+ :api_networks

  def find_worker_node_pools(configuration)
  configuration.fetch('worker_node_pools', [])
  end

- def latest_k3s_version
- response = HTTParty.get('https://api.github.com/repos/k3s-io/k3s/tags').body
- JSON.parse(response).first['name']
- end
-
- def create_resources
- create_servers
- create_load_balancer if masters.size > 1
- end
-
- def delete_placement_groups
- Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete
-
- worker_node_pools.each do |pool|
- pool_name = pool['name']
- Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).delete
- end
- end
-
- def delete_resources
- Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(high_availability: (masters.size > 1))
-
- Hetzner::Firewall.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(all_servers)
-
- Hetzner::Network.new(hetzner_client: hetzner_client, cluster_name: cluster_name, existing_network: existing_network).delete
-
- Hetzner::SSHKey.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(public_ssh_key_path: public_ssh_key_path)
-
- delete_placement_groups
- delete_servers
- end
-
- def upgrade_cluster
- worker_upgrade_concurrency = workers.size - 1
- worker_upgrade_concurrency = 1 if worker_upgrade_concurrency.zero?
-
- cmd = <<~BASH
- kubectl apply -f - <<-EOF
- apiVersion: upgrade.cattle.io/v1
- kind: Plan
- metadata:
- name: k3s-server
- namespace: system-upgrade
- labels:
- k3s-upgrade: server
- spec:
- concurrency: 1
- version: #{new_k3s_version}
- nodeSelector:
- matchExpressions:
- - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
- serviceAccountName: system-upgrade
- tolerations:
- - key: "CriticalAddonsOnly"
- operator: "Equal"
- value: "true"
- effect: "NoExecute"
- cordon: true
- upgrade:
- image: rancher/k3s-upgrade
- EOF
- BASH
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- cmd = <<~BASH
- kubectl apply -f - <<-EOF
- apiVersion: upgrade.cattle.io/v1
- kind: Plan
- metadata:
- name: k3s-agent
- namespace: system-upgrade
- labels:
- k3s-upgrade: agent
- spec:
- concurrency: #{worker_upgrade_concurrency}
- version: #{new_k3s_version}
- nodeSelector:
- matchExpressions:
- - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
- serviceAccountName: system-upgrade
- prepare:
- image: rancher/k3s-upgrade
- args: ["prepare", "k3s-server"]
- cordon: true
- upgrade:
- image: rancher/k3s-upgrade
- EOF
- BASH
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- puts 'Upgrade will now start. Run `watch kubectl get nodes` to see the nodes being upgraded. This should take a few minutes for a small cluster.'
- puts 'The API server may be briefly unavailable during the upgrade of the controlplane.'
-
- updated_configuration = configuration.raw
- updated_configuration['k3s_version'] = new_k3s_version
-
- File.write(config_file, updated_configuration.to_yaml)
- end
-
- def master_script(master)
- server = master == first_master ? ' --cluster-init ' : " --server https://#{api_server_ip}:6443 "
- flannel_interface = find_flannel_interface(master)
-
- available_k3s_releases = Hetzner::Configuration.available_releases
- wireguard_native_min_version_index = available_k3s_releases.find_index('v1.23.6+k3s1')
- selected_version_index = available_k3s_releases.find_index(k3s_version)
-
- flannel_wireguard = if enable_encryption
- if selected_version_index >= wireguard_native_min_version_index
- ' --flannel-backend=wireguard-native '
- else
- ' --flannel-backend=wireguard '
- end
- else
- ' '
- end
-
- extra_args = "#{kube_api_server_args_list} #{kube_scheduler_args_list} #{kube_controller_manager_args_list} #{kube_cloud_controller_manager_args_list} #{kubelet_args_list} #{kube_proxy_args_list}"
- taint = schedule_workloads_on_masters? ? ' ' : ' --node-taint CriticalAddonsOnly=true:NoExecute '
-
- <<~SCRIPT
- curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="#{k3s_version}" K3S_TOKEN="#{k3s_token}" INSTALL_K3S_EXEC="server \
- --disable-cloud-controller \
- --disable servicelb \
- --disable traefik \
- --disable local-storage \
- --disable metrics-server \
- --write-kubeconfig-mode=644 \
- --node-name="$(hostname -f)" \
- --cluster-cidr=10.244.0.0/16 \
- --etcd-expose-metrics=true \
- #{flannel_wireguard} \
- --kube-controller-manager-arg="bind-address=0.0.0.0" \
- --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
- --kube-scheduler-arg="bind-address=0.0.0.0" \
- #{taint} #{extra_args} \
- --kubelet-arg="cloud-provider=external" \
- --advertise-address=$(hostname -I | awk '{print $2}') \
- --node-ip=$(hostname -I | awk '{print $2}') \
- --node-external-ip=$(hostname -I | awk '{print $1}') \
- --flannel-iface=#{flannel_interface} \
- #{server} #{tls_sans}" sh -
- SCRIPT
- end
-
- def worker_script(worker)
- flannel_interface = find_flannel_interface(worker)
-
- <<~BASH
- curl -sfL https://get.k3s.io | K3S_TOKEN="#{k3s_token}" INSTALL_K3S_VERSION="#{k3s_version}" K3S_URL=https://#{first_master_private_ip}:6443 INSTALL_K3S_EXEC="agent \
- --node-name="$(hostname -f)" \
- --kubelet-arg="cloud-provider=external" \
- --node-ip=$(hostname -I | awk '{print $2}') \
- --node-external-ip=$(hostname -I | awk '{print $1}') \
- --flannel-iface=#{flannel_interface}" sh -
- BASH
- end
-
- def deploy_kubernetes
- puts
- puts "Deploying k3s to first master (#{first_master['name']})..."
-
- ssh first_master, master_script(first_master), print_output: true
-
- puts
- puts '...k3s has been deployed to first master.'
-
- save_kubeconfig
-
- if masters.size > 1
- threads = masters[1..].map do |master|
- Thread.new do
- puts
- puts "Deploying k3s to master #{master['name']}..."
-
- ssh master, master_script(master), print_output: true
-
- puts
- puts "...k3s has been deployed to master #{master['name']}."
- end
- end
-
- threads.each(&:join) unless threads.empty?
- end
-
- threads = workers.map do |worker|
- Thread.new do
- puts
- puts "Deploying k3s to worker (#{worker['name']})..."
-
- ssh worker, worker_script(worker), print_output: true
-
- puts
- puts "...k3s has been deployed to worker (#{worker['name']})."
- end
- end
-
- threads.each(&:join) unless threads.empty?
- end
-
- def deploy_cloud_controller_manager
- check_kubectl
-
- puts
- puts 'Deploying Hetzner Cloud Controller Manager...'
-
- cmd = <<~BASH
- kubectl apply -f - <<-EOF
- apiVersion: "v1"
- kind: "Secret"
- metadata:
- namespace: 'kube-system'
- name: 'hcloud'
- stringData:
- network: "#{existing_network || cluster_name}"
- token: "#{configuration.hetzner_token}"
- EOF
- BASH
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- cmd = 'kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml'
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- puts '...Cloud Controller Manager deployed'
- end
-
- def deploy_system_upgrade_controller
- check_kubectl
-
- puts
- puts 'Deploying k3s System Upgrade Controller...'
-
- cmd = 'kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/system-upgrade-controller.yaml'
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- puts '...k3s System Upgrade Controller deployed'
- end
-
- def deploy_csi_driver
- check_kubectl
-
- puts
- puts 'Deploying Hetzner CSI Driver...'
-
- cmd = <<~BASH
- kubectl apply -f - <<-EOF
- apiVersion: "v1"
- kind: "Secret"
- metadata:
- namespace: 'kube-system'
- name: 'hcloud-csi'
- stringData:
- token: "#{configuration.hetzner_token}"
- EOF
- BASH
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- cmd = 'kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml'
-
- run cmd, kubeconfig_path: kubeconfig_path
-
- puts '...CSI Driver deployed'
- end
-
- def find_flannel_interface(server)
- if ssh(server, 'lscpu | grep Vendor') =~ /Intel/
- 'ens10'
- else
- 'enp7s0'
- end
+ def belongs_to_cluster?(server)
+ server.dig('labels', 'cluster') == cluster_name
  end

  def all_servers
@@ -384,74 +93,6 @@ class Cluster
  @workers = all_servers.select { |server| server['name'] =~ /worker\d+\Z/ }.sort { |a, b| a['name'] <=> b['name'] }
  end

- def k3s_token
- @k3s_token ||= begin
- token = ssh(first_master, '{ TOKEN=$(< /var/lib/rancher/k3s/server/node-token); } 2> /dev/null; echo $TOKEN')
-
- if token.empty?
- SecureRandom.hex
- else
- token.split(':').last
- end
- end
- end
-
- def first_master_private_ip
- @first_master_private_ip ||= first_master['private_net'][0]['ip']
- end
-
- def first_master
- masters.first
- end
-
- def api_server_ip
- return @api_server_ip if @api_server_ip
-
- @api_server_ip = if masters.size > 1
- load_balancer_name = "#{cluster_name}-api"
- load_balancer = hetzner_client.get('/load_balancers')['load_balancers'].detect do |lb|
- lb['name'] == load_balancer_name
- end
- load_balancer['public_net']['ipv4']['ip']
- else
- first_master_public_ip
- end
- end
-
- def tls_sans
- sans = " --tls-san=#{api_server_ip} "
-
- masters.each do |master|
- master_private_ip = master['private_net'][0]['ip']
- sans += " --tls-san=#{master_private_ip} "
- end
-
- sans
- end
-
- def first_master_public_ip
- @first_master_public_ip ||= first_master.dig('public_net', 'ipv4', 'ip')
- end
-
- def save_kubeconfig
- kubeconfig = ssh(first_master, 'cat /etc/rancher/k3s/k3s.yaml')
- .gsub('127.0.0.1', api_server_ip)
- .gsub('default', cluster_name)
-
- File.write(kubeconfig_path, kubeconfig)
-
- FileUtils.chmod 'go-r', kubeconfig_path
- end
-
- def belongs_to_cluster?(server)
- server.dig('labels', 'cluster') == cluster_name
- end
-
- def schedule_workloads_on_masters?
- schedule_workloads_on_masters = configuration['schedule_workloads_on_masters']
- schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
- end
-
  def image
  configuration['image'] || 'ubuntu-20.04'
  end
@@ -464,18 +105,6 @@ class Cluster
  configuration['post_create_commands'] || []
  end

- def check_kubectl
- return if which('kubectl')
-
- puts 'Please ensure kubectl is installed and in your PATH.'
- exit 1
- end
-
- def placement_group_id(pool_name = nil)
- @placement_groups ||= {}
- @placement_groups[pool_name || '__masters__'] ||= Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).create
- end
-
  def master_instance_type
  @master_instance_type ||= masters_config['instance_type']
  end
@@ -484,6 +113,11 @@ class Cluster
  @masters_count ||= masters_config['instance_count']
  end

+ def placement_group_id(pool_name = nil)
+ @placement_groups ||= {}
+ @placement_groups[pool_name || '__masters__'] ||= Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).create
+ end
+
  def firewall_id
  @firewall_id ||= Hetzner::Firewall.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(high_availability: (masters_count > 1), ssh_networks: ssh_networks, api_networks: api_networks)
  end
@@ -510,7 +144,9 @@ class Cluster
  ssh_key_id: ssh_key_id,
  image: image,
  additional_packages: additional_packages,
- additional_post_create_commands: additional_post_create_commands
+ additional_post_create_commands: additional_post_create_commands,
+ labels: masters_config['labels'],
+ taints: masters_config['taints']
  }
  end

@@ -535,6 +171,8 @@ class Cluster
  worker_instance_type = worker_node_pool['instance_type']
  worker_count = worker_node_pool['instance_count']
  worker_location = worker_node_pool['location'] || masters_location
+ labels = worker_node_pool['labels']
+ taints = worker_node_pool['taints']

  definitions = []

@@ -549,17 +187,15 @@ class Cluster
  ssh_key_id: ssh_key_id,
  image: image,
  additional_packages: additional_packages,
- additional_post_create_commands: additional_post_create_commands
+ additional_post_create_commands: additional_post_create_commands,
+ labels: labels,
+ taints: taints
  }
  end

  definitions
  end

- def create_load_balancer
- Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(location: masters_location, network_id: network_id)
- end
-
  def server_configs
  return @server_configs if @server_configs

@@ -572,12 +208,55 @@ class Cluster
  @server_configs
  end

+ def hetzner_client
+ configuration.hetzner_client
+ end
+
+ def kubernetes_client
+ @kubernetes_client ||= Kubernetes::Client.new(configuration: configuration)
+ end
+
+ def workers_definitions_for_marking
+ worker_node_pools.map do |worker_node_pool|
+ worker_node_pool_definitions(worker_node_pool)
+ end.flatten
+ end
+
+ def create_resources
+ create_servers
+ create_load_balancer if masters.size > 1
+ end
+
+ def delete_placement_groups
+ Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete
+
+ worker_node_pools.each do |pool|
+ pool_name = pool['name']
+ Hetzner::PlacementGroup.new(hetzner_client: hetzner_client, cluster_name: cluster_name, pool_name: pool_name).delete
+ end
+ end
+
+ def delete_resources
+ Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(high_availability: (masters.size > 1))
+
+ Hetzner::Firewall.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(all_servers)
+
+ Hetzner::Network.new(hetzner_client: hetzner_client, cluster_name: cluster_name, existing_network: existing_network).delete
+
+ Hetzner::SSHKey.new(hetzner_client: hetzner_client, cluster_name: cluster_name).delete(public_ssh_key_path: public_ssh_key_path)
+
+ delete_placement_groups
+ delete_servers
+ end
+
  def create_servers
  servers = []

  threads = server_configs.map do |server_config|
+ config = server_config.reject! { |k, _v| %i[labels taints].include?(k) }
+
  Thread.new do
- servers << Hetzner::Server.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(**server_config)
+ servers << Hetzner::Server.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(**config)
  end
  end
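
The labels and taints keys are stripped from each server config before the Hetzner::Server#create call; the definitions that still carry them are passed to Kubernetes::Client#deploy as workers_definitions_for_marking. A hypothetical helper showing how such a definition could be turned into node labels and taints with kubectl; the helper name, the node_name argument, and the taint value format are assumptions, not code from this release:

    # Hypothetical: apply a definition's labels/taints to a node that has
    # already joined the cluster. Taint values are assumed to include the
    # effect, e.g. 'true:NoSchedule'.
    def mark_node(definition, node_name)
      (definition[:labels] || {}).each do |key, value|
        run "kubectl label --overwrite nodes #{node_name} #{key}=#{value}", kubeconfig_path: kubeconfig_path
      end

      (definition[:taints] || {}).each do |key, value|
        run "kubectl taint --overwrite nodes #{node_name} #{key}=#{value}", kubeconfig_path: kubeconfig_path
      end
    end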
 
@@ -606,56 +285,8 @@ class Cluster
  threads.each(&:join) unless threads.empty?
  end

- def kube_api_server_args_list
- return '' if kube_api_server_args.empty?
-
- kube_api_server_args.map do |arg|
- " --kube-apiserver-arg=\"#{arg}\" "
- end.join
- end
-
- def kube_scheduler_args_list
- return '' if kube_scheduler_args.empty?
-
- kube_scheduler_args.map do |arg|
- " --kube-scheduler-arg=\"#{arg}\" "
- end.join
- end
-
- def kube_controller_manager_args_list
- return '' if kube_controller_manager_args.empty?
-
- kube_controller_manager_args.map do |arg|
- " --kube-controller-manager-arg=\"#{arg}\" "
- end.join
- end
-
- def kube_cloud_controller_manager_args_list
- return '' if kube_cloud_controller_manager_args.empty?
-
- kube_cloud_controller_manager_args.map do |arg|
- " --kube-cloud-controller-manager-arg=\"#{arg}\" "
- end.join
- end
-
- def kubelet_args_list
- return '' if kubelet_args.empty?
-
- kubelet_args.map do |arg|
- " --kubelet-arg=\"#{arg}\" "
- end.join
- end
-
- def kube_proxy_args_list
- return '' if kube_proxy_args.empty?
-
- kube_api_server_args.map do |arg|
- " --kube-proxy-arg=\"#{arg}\" "
- end.join
- end
-
- def hetzner_client
- configuration.hetzner_client
+ def create_load_balancer
+ Hetzner::LoadBalancer.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(location: masters_location, network_id: network_id)
  end

  def existing_network
@@ -3,7 +3,7 @@
  module Hetzner
  class Configuration
  GITHUB_DELIM_LINKS = ','
- GITHUB_LINK_REGEX = /<([^>]+)>; rel="([^"]+)"/
+ GITHUB_LINK_REGEX = /<([^>]+)>; rel="([^"]+)"/.freeze

  attr_reader :hetzner_client

@@ -92,8 +92,6 @@ module Hetzner
  configuration
  end

- private_class_method
-
  def self.fetch_releases(url)
  response = HTTParty.get(url)
  [response, JSON.parse(response.body).map { |hash| hash['name'] }]
@@ -196,7 +194,7 @@ module Hetzner

  unless invalid_ranges.empty?
  invalid_ranges.each do |_network|
- errors << 'Please use the CIDR notation for the #{access_type} networks to avoid ambiguity'
+ errors << "Please use the CIDR notation for the #{access_type} networks to avoid ambiguity"
  end
  end

@@ -210,19 +208,17 @@ module Hetzner
  false
  end

- unless current_ip_network
- case access_type
- when "SSH"
- errors << "Your current IP #{current_ip} is not included into any of the #{access_type} networks you've specified, so we won't be able to SSH into the nodes "
- when "API"
- errors << "Your current IP #{current_ip} is not included into any of the #{access_type} networks you've specified, so we won't be able to connect to the Kubernetes API"
- end
+ return if current_ip_network
+
+ case access_type
+ when 'SSH'
+ errors << "Your current IP #{current_ip} is not included into any of the #{access_type} networks you've specified, so we won't be able to SSH into the nodes "
+ when 'API'
+ errors << "Your current IP #{current_ip} is not included into any of the #{access_type} networks you've specified, so we won't be able to connect to the Kubernetes API"
  end
  end

-
  def validate_ssh_allowed_networks
- return
  validate_networks('ssh_allowed_networks', 'SSH')
  end

@@ -441,6 +437,9 @@ module Hetzner
  instance_group_errors << "#{instance_group_type} has an invalid instance count"
  end

+ instance_group_errors << "#{instance_group_type} has an invalid labels format - a hash is expected" if !instance_group['labels'].nil? && !instance_group['labels'].is_a?(Hash)
+ instance_group_errors << "#{instance_group_type} has an invalid taints format - a hash is expected" if !instance_group['taints'].nil? && !instance_group['taints'].is_a?(Hash)
+
  errors << instance_group_errors
  end
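
The two checks added above require labels and taints to be hashes on each instance group (the masters group or a worker node pool). For reference, a sketch of an instance group as the validator sees it after the cluster config is parsed; aside from labels and taints being hashes and the key names referenced elsewhere in this diff, the values below are purely illustrative:

    # Illustrative parsed instance group; 'labels' and 'taints' must be hashes,
    # otherwise the validations above add an error for the group.
    worker_node_pool = {
      'name' => 'small',
      'instance_type' => 'cpx21',
      'instance_count' => 2,
      'labels' => { 'purpose' => 'batch' },
      'taints' => { 'dedicated' => 'batch:NoSchedule' }
    }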
 
@@ -2,6 +2,6 @@

  module Hetzner
  module K3s
- VERSION = '0.6.1'
+ VERSION = '0.6.3'
  end
  end