hetzner-k3s 0.5.1 → 0.5.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: c236d440e3e99f656c62b8ced99ac899199e435ca41e9675c4adeb326a87c5d8
-  data.tar.gz: fd8da58d3c4cc3b54018c1ff261f8606391e8f524b974660fc5984fcd10397f7
+  metadata.gz: 54bad08b660435283978314edf5d2823fd98a72958051615efc8ec5b24ce0b88
+  data.tar.gz: 653edb24301d237515b4abfae4538893f40f4707d462c3344f083a082d0fe0e8
 SHA512:
-  metadata.gz: 8ebe7779e0f79ec500c1e0d230212cd568ef62e94a3dd2d936214f03f48baf8fc937a683b7bf4478b66a3dc0a7b02ee001a6fe3e542ad401f0add4ee27a68b03
-  data.tar.gz: dae117d8d1e3babf392814baf372009d1558113673a4968b7526a0a4e3a06b8eea034edaa11452047286721efc2a37af3e8214f2dbeafde73d864493feae5626
+  metadata.gz: 93536c6ddf9091ed6c148388e4f29dd2630fd5daf03dd7feafe218cc1dad97ebc3bf3db01799126a9fed70003b2b57db1eb73a81ad807362459c95190527684e
+  data.tar.gz: 6bf703828e75363b7ba03ec413b4eff9ab97ca06c2cd8d4f82baca05fc2c32e43d86f0137f1f8f5af0dc4455b7edf157f74bda1621869beb89432058a78b0dcd
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    hetzner-k3s (0.5.0)
+    hetzner-k3s (0.5.5)
       bcrypt_pbkdf
       ed25519
       http
data/README.md CHANGED
@@ -14,6 +14,7 @@ Using this tool, creating a highly available k3s cluster with 3 masters for the
 - installing the [Hetzner CSI Driver](https://github.com/hetznercloud/csi-driver) to provision persistent volumes using Hetzner's block storage
 - installing the [Rancher System Upgrade Controller](https://github.com/rancher/system-upgrade-controller) to make upgrades to a newer version of k3s easy and quick
 
+See the roadmap [here](https://github.com/vitobotta/hetzner-k3s/projects/1) for the features planned or in progress.
 
 ## Requirements
 
@@ -25,7 +26,7 @@ All that is needed to use this tool is
 
 ## Installation
 
-Once you have the Ruby runtime up and running (2.7.2 or newer), you just need to install the gem:
+Once you have the Ruby runtime up and running (3.1.0 or newer), you just need to install the gem:
 
 ```bash
 gem install hetzner-k3s
@@ -38,7 +39,7 @@ This will install the `hetzner-k3s` executable in your PATH.
 Alternatively, if you don't want to set up a Ruby runtime but have Docker installed, you can use a container. Run the following from inside the directory where you have the config file for the cluster (described in the next section):
 
 ```bash
-docker run --rm -it -v ${PWD}:/cluster -v ${HOME}/.ssh:/tmp/.ssh vitobotta/hetzner-k3s:v0.5.1 create-cluster --config-file /cluster/test.yaml
+docker run --rm -it -v ${PWD}:/cluster -v ${HOME}/.ssh:/tmp/.ssh vitobotta/hetzner-k3s:v0.5.5 create-cluster --config-file /cluster/test.yaml
 ```
 
 Replace `test.yaml` with the name of your config file.
@@ -72,7 +73,26 @@ worker_node_pools:
   instance_count: 2
 additional_packages:
 - somepackage
-enable_ipsec_encryption: true
+enable_encryption: true
+# kube_api_server_args:
+# - arg1
+# - ...
+# kube_scheduler_args:
+# - arg1
+# - ...
+# kube_controller_manager_args:
+# - arg1
+# - ...
+# kube_cloud_controller_manager_args:
+# - arg1
+# - ...
+# kubelet_args:
+# - arg1
+# - ...
+# kube_proxy_args:
+# - arg1
+# - ...
+
 ```
 
 It should hopefully be self-explanatory; you can run `hetzner-k3s releases` to see a list of the available releases from the most recent to the oldest available.
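
Note: each entry in these new lists is passed through verbatim to the corresponding component as a k3s flag (see the `kube_*_args_list` helpers further down in this diff). A minimal sketch of the mapping, with hypothetical argument values:

```ruby
# Mapping of config keys to k3s flags, per the kube_*_args_list helpers
# added in this release; the example value is hypothetical.
#
#   kube_api_server_args               -> --kube-apiserver-arg="..."
#   kube_scheduler_args                -> --kube-scheduler-arg="..."
#   kube_controller_manager_args       -> --kube-controller-manager-arg="..."
#   kube_cloud_controller_manager_args -> --kube-cloud-controller-manager-arg="..."
#   kubelet_args                       -> --kubelet-arg="..."
#   kube_proxy_args                    -> --kube-proxy-arg="..."
arg = 'default-not-ready-toleration-seconds=30'
puts " --kube-apiserver-arg=\"#{arg}\" "
# =>  --kube-apiserver-arg="default-not-ready-toleration-seconds=30"
```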
@@ -255,95 +275,6 @@ Once the cluster is ready you can create persistent volumes out of the box with
 I recommend that you create a separate Hetzner project for each cluster, because otherwise multiple clusters will attempt to create overlapping routes. I will make the pod cidr configurable in the future to avoid this, but I still recommend keeping clusters separated from each other. This way, if you want to delete a cluster with all the resources created for it, you can just delete the project.
 
 
-## changelog
-
-- 0.5.1
-  - Each node pool gets its own placement group. This is to minimize issues due to the max 10 nodes limitation for a single node group. A validation has also been added to limit pools to 10 nodes each because of this.
-
-- 0.5.0
-  - Allow installing additional packages when creating the servers
-  - Allow enabling ipsec encryption
-
-- 0.4.9
-  - Ensure the program always exits with exit code 1 if the config file fails validation
-  - Upgrade System Upgrade Controller to 0.8.1
-  - Remove dependency on unmaintained gem k8s-ruby
-  - Make the gem compatible with Ruby 3.1.0
-
-- 0.4.8
-  - Increase timeout with API requests to 30 seconds
-  - Limit number of retries for API requests to 3
-  - Ensure all version tags are listed for k3s (thanks @janosmiko)
-
-- 0.4.7
-  - Made it possible to specify a custom image/snapshot for the servers
-
-- 0.4.6
-  - Added a check to abort gracefully when for some reason one or more servers are not created, for example due to temporary problems with the Hetzner API.
-
-- 0.4.5
-  - Fix network creation (bug introduced in the previous version)
-
-- 0.4.4
-  - Add support for the new Ashburn, Virginia (USA) location
-  - Automatically use a placement group so that the instances are all created on different physical hosts for high availability
-
-- 0.4.3
-  - Fix an issue with SSH key creation
-
-- 0.4.2
-  - Update Hetzner CSI driver to v1.6.0
-  - Update System Upgrade Controller to v0.8.0
-
-- 0.4.1
-  - Allow to optionally specify the path of the private SSH key
-  - Set correct permissions for the kubeconfig file
-  - Retry fetching manifests a few times to allow for temporary network issues
-  - Allow to optionally schedule workloads on masters
-  - Allow clusters with no worker node pools if scheduling is enabled for the masters
-
-- 0.4.0
-  - Ensure the masters are removed from the API load balancer before deleting the load balancer
-  - Ensure the servers are removed from the firewall before deleting it
-  - Allow using an environment variable to specify the Hetzner token
-  - Allow restricting SSH access to the nodes to specific networks
-  - Do not open the port 6443 on the nodes if a load balancer is created for an HA cluster
-
-- 0.3.9
-  - Add command "version" to print the version of the tool in use
-
-- 0.3.8
-  - Fix: added a check on a label to ensure that only servers that belong to the cluster are deleted from the project
-
-- 0.3.7
-  - Ensure that the cluster name only contains lowercase letters, digits and dashes for compatibility with the cloud controller manager
-
-- 0.3.6
-  - Retry SSH commands when IO errors occur
-
-- 0.3.5
-  - Add descriptions for firewall rules
-
-- 0.3.4
-  - Added Docker support
-
-- 0.3.3
-  - Add some gems required on Linux
-
-- 0.3.2
-  - Configure DNS to use Cloudflare's resolver instead of Hetzner's, since Hetzner's resolvers are not always reliable
-
-- 0.3.1
-  - Allow enabling/disabling the host key verification
-
-- 0.3.0
-  - Handle case when an SSH key with the given fingerprint already exists in the Hetzner project
-  - Handle a timeout of 5 seconds for requests to the Hetzner API
-  - Retry waiting for server to be up when timeouts/host-unreachable errors occur
-  - Ignore known_hosts entry to prevent errors when recreating servers with IPs that have been used previously
-
-- 0.2.0
-  - Allow mixing servers of different series Intel/AMD
 ## Contributing and support
 
 Please create a PR if you want to propose any changes, or open an issue if you are having trouble with the tool - I will do my best to help if I can.
data/bin/build.sh CHANGED
@@ -6,9 +6,9 @@ set -e
 
 IMAGE="vitobotta/hetzner-k3s"
 
-docker build -t ${IMAGE}:v0.5.1 \
+docker build -t ${IMAGE}:v0.5.5 \
   --platform=linux/amd64 \
-  --cache-from ${IMAGE}:v0.5.0 \
+  --cache-from ${IMAGE}:v0.5.4 \
   --build-arg BUILDKIT_INLINE_CACHE=1 .
 
-docker push vitobotta/hetzner-k3s:v0.5.1
+docker push vitobotta/hetzner-k3s:v0.5.5
@@ -81,7 +81,7 @@ module Hetzner
     end
 
     def user_data
-      packages = ['fail2ban']
+      packages = ['fail2ban', 'wireguard']
       packages += additional_packages if additional_packages
       packages = "'#{packages.join("', '")}'"
 
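
For context, the quoted package list built above is interpolated into the server's cloud-init user data. A minimal sketch of what the `join` produces (the surrounding YAML fragment is illustrative, not a verbatim copy of the gem's template):

```ruby
# Reconstruction of the interpolation above, with sample values.
packages = ['fail2ban', 'wireguard']   # base packages as of this release
packages += ['somepackage']            # additional_packages from the config, if any
quoted = "'#{packages.join("', '")}'"
# => "'fail2ban', 'wireguard', 'somepackage'"

# Embedded in a cloud-init packages list, roughly:
user_data = <<~YAML
  #cloud-config
  packages: [#{quoted}]
YAML
```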
@@ -178,11 +178,16 @@ module Hetzner
         []
       end
 
-      def validate_location
+      def valid_location?(location)
         return if locations.empty? && !valid_token?
-        return if locations.include? configuration['location']
 
-        errors << 'Invalid location - available locations: nbg1 (Nuremberg, Germany), fsn1 (Falkenstein, Germany), hel1 (Helsinki, Finland) or ash (Ashburn, Virginia, USA)'
+        locations.include? location
+      end
+
+      def validate_masters_location
+        return if valid_location?(configuration['location'])
+
+        errors << 'Invalid location for master nodes - valid locations: nbg1 (Nuremberg, Germany), fsn1 (Falkenstein, Germany), hel1 (Helsinki, Finland) or ash (Ashburn, Virginia, USA)'
       end
 
       def available_releases
@@ -261,6 +266,14 @@ module Hetzner
 
         instance_group_errors << "#{instance_group_type} has an invalid instance type" unless !valid_token? || server_types.include?(instance_group['instance_type'])
 
+        if workers
+          location = instance_group.fetch('location', configuration['location'])
+          instance_group_errors << "#{instance_group_type} has an invalid location - valid locations: nbg1 (Nuremberg, Germany), fsn1 (Falkenstein, Germany), hel1 (Helsinki, Finland) or ash (Ashburn, Virginia, USA)" unless valid_location?(location)
+
+          in_network_zone = configuration['location'] == 'ash' ? location == 'ash' : location != 'ash'
+          instance_group_errors << "#{instance_group_type} must be in the same network zone as the masters. If the masters are located in Ashburn, all the node pools must be located in Ashburn too, otherwise none of the node pools should be located in Ashburn." unless in_network_zone
+        end
+
         if instance_group['instance_count'].is_a? Integer
           if instance_group['instance_count'] < 1
             instance_group_errors << "#{instance_group_type} must have at least one node"
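
The network-zone check above exists because a Hetzner private network spans a single network zone, and `ash` (us-east) is in a different zone from the three European locations (eu-central). A small sketch of how the ternary behaves with sample locations:

```ruby
# Sample evaluation of the in_network_zone check above (values illustrative).
masters_location = 'fsn1'
%w[nbg1 hel1 ash].each do |location|
  in_network_zone = masters_location == 'ash' ? location == 'ash' : location != 'ash'
  puts "#{location}: #{in_network_zone ? 'ok' : 'different network zone'}"
end
# => nbg1: ok, hel1: ok, ash: different network zone
```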
@@ -343,12 +356,18 @@ module Hetzner
         validate_public_ssh_key
         validate_private_ssh_key
         validate_ssh_allowed_networks
-        validate_location
+        validate_masters_location
         validate_k3s_version
         validate_masters
         validate_worker_node_pools
         validate_verify_host_key
         validate_additional_packages
+        validate_kube_api_server_args
+        validate_kube_scheduler_args
+        validate_kube_controller_manager_args
+        validate_kube_cloud_controller_manager_args
+        validate_kubelet_args
+        validate_kube_proxy_args
       end
 
       def validate_upgrade
@@ -375,6 +394,48 @@ module Hetzner
           exit 1
         end
       end
+
+      def validate_kube_api_server_args
+        kube_api_server_args = configuration['kube_api_server_args']
+        return unless kube_api_server_args
+
+        errors << 'kube_api_server_args must be an array of arguments' unless kube_api_server_args.is_a? Array
+      end
+
+      def validate_kube_scheduler_args
+        kube_scheduler_args = configuration['kube_scheduler_args']
+        return unless kube_scheduler_args
+
+        errors << 'kube_scheduler_args must be an array of arguments' unless kube_scheduler_args.is_a? Array
+      end
+
+      def validate_kube_controller_manager_args
+        kube_controller_manager_args = configuration['kube_controller_manager_args']
+        return unless kube_controller_manager_args
+
+        errors << 'kube_controller_manager_args must be an array of arguments' unless kube_controller_manager_args.is_a? Array
+      end
+
+      def validate_kube_cloud_controller_manager_args
+        kube_cloud_controller_manager_args = configuration['kube_cloud_controller_manager_args']
+        return unless kube_cloud_controller_manager_args
+
+        errors << 'kube_cloud_controller_manager_args must be an array of arguments' unless kube_cloud_controller_manager_args.is_a? Array
+      end
+
+      def validate_kubelet_args
+        kubelet_args = configuration['kubelet_args']
+        return unless kubelet_args
+
+        errors << 'kubelet_args must be an array of arguments' unless kubelet_args.is_a? Array
+      end
+
+      def validate_kube_proxy_args
+        kube_proxy_args = configuration['kube_proxy_args']
+        return unless kube_proxy_args
+
+        errors << 'kube_proxy_args must be an array of arguments' unless kube_proxy_args.is_a? Array
+      end
     end
   end
 end
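
The six validators above are intentionally identical in shape. Purely as an illustration (this consolidation is not in the gem), they could be generated in a loop:

```ruby
# Hypothetical DRY version of the six validators above, not part of the gem.
%w[kube_api_server_args kube_scheduler_args kube_controller_manager_args
   kube_cloud_controller_manager_args kubelet_args kube_proxy_args].each do |key|
  define_method("validate_#{key}") do
    value = configuration[key]
    return unless value

    errors << "#{key} must be an array of arguments" unless value.is_a?(Array)
  end
end
```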
@@ -34,11 +34,17 @@ class Cluster
     @k3s_version = configuration['k3s_version']
     @masters_config = configuration['masters']
     @worker_node_pools = find_worker_node_pools(configuration)
-    @location = configuration['location']
+    @masters_location = configuration['location']
     @verify_host_key = configuration.fetch('verify_host_key', false)
     @servers = []
     @networks = configuration['ssh_allowed_networks']
-    @enable_ipsec_encryption = configuration.fetch('enable_ipsec_encryption', false)
+    @enable_encryption = configuration.fetch('enable_encryption', false)
+    @kube_api_server_args = configuration.fetch('kube_api_server_args', [])
+    @kube_scheduler_args = configuration.fetch('kube_scheduler_args', [])
+    @kube_controller_manager_args = configuration.fetch('kube_controller_manager_args', [])
+    @kube_cloud_controller_manager_args = configuration.fetch('kube_cloud_controller_manager_args', [])
+    @kubelet_args = configuration.fetch('kubelet_args', [])
+    @kube_proxy_args = configuration.fetch('kube_proxy_args', [])
 
     create_resources
 
@@ -78,10 +84,12 @@ class Cluster
 
   attr_reader :hetzner_client, :cluster_name, :kubeconfig_path, :k3s_version,
               :masters_config, :worker_node_pools,
-              :location, :public_ssh_key_path,
+              :masters_location, :public_ssh_key_path,
               :hetzner_token, :new_k3s_version, :configuration,
               :config_file, :verify_host_key, :networks, :private_ssh_key_path,
-              :enable_ipsec_encryption
+              :enable_encryption, :kube_api_server_args, :kube_scheduler_args,
+              :kube_controller_manager_args, :kube_cloud_controller_manager_args,
+              :kubelet_args, :kube_proxy_args
 
   def find_worker_node_pools(configuration)
     configuration.fetch('worker_node_pools', [])
@@ -188,10 +196,10 @@ class Cluster
   end
 
   def master_script(master)
-    server = master == first_master ? ' --cluster-init ' : " --server https://#{first_master_private_ip}:6443 "
+    server = master == first_master ? ' --cluster-init ' : " --server https://#{api_server_ip}:6443 "
     flannel_interface = find_flannel_interface(master)
-    flannel_ipsec = enable_ipsec_encryption ? ' --flannel-backend=ipsec ' : ' '
-
+    flannel_wireguard = enable_encryption ? ' --flannel-backend=wireguard ' : ' '
+    extra_args = "#{kube_api_server_args_list} #{kube_scheduler_args_list} #{kube_controller_manager_args_list} #{kube_cloud_controller_manager_args_list} #{kubelet_args_list} #{kube_proxy_args_list}"
     taint = schedule_workloads_on_masters? ? ' ' : ' --node-taint CriticalAddonsOnly=true:NoExecute '
 
     <<~SCRIPT
@@ -205,13 +213,13 @@ class Cluster
       --node-name="$(hostname -f)" \
       --cluster-cidr=10.244.0.0/16 \
       --etcd-expose-metrics=true \
-      #{flannel_ipsec} \
+      #{flannel_wireguard} \
       --kube-controller-manager-arg="address=0.0.0.0" \
       --kube-controller-manager-arg="bind-address=0.0.0.0" \
       --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
       --kube-scheduler-arg="address=0.0.0.0" \
       --kube-scheduler-arg="bind-address=0.0.0.0" \
-      #{taint} \
+      #{taint} #{extra_args} \
       --kubelet-arg="cloud-provider=external" \
       --advertise-address=$(hostname -I | awk '{print $2}') \
       --node-ip=$(hostname -I | awk '{print $2}') \
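
With `enable_encryption: true`, the generated install command now selects flannel's WireGuard backend (the `wireguard` package is installed via cloud-init, per the `user_data` change earlier in this diff), and `extra_args` splices in any configured component flags. A sketch of the two interpolations with hypothetical values:

```ruby
# Illustrative expansion of flannel_wireguard and extra_args above.
enable_encryption = true
flannel_wireguard = enable_encryption ? ' --flannel-backend=wireguard ' : ' '

kubelet_args = ['max-pods=200'] # hypothetical config value
extra_args = kubelet_args.map { |arg| " --kubelet-arg=\"#{arg}\" " }.join
# The rendered script then contains:
#   ... --flannel-backend=wireguard ... --kubelet-arg="max-pods=200" ...
```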
@@ -467,7 +475,7 @@ class Cluster
   end
 
   def network_id
-    @network_id ||= Hetzner::Network.new(hetzner_client:, cluster_name:).create(location:)
+    @network_id ||= Hetzner::Network.new(hetzner_client:, cluster_name:).create(location: masters_location)
   end
 
   def ssh_key_id
@@ -481,8 +489,8 @@ class Cluster
       definitions << {
         instance_type: master_instance_type,
         instance_id: "master#{i + 1}",
+        location: masters_location,
         placement_group_id:,
-        location:,
         firewall_id:,
         network_id:,
         ssh_key_id:,
@@ -511,6 +519,7 @@ class Cluster
     worker_node_pool_name = worker_node_pool['name']
     worker_instance_type = worker_node_pool['instance_type']
     worker_count = worker_node_pool['instance_count']
+    worker_location = worker_node_pool['location'] || masters_location
 
     definitions = []
 
@@ -519,7 +528,7 @@ class Cluster
       definitions << {
         instance_type: worker_instance_type,
         instance_id: "pool-#{worker_node_pool_name}-worker#{i + 1}",
         placement_group_id: placement_group_id(worker_node_pool_name),
-        location:,
+        location: worker_location,
         firewall_id:,
         network_id:,
         ssh_key_id:,
@@ -580,4 +589,52 @@ class Cluster
 
     threads.each(&:join) unless threads.empty?
   end
+
+  def kube_api_server_args_list
+    return '' if kube_api_server_args.empty?
+
+    kube_api_server_args.map do |arg|
+      " --kube-apiserver-arg=\"#{arg}\" "
+    end.join
+  end
+
+  def kube_scheduler_args_list
+    return '' if kube_scheduler_args.empty?
+
+    kube_scheduler_args.map do |arg|
+      " --kube-scheduler-arg=\"#{arg}\" "
+    end.join
+  end
+
+  def kube_controller_manager_args_list
+    return '' if kube_controller_manager_args.empty?
+
+    kube_controller_manager_args.map do |arg|
+      " --kube-controller-manager-arg=\"#{arg}\" "
+    end.join
+  end
+
+  def kube_cloud_controller_manager_args_list
+    return '' if kube_cloud_controller_manager_args.empty?
+
+    kube_cloud_controller_manager_args.map do |arg|
+      " --kube-cloud-controller-manager-arg=\"#{arg}\" "
+    end.join
+  end
+
+  def kubelet_args_list
+    return '' if kubelet_args.empty?
+
+    kubelet_args.map do |arg|
+      " --kubelet-arg=\"#{arg}\" "
+    end.join
+  end
+
+  def kube_proxy_args_list
+    return '' if kube_proxy_args.empty?
+
+    kube_proxy_args.map do |arg|
+      " --kube-proxy-arg=\"#{arg}\" "
+    end.join
+  end
 end
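
A quick expansion example for these helpers (values hypothetical). Each flag is padded with spaces, so the joined string contains double spaces between flags, which the shell ignores when `master_script` splices it into the install command:

```ruby
kube_proxy_args = ['metrics-bind-address=0.0.0.0']
fragment = kube_proxy_args.map { |arg| " --kube-proxy-arg=\"#{arg}\" " }.join
# => ' --kube-proxy-arg="metrics-bind-address=0.0.0.0" '
```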
@@ -2,6 +2,6 @@
 
 module Hetzner
   module K3s
-    VERSION = '0.5.1'
+    VERSION = '0.5.5'
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: hetzner-k3s
 version: !ruby/object:Gem::Version
-  version: 0.5.1
+  version: 0.5.5
 platform: ruby
 authors:
 - Vito Botta
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2022-02-06 00:00:00.000000000 Z
+date: 2022-02-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bcrypt_pbkdf