hetzner-k3s 0.4.5 → 0.4.9

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: ffa9a3f4a629e25c670c3d4b3301aed094275d320db95e038ceac3e3ebebe1e2
-   data.tar.gz: ada1a6fb351c70f5c3a2ad52139b96911bcaa99b7d2351c89f770204dc9883c2
+   metadata.gz: 22358cdc272faa5e09ae2f561bf4225990a87b0461f4920439f9d5e9543fbe59
+   data.tar.gz: fc7dc822d53cd881e01a18c509d666bdd0321ed25c636f2873bca3fa1e5e0ce9
  SHA512:
-   metadata.gz: 6dcd87e350acedf02f21bc7732f6515c74a3ee4482f554f982a05bfa96be39bda1d205014a3ff66efc8ea69fda545a9b5fc363ab8f6f0f16036ddebe87b3fc16
-   data.tar.gz: 2789c63c71e81b9334c9e95de8cd0e0f088f2f40bedaaf942e18003de70bac24535164f5ef225e6a9568360293611d1e20d192b1a88f306e425fb44e96a1d211
+   metadata.gz: 35a06d127f14f4848a6a87611292b67e0edbf6cc3fb1ae8b803214428369cfc519942702fe36c14c2537732b38fa024b5d2361f77db56ea58724446f4822537d
+   data.tar.gz: bfc7751afa7db09a5e929b8164cdab060dfcb7b86125ae30a4804e179ba8a0550bdb07c9a181e148284e10ef7a632fe26ad81da1e278aeeab492a2899e2cdffb
data/.ruby-version ADDED
@@ -0,0 +1 @@
+ 3.1.0
data/Gemfile.lock CHANGED
@@ -1,13 +1,13 @@
  PATH
    remote: .
    specs:
-     hetzner-k3s (0.4.3)
+     hetzner-k3s (0.4.8)
        bcrypt_pbkdf
        ed25519
        http
-       k8s-ruby
        net-ssh
        sshkey
+       subprocess
        thor
  
  GEM
@@ -16,43 +16,14 @@ GEM
      addressable (2.8.0)
        public_suffix (>= 2.0.2, < 5.0)
      bcrypt_pbkdf (1.1.0)
-     concurrent-ruby (1.1.9)
      diff-lcs (1.4.4)
      domain_name (0.5.20190701)
        unf (>= 0.0.5, < 1.0.0)
-     dry-configurable (0.12.1)
-       concurrent-ruby (~> 1.0)
-       dry-core (~> 0.5, >= 0.5.0)
-     dry-container (0.8.0)
-       concurrent-ruby (~> 1.0)
-       dry-configurable (~> 0.1, >= 0.1.3)
-     dry-core (0.7.1)
-       concurrent-ruby (~> 1.0)
-     dry-equalizer (0.3.0)
-     dry-inflector (0.2.1)
-     dry-logic (0.6.1)
-       concurrent-ruby (~> 1.0)
-       dry-core (~> 0.2)
-       dry-equalizer (~> 0.2)
-     dry-struct (0.5.1)
-       dry-core (~> 0.4, >= 0.4.3)
-       dry-equalizer (~> 0.2)
-       dry-types (~> 0.13)
-       ice_nine (~> 0.11)
-     dry-types (0.13.4)
-       concurrent-ruby (~> 1.0)
-       dry-container (~> 0.3)
-       dry-core (~> 0.4, >= 0.4.4)
-       dry-equalizer (~> 0.2)
-       dry-inflector (~> 0.1, >= 0.1.2)
-       dry-logic (~> 0.4, >= 0.4.2)
      ed25519 (1.2.4)
-     excon (0.85.0)
-     ffi (1.15.3)
+     ffi (1.15.4)
      ffi-compiler (1.0.1)
        ffi (>= 1.0.0)
        rake
-     hashdiff (1.0.1)
      http (4.4.1)
        addressable (~> 2.3)
        http-cookie (~> 1.0)
@@ -63,24 +34,9 @@ GEM
      http-form_data (2.3.0)
      http-parser (1.2.3)
        ffi-compiler (>= 1.0, < 2.0)
-     ice_nine (0.11.2)
-     jsonpath (0.9.9)
-       multi_json
-       to_regexp (~> 0.2.1)
-     k8s-ruby (0.10.5)
-       dry-struct (~> 0.5.0)
-       dry-types (~> 0.13.0)
-       excon (~> 0.71)
-       hashdiff (~> 1.0.0)
-       jsonpath (~> 0.9.5)
-       recursive-open-struct (~> 1.1.0)
-       yajl-ruby (~> 1.4.0)
-       yaml-safe_load_stream (~> 0.1)
-     multi_json (1.15.0)
      net-ssh (6.1.0)
      public_suffix (4.0.6)
      rake (12.3.3)
-     recursive-open-struct (1.1.3)
      rspec (3.10.0)
        rspec-core (~> 3.10.0)
        rspec-expectations (~> 3.10.0)
@@ -95,13 +51,11 @@ GEM
        rspec-support (~> 3.10.0)
      rspec-support (3.10.2)
      sshkey (2.0.0)
-     thor (1.1.0)
-     to_regexp (0.2.1)
+     subprocess (1.5.5)
+     thor (1.2.1)
      unf (0.1.4)
        unf_ext
-     unf_ext (0.0.7.7)
-     yajl-ruby (1.4.1)
-     yaml-safe_load_stream (0.1.1)
+     unf_ext (0.0.8)
  
  PLATFORMS
    ruby
@@ -112,4 +66,4 @@ DEPENDENCIES
    rspec (~> 3.0)
  
  BUNDLED WITH
-    2.1.4
+    2.3.4
data/README.md CHANGED
@@ -38,7 +38,7 @@ This will install the `hetzner-k3s` executable in your PATH.
  Alternatively, if you don't want to set up a Ruby runtime but have Docker installed, you can use a container. Run the following from inside the directory where you have the config file for the cluster (described in the next section):
  
  ```bash
- docker run --rm -it -v ${PWD}:/cluster -v ${HOME}/.ssh:/tmp/.ssh vitobotta/hetzner-k3s:v0.4.5 create-cluster --config-file /cluster/test.yaml
+ docker run --rm -it -v ${PWD}:/cluster -v ${HOME}/.ssh:/tmp/.ssh vitobotta/hetzner-k3s:v0.4.9 create-cluster --config-file /cluster/test.yaml
  ```
  
  Replace `test.yaml` with the name of your config file.
@@ -97,8 +97,20 @@ curl \
  'https://api.hetzner.cloud/v1/server_types'
  ```
  
+ By default, the image in use is Ubuntu 20.04, but you can specify an image to use with the `image` config option. This also makes it possible
+ to use a snapshot that you have already created from an existing server (for example to preinstall some tools). If you want to use a custom
+ snapshot you'll need to specify the **ID** of the snapshot/image, not the description you gave when you created the template server. To find
+ the ID of your custom image/snapshot, run:
  
- Note: the option `verify_host_key` is by default set to `false` to disable host key verification. This is because sometimes when creating new servers, Hetzner may assign IP addresses that were previously used by other servers you owned in the past. Therefore the host key verification would fail. If you set this option to `true` and this happens, the tool won't be able to continue creating the cluster until you resolve the issue with one of the suggestions it will give you.
+ ```bash
+ curl \
+   -H "Authorization: Bearer $API_TOKEN" \
+   'https://api.hetzner.cloud/v1/images'
+ ```
+ 
+ Note that if you use a custom image, the creation of the servers may take longer than when using the default image.
+ 
+ Also note: the option `verify_host_key` is by default set to `false` to disable host key verification. This is because sometimes when creating new servers, Hetzner may assign IP addresses that were previously used by other servers you owned in the past. Therefore the host key verification would fail. If you set this option to `true` and this happens, the tool won't be able to continue creating the cluster until you resolve the issue with one of the suggestions it will give you.
  
  Finally, to create the cluster run:
  
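For reference, the new top-level `image` option accepts either a standard image name or the numeric ID of a custom snapshot; per the `image` helper added to `Cluster` further down in this diff, it falls back to `ubuntu-20.04` when omitted. A minimal config sketch, where the snapshot ID `12345678` is made up for illustration:

```yaml
---
hetzner_token: <your token>
cluster_name: test
kubeconfig_path: "./kubeconfig"
k3s_version: v1.21.3+k3s1
# hypothetical snapshot ID taken from /v1/images; omit to use ubuntu-20.04
image: 12345678
```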
@@ -199,7 +211,7 @@ kubectl label node <master1> <master2> <master2> plan.upgrade.cattle.io/k3s-serv
  To delete a cluster, run
  
  ```bash
- hetzner-k3s delete-cluster --config-file cluster_config.yam
+ hetzner-k3s delete-cluster --config-file cluster_config.yaml
  ```
  
  This will delete all the resources in the Hetzner Cloud project for the cluster being deleted.
@@ -242,6 +254,23 @@ I recommend that you create a separate Hetzner project for each cluster, because
  
  ## changelog
  
+ - 0.4.9
+   - Ensure the program always exits with exit code 1 if the config file fails validation
+   - Upgrade System Upgrade Controller to 0.8.1
+   - Remove dependency on unmaintained gem k8s-ruby
+   - Make the gem compatible with Ruby 3.1.0
+ 
+ - 0.4.8
+   - Increase timeout for API requests to 30 seconds
+   - Limit number of retries for API requests to 3
+   - Ensure all version tags are listed for k3s (thanks @janosmiko)
+ 
+ - 0.4.7
+   - Made it possible to specify a custom image/snapshot for the servers
+ 
+ - 0.4.6
+   - Added a check to abort gracefully when for some reason one or more servers are not created, for example due to temporary problems with the Hetzner API.
+ 
  - 0.4.5
    - Fix network creation (bug introduced in the previous version)
  
data/bin/build.sh CHANGED
@@ -6,9 +6,9 @@ set -e
  
  IMAGE="vitobotta/hetzner-k3s"
  
- docker build -t ${IMAGE}:v0.4.5 \
+ docker build -t ${IMAGE}:v0.4.9 \
    --platform=linux/amd64 \
-   --cache-from ${IMAGE}:v0.4.4 \
+   --cache-from ${IMAGE}:v0.4.8 \
    --build-arg BUILDKIT_INLINE_CACHE=1 .
  
- docker push vitobotta/hetzner-k3s:v0.4.5
+ docker push vitobotta/hetzner-k3s:v0.4.9
@@ -1,11 +1,15 @@
  ---
- hetzner_token: blah
+ hetzner_token: <your token>
  cluster_name: test
- kubeconfig_path: "../kubeconfig"
+ kubeconfig_path: "./kubeconfig"
  k3s_version: v1.21.3+k3s1
- ssh_key_path: "~/.ssh/id_rsa.pub"
+ public_ssh_key_path: "~/.ssh/id_rsa.pub"
+ private_ssh_key_path: "~/.ssh/id_rsa"
+ ssh_allowed_networks:
+   - 0.0.0.0/0
  verify_host_key: false
  location: nbg1
+ schedule_workloads_on_masters: false
  masters:
    instance_type: cpx21
    instance_count: 3
@@ -14,5 +18,5 @@ worker_node_pools:
    instance_type: cpx21
    instance_count: 4
  - name: big
-   instance_type: cp321
+   instance_type: cpx31
    instance_count: 2
data/hetzner-k3s.gemspec CHANGED
@@ -21,10 +21,10 @@ Gem::Specification.new do |spec|
    spec.add_dependency "thor"
    spec.add_dependency "http"
    spec.add_dependency "net-ssh"
-   spec.add_dependency "k8s-ruby"
    spec.add_dependency "sshkey"
    spec.add_dependency "ed25519"
    spec.add_dependency "bcrypt_pbkdf"
+   spec.add_dependency "subprocess"
  
    # Specify which files should be added to the gem when it is released.
    # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
data/lib/hetzner/infra/client.rb CHANGED
@@ -36,11 +36,13 @@ module Hetzner
      end
  
      def make_request &block
-       Timeout::timeout(5) do
+       retries ||= 0
+ 
+       Timeout::timeout(30) do
          block.call
        end
      rescue Timeout::Error
-       retry
+       retry if (retries += 1) < 3
      end
    end
  end
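The change above swaps an unbounded `retry` for a counted one: the local `retries` survives each `retry` because `retry` re-runs the method body without resetting locals, which is why `||=` is used. A standalone sketch of the same idiom, with an illustrative method name and configurable limit:

```ruby
require "timeout"

# Re-run a slow block, giving each attempt 30 seconds; after the
# third timeout the method gives up and returns nil, mirroring the
# rescue clause in make_request above.
def with_bounded_retries(max_attempts: 3)
  retries ||= 0
  Timeout.timeout(30) { yield }
rescue Timeout::Error
  retry if (retries += 1) < max_attempts
end
```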
data/lib/hetzner/infra/server.rb CHANGED
@@ -5,7 +5,7 @@ module Hetzner
        @cluster_name = cluster_name
      end
  
-     def create(location:, instance_type:, instance_id:, firewall_id:, network_id:, ssh_key_id:, placement_group_id:)
+     def create(location:, instance_type:, instance_id:, firewall_id:, network_id:, ssh_key_id:, placement_group_id:, image:)
        puts
  
        server_name = "#{cluster_name}-#{instance_type}-#{instance_id}"
@@ -21,7 +21,7 @@ module Hetzner
        server_config = {
          name: server_name,
          location: location,
-         image: "ubuntu-20.04",
+         image: image,
          firewalls: [
            { firewall: firewall_id }
          ],
@@ -40,12 +40,22 @@ module Hetzner
          placement_group: placement_group_id
        }
  
-       response = hetzner_client.post("/servers", server_config).body
+       response = hetzner_client.post("/servers", server_config)
+       response_body = response.body
+ 
+       server = JSON.parse(response_body)["server"]
+ 
+       unless server
+         puts "Error creating server #{server_name}. Response details below:"
+         puts
+         p response
+         return
+       end
  
        puts "...server #{server_name} created."
        puts
  
-       JSON.parse(response)["server"]
+       server
      end
  
      def delete(server_name:)
data/lib/hetzner/k3s/cli.rb CHANGED
@@ -3,10 +3,12 @@ require "http"
  require "sshkey"
  require 'ipaddr'
  require 'open-uri'
+ require "yaml"
  
  require_relative "cluster"
  require_relative "version"
  
+ 
  module Hetzner
    module K3s
      class CLI < Thor
@@ -24,7 +26,6 @@ module Hetzner
  
        def create_cluster
          validate_config_file :create
- 
          Cluster.new(hetzner_client: hetzner_client, hetzner_token: find_hetzner_token).create configuration: configuration
        end
  
@@ -64,14 +65,17 @@ module Hetzner
        if File.exists?(config_file_path)
          begin
            @configuration = YAML.load_file(options[:config_file])
-           raise "invalid" unless configuration.is_a? Hash
-         rescue
+           unless configuration.is_a? Hash
+             raise "Configuration is invalid"
+             exit 1
+           end
+         rescue => e
            puts "Please ensure that the config file is a correct YAML manifest."
-           return
+           exit 1
          end
        else
          puts "Please specify a correct path for the config file."
-         return
+         exit 1
        end
  
        @errors = []
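Because validation failures now call `exit 1` instead of `return`, the CLI's exit status can be checked from shell scripts. A sketch, with a deliberately malformed config file name made up for illustration:

```bash
# create-cluster now terminates with a non-zero status when the
# config file is missing or is not a valid YAML hash
hetzner-k3s create-cluster --config-file bad.yaml || echo "config rejected"
```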
@@ -96,7 +100,6 @@ module Hetzner
        when :upgrade
          validate_kubeconfig_path_must_exist
          validate_new_k3s_version
-         validate_new_k3s_version_must_be_more_recent
        end
  
        errors.flatten!
@@ -198,7 +201,7 @@ module Hetzner
  
        def find_available_releases
          @available_releases ||= begin
-           response = HTTP.get("https://api.github.com/repos/k3s-io/k3s/tags").body
+           response = HTTP.get("https://api.github.com/repos/k3s-io/k3s/tags?per_page=999").body
            JSON.parse(response).map { |hash| hash["name"] }
          end
        rescue
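The `per_page` parameter above simply asks the GitHub API for a bigger page of tags: the default page size is 30, and GitHub caps `per_page` at 100, so this request returns up to 100 tags rather than only the first 30. The same request from the shell, for comparison:

```bash
# List k3s version tags; jq is used only to extract the tag names
curl -s "https://api.github.com/repos/k3s-io/k3s/tags?per_page=999" | jq -r '.[].name'
```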
@@ -271,36 +274,6 @@ module Hetzner
          schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
        end
  
-       def validate_new_k3s_version_must_be_more_recent
-         return if options[:force] == "true"
-         return unless kubernetes_client
- 
-         begin
-           Timeout::timeout(5) do
-             servers = kubernetes_client.api("v1").resource("nodes").list
- 
-             if servers.size == 0
-               errors << "The cluster seems to have no nodes, nothing to upgrade"
-             else
-               available_releases = find_available_releases
- 
-               current_k3s_version = servers.first.dig(:status, :nodeInfo, :kubeletVersion)
-               current_k3s_version_index = available_releases.index(current_k3s_version) || 1000
- 
-               new_k3s_version = options[:new_k3s_version]
-               new_k3s_version_index = available_releases.index(new_k3s_version) || 1000
- 
-               unless new_k3s_version_index < current_k3s_version_index
-                 errors << "The new k3s version must be more recent than the current one"
-               end
-             end
-           end
- 
-         rescue Timeout::Error
-           puts "Cannot upgrade: Unable to fetch nodes from Kubernetes API. Is the cluster online?"
-         end
-       end
- 
        def validate_instance_group(instance_group, workers: true)
          instance_group_errors = []
  
@@ -333,17 +306,6 @@ module Hetzner
          errors << instance_group_errors
        end
  
-       def kubernetes_client
-         return @kubernetes_client if @kubernetes_client
- 
-         config_hash = YAML.load_file(File.expand_path(configuration["kubeconfig_path"]))
-         config_hash['current-context'] = configuration["cluster_name"]
-         @kubernetes_client = K8s::Client.config(K8s::Config.new(config_hash))
-       rescue
-         errors << "Cannot connect to the Kubernetes cluster"
-         false
-       end
- 
        def validate_verify_host_key
          return unless [true, false].include?(configuration.fetch("public_ssh_key_path", false))
          errors << "Please set the verify_host_key option to either true or false"
data/lib/hetzner/k3s/cluster.rb CHANGED
@@ -2,8 +2,8 @@ require 'thread'
  require 'net/ssh'
  require "securerandom"
  require "base64"
- require "k8s-ruby"
  require 'timeout'
+ require "subprocess"
  
  require_relative "../infra/client"
  require_relative "../infra/firewall"
@@ -13,10 +13,12 @@ require_relative "../infra/server"
  require_relative "../infra/load_balancer"
  require_relative "../infra/placement_group"
  
- require_relative "../k3s/client_patch"
+ require_relative "../utils"
  
  
  class Cluster
+   include Utils
+ 
    def initialize(hetzner_client:, hetzner_token:)
      @hetzner_client = hetzner_client
      @hetzner_token = hetzner_token
@@ -76,7 +78,7 @@ class Cluster
  
    attr_reader :hetzner_client, :cluster_name, :kubeconfig_path, :k3s_version,
                :masters_config, :worker_node_pools,
-               :location, :public_ssh_key_path, :kubernetes_client,
+               :location, :public_ssh_key_path,
                :hetzner_token, :tls_sans, :new_k3s_version, :configuration,
                :config_file, :verify_host_key, :networks, :private_ssh_key_path, :configuration
  
@@ -120,7 +122,8 @@ class Cluster
          firewall_id: firewall_id,
          network_id: network_id,
          ssh_key_id: ssh_key_id,
-         placement_group_id: placement_group_id
+         placement_group_id: placement_group_id,
+         image: image
        }
      end
  
@@ -144,19 +147,24 @@ class Cluster
            firewall_id: firewall_id,
            network_id: network_id,
            ssh_key_id: ssh_key_id,
-           placement_group_id: placement_group_id
+           placement_group_id: placement_group_id,
+           image: image
          }
        end
      end
  
      threads = server_configs.map do |server_config|
        Thread.new do
-         servers << Hetzner::Server.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(server_config)
+         servers << Hetzner::Server.new(hetzner_client: hetzner_client, cluster_name: cluster_name).create(**server_config)
        end
      end
  
      threads.each(&:join) unless threads.empty?
  
+     while servers.size != server_configs.size
+       sleep 1
+     end
+ 
      puts
      threads = servers.map do |server|
        Thread.new { wait_for_ssh server }
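The switch from `create(server_config)` to `create(**server_config)` is the Ruby 3 keyword-arguments change behind the "compatible with Ruby 3.1.0" changelog entry: a hash is no longer implicitly converted into keyword arguments, so it must be splatted explicitly. A minimal illustration with made-up names:

```ruby
def create(location:, instance_type:)
  "#{location}/#{instance_type}"
end

config = { location: "nbg1", instance_type: "cpx21" }
# create(config)    # ArgumentError on Ruby 3.x: the hash is a positional argument
create(**config)    # => "nbg1/cpx21"
```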
@@ -201,25 +209,71 @@ class Cluster
    end
  
    def upgrade_cluster
-     resources = K8s::Resource.from_files(ugrade_plan_manifest_path)
- 
-     begin
-       kubernetes_client.api("upgrade.cattle.io/v1").resource("plans").get("k3s-server", namespace: "system-upgrade")
- 
-       puts "Aborting - an upgrade is already in progress."
- 
-     rescue K8s::Error::NotFound
-       resources.each do |resource|
-         kubernetes_client.create_resource(resource)
-       end
- 
-       puts "Upgrade will now start. Run `watch kubectl get nodes` to see the nodes being upgraded. This should take a few minutes for a small cluster."
-       puts "The API server may be briefly unavailable during the upgrade of the controlplane."
- 
-       configuration["k3s_version"] = new_k3s_version
+     worker_upgrade_concurrency = workers.size - 1
+     worker_upgrade_concurrency = 1 if worker_upgrade_concurrency == 0
  
-       File.write(config_file, configuration.to_yaml)
-     end
+     cmd = <<~EOS
+       kubectl apply -f - <<-EOF
+         apiVersion: upgrade.cattle.io/v1
+         kind: Plan
+         metadata:
+           name: k3s-server
+           namespace: system-upgrade
+           labels:
+             k3s-upgrade: server
+         spec:
+           concurrency: 1
+           version: #{new_k3s_version}
+           nodeSelector:
+             matchExpressions:
+               - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
+           serviceAccountName: system-upgrade
+           tolerations:
+           - key: "CriticalAddonsOnly"
+             operator: "Equal"
+             value: "true"
+             effect: "NoExecute"
+           cordon: true
+           upgrade:
+             image: rancher/k3s-upgrade
+       EOF
+     EOS
+ 
+     run cmd, kubeconfig_path: kubeconfig_path
+ 
+     cmd = <<~EOS
+       kubectl apply -f - <<-EOF
+         apiVersion: upgrade.cattle.io/v1
+         kind: Plan
+         metadata:
+           name: k3s-agent
+           namespace: system-upgrade
+           labels:
+             k3s-upgrade: agent
+         spec:
+           concurrency: #{worker_upgrade_concurrency}
+           version: #{new_k3s_version}
+           nodeSelector:
+             matchExpressions:
+               - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
+           serviceAccountName: system-upgrade
+           prepare:
+             image: rancher/k3s-upgrade
+             args: ["prepare", "k3s-server"]
+           cordon: true
+           upgrade:
+             image: rancher/k3s-upgrade
+       EOF
+     EOS
+ 
+     run cmd, kubeconfig_path: kubeconfig_path
+ 
+     puts "Upgrade will now start. Run `watch kubectl get nodes` to see the nodes being upgraded. This should take a few minutes for a small cluster."
+     puts "The API server may be briefly unavailable during the upgrade of the controlplane."
+ 
+     configuration["k3s_version"] = new_k3s_version
+ 
+     File.write(config_file, configuration.to_yaml)
    end
  
  
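The agent plan's concurrency is derived from the worker pool size: all but one worker may upgrade in parallel, with a floor of 1 so that a single-worker cluster can still upgrade. A quick check of the arithmetic, with illustrative pool sizes:

```ruby
# [workers, concurrency] pairs for a few pool sizes
[1, 2, 5].map { |n| c = n - 1; c = 1 if c == 0; [n, c] }
# => [[1, 1], [2, 1], [5, 4]]
```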
@@ -230,28 +284,28 @@ class Cluster
      taint = schedule_workloads_on_masters? ? " " : " --node-taint CriticalAddonsOnly=true:NoExecute "
  
      <<~EOF
-       curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="#{k3s_version}" K3S_TOKEN="#{k3s_token}" INSTALL_K3S_EXEC="server \
-         --disable-cloud-controller \
-         --disable servicelb \
-         --disable traefik \
-         --disable local-storage \
-         --disable metrics-server \
-         --write-kubeconfig-mode=644 \
-         --node-name="$(hostname -f)" \
-         --cluster-cidr=10.244.0.0/16 \
-         --etcd-expose-metrics=true \
-         --kube-controller-manager-arg="address=0.0.0.0" \
-         --kube-controller-manager-arg="bind-address=0.0.0.0" \
-         --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
-         --kube-scheduler-arg="address=0.0.0.0" \
-         --kube-scheduler-arg="bind-address=0.0.0.0" \
-         #{taint} \
-         --kubelet-arg="cloud-provider=external" \
-         --advertise-address=$(hostname -I | awk '{print $2}') \
-         --node-ip=$(hostname -I | awk '{print $2}') \
-         --node-external-ip=$(hostname -I | awk '{print $1}') \
-         --flannel-iface=#{flannel_interface} \
-         #{server} #{tls_sans}" sh -
+       curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="#{k3s_version}" K3S_TOKEN="#{k3s_token}" INSTALL_K3S_EXEC="server \
+         --disable-cloud-controller \
+         --disable servicelb \
+         --disable traefik \
+         --disable local-storage \
+         --disable metrics-server \
+         --write-kubeconfig-mode=644 \
+         --node-name="$(hostname -f)" \
+         --cluster-cidr=10.244.0.0/16 \
+         --etcd-expose-metrics=true \
+         --kube-controller-manager-arg="address=0.0.0.0" \
+         --kube-controller-manager-arg="bind-address=0.0.0.0" \
+         --kube-proxy-arg="metrics-bind-address=0.0.0.0" \
+         --kube-scheduler-arg="address=0.0.0.0" \
+         --kube-scheduler-arg="bind-address=0.0.0.0" \
+         #{taint} \
+         --kubelet-arg="cloud-provider=external" \
+         --advertise-address=$(hostname -I | awk '{print $2}') \
+         --node-ip=$(hostname -I | awk '{print $2}') \
+         --node-external-ip=$(hostname -I | awk '{print $1}') \
+         --flannel-iface=#{flannel_interface} \
+         #{server} #{tls_sans}" sh -
      EOF
    end
  
@@ -259,12 +313,12 @@ class Cluster
      flannel_interface = find_flannel_interface(worker)
  
      <<~EOF
-       curl -sfL https://get.k3s.io | K3S_TOKEN="#{k3s_token}" INSTALL_K3S_VERSION="#{k3s_version}" K3S_URL=https://#{first_master_private_ip}:6443 INSTALL_K3S_EXEC="agent \
-         --node-name="$(hostname -f)" \
-         --kubelet-arg="cloud-provider=external" \
-         --node-ip=$(hostname -I | awk '{print $2}') \
-         --node-external-ip=$(hostname -I | awk '{print $1}') \
-         --flannel-iface=#{flannel_interface}" sh -
+       curl -sfL https://get.k3s.io | K3S_TOKEN="#{k3s_token}" INSTALL_K3S_VERSION="#{k3s_version}" K3S_URL=https://#{first_master_private_ip}:6443 INSTALL_K3S_EXEC="agent \
+         --node-name="$(hostname -f)" \
+         --kubelet-arg="cloud-provider=external" \
+         --node-ip=$(hostname -I | awk '{print $2}') \
+         --node-external-ip=$(hostname -I | awk '{print $1}') \
+         --flannel-iface=#{flannel_interface}" sh -
      EOF
    end
  
@@ -311,201 +365,71 @@ class Cluster
    end
  
    def deploy_cloud_controller_manager
+     check_kubectl
+ 
      puts
      puts "Deploying Hetzner Cloud Controller Manager..."
  
-     begin
-       kubernetes_client.api("v1").resource("secrets").get("hcloud", namespace: "kube-system")
- 
-     rescue K8s::Error::NotFound
-       secret = K8s::Resource.new(
-         apiVersion: "v1",
-         kind: "Secret",
-         metadata: {
-           namespace: 'kube-system',
-           name: 'hcloud',
-         },
-         data: {
-           network: Base64.encode64(cluster_name),
-           token: Base64.encode64(hetzner_token)
-         }
-       )
- 
-       kubernetes_client.api('v1').resource('secrets').create_resource(secret)
-     end
- 
- 
-     manifest = fetch_manifest("https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml")
- 
-     File.write("/tmp/cloud-controller-manager.yaml", manifest)
- 
-     resources = K8s::Resource.from_files("/tmp/cloud-controller-manager.yaml")
- 
-     begin
-       kubernetes_client.api("apps/v1").resource("deployments").get("hcloud-cloud-controller-manager", namespace: "kube-system")
+     cmd = <<~EOS
+       kubectl apply -f - <<-EOF
+         apiVersion: "v1"
+         kind: "Secret"
+         metadata:
+           namespace: 'kube-system'
+           name: 'hcloud'
+         stringData:
+           network: "#{cluster_name}"
+           token: "#{hetzner_token}"
+       EOF
+     EOS
  
-       resources.each do |resource|
-         kubernetes_client.update_resource(resource)
-       end
+     run cmd, kubeconfig_path: kubeconfig_path
  
-     rescue K8s::Error::NotFound
-       resources.each do |resource|
-         kubernetes_client.create_resource(resource)
-       end
+     cmd = "kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml"
  
-     end
+     run cmd, kubeconfig_path: kubeconfig_path
  
      puts "...Cloud Controller Manager deployed"
-   rescue Excon::Error::Socket
-     retry
-   end
- 
-   def fetch_manifest(url)
-     retries ||= 1
-     HTTP.follow.get(url).body
-   rescue
-     retry if (retries += 1) <= 10
    end
  
    def deploy_system_upgrade_controller
+     check_kubectl
+ 
      puts
      puts "Deploying k3s System Upgrade Controller..."
  
-     manifest = HTTP.follow.get("https://github.com/rancher/system-upgrade-controller/releases/download/v0.8.0/system-upgrade-controller.yaml").body
+     cmd = "kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.8.1/system-upgrade-controller.yaml"
  
-     File.write("/tmp/system-upgrade-controller.yaml", manifest)
- 
-     resources = K8s::Resource.from_files("/tmp/system-upgrade-controller.yaml")
- 
-     begin
-       kubernetes_client.api("apps/v1").resource("deployments").get("system-upgrade-controller", namespace: "system-upgrade")
- 
-       resources.each do |resource|
-         kubernetes_client.update_resource(resource)
-       end
- 
-     rescue K8s::Error::NotFound
-       resources.each do |resource|
-         kubernetes_client.create_resource(resource)
-       end
- 
-     end
+     run cmd, kubeconfig_path: kubeconfig_path
  
      puts "...k3s System Upgrade Controller deployed"
-   rescue Excon::Error::Socket
-     retry
    end
  
    def deploy_csi_driver
+     check_kubectl
+ 
      puts
      puts "Deploying Hetzner CSI Driver..."
  
-     begin
-       kubernetes_client.api("v1").resource("secrets").get("hcloud-csi", namespace: "kube-system")
- 
-     rescue K8s::Error::NotFound
-       secret = K8s::Resource.new(
-         apiVersion: "v1",
-         kind: "Secret",
-         metadata: {
-           namespace: 'kube-system',
-           name: 'hcloud-csi',
-         },
-         data: {
-           token: Base64.encode64(hetzner_token)
-         }
-       )
- 
-       kubernetes_client.api('v1').resource('secrets').create_resource(secret)
-     end
- 
- 
-     manifest = HTTP.follow.get("https://raw.githubusercontent.com/hetznercloud/csi-driver/v1.6.0/deploy/kubernetes/hcloud-csi.yml").body
- 
-     File.write("/tmp/csi-driver.yaml", manifest)
+     cmd = <<~EOS
+       kubectl apply -f - <<-EOF
+         apiVersion: "v1"
+         kind: "Secret"
+         metadata:
+           namespace: 'kube-system'
+           name: 'hcloud-csi'
+         stringData:
+           token: "#{hetzner_token}"
+       EOF
+     EOS
  
-     resources = K8s::Resource.from_files("/tmp/csi-driver.yaml")
+     run cmd, kubeconfig_path: kubeconfig_path
  
-     begin
-       kubernetes_client.api("apps/v1").resource("daemonsets").get("hcloud-csi-node", namespace: "kube-system")
+     cmd = "kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/v1.6.0/deploy/kubernetes/hcloud-csi.yml"
  
- 
-     resources.each do |resource|
-       begin
-         kubernetes_client.update_resource(resource)
-       rescue K8s::Error::Invalid => e
-         raise e unless e.message =~ /must be specified/i
-       end
-     end
- 
-     rescue K8s::Error::NotFound
-       resources.each do |resource|
-         kubernetes_client.create_resource(resource)
-       end
- 
-     end
+     run cmd, kubeconfig_path: kubeconfig_path
  
      puts "...CSI Driver deployed"
-   rescue Excon::Error::Socket
-     retry
-   end
- 
-   def wait_for_ssh(server)
-     Timeout::timeout(5) do
-       server_name = server["name"]
- 
-       puts "Waiting for server #{server_name} to be up..."
- 
-       loop do
-         result = ssh(server, "echo UP")
-         break if result == "UP"
-       end
- 
-       puts "...server #{server_name} is now up."
-     end
-   rescue Errno::ENETUNREACH, Errno::EHOSTUNREACH, Timeout::Error, IOError
-     retry
-   end
- 
-   def ssh(server, command, print_output: false)
-     public_ip = server.dig("public_net", "ipv4", "ip")
-     output = ""
- 
-     params = { verify_host_key: (verify_host_key ? :always : :never) }
- 
-     if private_ssh_key_path
-       params[:keys] = [private_ssh_key_path]
-     end
- 
-     Net::SSH.start(public_ip, "root", params) do |session|
-       session.exec!(command) do |channel, stream, data|
-         output << data
-         puts data if print_output
-       end
-     end
-     output.chop
-   rescue Net::SSH::Disconnect => e
-     retry unless e.message =~ /Too many authentication failures/
-   rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED, Errno::ENETUNREACH, Errno::EHOSTUNREACH
-     retry
-   rescue Net::SSH::AuthenticationFailed
-     puts
-     puts "Cannot continue: SSH authentication failed. Please ensure that the private SSH key is correct."
-     exit 1
-   rescue Net::SSH::HostKeyMismatch
-     puts
-     puts "Cannot continue: Unable to SSH into server with IP #{public_ip} because the existing fingerprint in the known_hosts file does not match that of the actual host key."
-     puts "This is due to a security check but can also happen when creating a new server that gets assigned the same IP address as another server you've owned in the past."
-     puts "If are sure no security is being violated here and you're just creating new servers, you can eiher remove the relevant lines from your known_hosts (see IPs from the cloud console) or disable host key verification by setting the option 'verify_host_key' to false in the configuration file for the cluster."
-     exit 1
-   end
- 
-   def kubernetes_client
-     return @kubernetes_client if @kubernetes_client
- 
-     config_hash = YAML.load_file(kubeconfig_path)
-     config_hash['current-context'] = cluster_name
-     @kubernetes_client = K8s::Client.config(K8s::Config.new(config_hash))
    end
  
    def find_flannel_interface(server)
@@ -585,63 +509,6 @@ class Cluster
      FileUtils.chmod "go-r", kubeconfig_path
    end
  
-   def ugrade_plan_manifest_path
-     worker_upgrade_concurrency = workers.size - 1
-     worker_upgrade_concurrency = 1 if worker_upgrade_concurrency == 0
- 
-     manifest = <<~EOF
-       apiVersion: upgrade.cattle.io/v1
-       kind: Plan
-       metadata:
-         name: k3s-server
-         namespace: system-upgrade
-         labels:
-           k3s-upgrade: server
-       spec:
-         concurrency: 1
-         version: #{new_k3s_version}
-         nodeSelector:
-           matchExpressions:
-             - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
-         serviceAccountName: system-upgrade
-         tolerations:
-         - key: "CriticalAddonsOnly"
-           operator: "Equal"
-           value: "true"
-           effect: "NoExecute"
-         cordon: true
-         upgrade:
-           image: rancher/k3s-upgrade
-       ---
-       apiVersion: upgrade.cattle.io/v1
-       kind: Plan
-       metadata:
-         name: k3s-agent
-         namespace: system-upgrade
-         labels:
-           k3s-upgrade: agent
-       spec:
-         concurrency: #{worker_upgrade_concurrency}
-         version: #{new_k3s_version}
-         nodeSelector:
-           matchExpressions:
-             - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
-         serviceAccountName: system-upgrade
-         prepare:
-           image: rancher/k3s-upgrade
-           args: ["prepare", "k3s-server"]
-         cordon: true
-         upgrade:
-           image: rancher/k3s-upgrade
-     EOF
- 
-     temp_file_path = "/tmp/k3s-upgrade-plan.yaml"
- 
-     File.write(temp_file_path, manifest)
- 
-     temp_file_path
-   end
- 
    def belongs_to_cluster?(server)
      server.dig("labels", "cluster") == cluster_name
    end
@@ -651,4 +518,15 @@ class Cluster
      schedule_workloads_on_masters ? !!schedule_workloads_on_masters : false
    end
  
+   def image
+     configuration.dig("image") || "ubuntu-20.04"
+   end
+ 
+   def check_kubectl
+     unless which("kubectl")
+       puts "Please ensure kubectl is installed and in your PATH."
+       exit 1
+     end
+   end
+ 
  end
data/lib/hetzner/k3s/version.rb CHANGED
@@ -1,5 +1,5 @@
  module Hetzner
    module K3s
-     VERSION = "0.4.5"
+     VERSION = "0.4.9"
    end
  end
data/lib/hetzner/utils.rb ADDED
@@ -0,0 +1,99 @@
+ module Utils
+   def which(cmd)
+     exts = ENV['PATHEXT'] ? ENV['PATHEXT'].split(';') : ['']
+     ENV['PATH'].split(File::PATH_SEPARATOR).each do |path|
+       exts.each do |ext|
+         exe = File.join(path, "#{cmd}#{ext}")
+         return exe if File.executable?(exe) && !File.directory?(exe)
+       end
+     end
+     nil
+   end
+ 
+   def run(command, kubeconfig_path:)
+     env = ENV.to_hash.merge({
+       "KUBECONFIG" => kubeconfig_path
+     })
+ 
+     cmd_path = "/tmp/cli.cmd"
+ 
+     File.open(cmd_path, "w") do |f|
+       f.write("set -euo pipefail\n")
+       f.write(command)
+     end
+ 
+     FileUtils.chmod("+x", cmd_path)
+ 
+     begin
+       process = nil
+ 
+       at_exit do
+         begin
+           process&.send_signal("SIGTERM")
+         rescue Errno::ESRCH, Interrupt
+         end
+       end
+ 
+       Subprocess.check_call(["bash", "-c", cmd_path], env: env) do |p|
+         process = p
+       end
+ 
+     rescue Subprocess::NonZeroExit
+       puts "Command failed: non-zero exit code"
+       exit 1
+     rescue Interrupt
+       puts "Command interrupted"
+       exit 1
+     end
+   end
+ 
+   def wait_for_ssh(server)
+     Timeout::timeout(5) do
+       server_name = server["name"]
+ 
+       puts "Waiting for server #{server_name} to be up..."
+ 
+       loop do
+         result = ssh(server, "echo UP")
+         break if result == "UP"
+       end
+ 
+       puts "...server #{server_name} is now up."
+     end
+   rescue Errno::ENETUNREACH, Errno::EHOSTUNREACH, Timeout::Error, IOError
+     retry
+   end
+ 
+   def ssh(server, command, print_output: false)
+     public_ip = server.dig("public_net", "ipv4", "ip")
+     output = ""
+ 
+     params = { verify_host_key: (verify_host_key ? :always : :never) }
+ 
+     if private_ssh_key_path
+       params[:keys] = [private_ssh_key_path]
+     end
+ 
+     Net::SSH.start(public_ip, "root", params) do |session|
+       session.exec!(command) do |channel, stream, data|
+         output << data
+         puts data if print_output
+       end
+     end
+     output.chop
+   rescue Net::SSH::Disconnect => e
+     retry unless e.message =~ /Too many authentication failures/
+   rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED, Errno::ENETUNREACH, Errno::EHOSTUNREACH
+     retry
+   rescue Net::SSH::AuthenticationFailed
+     puts
+     puts "Cannot continue: SSH authentication failed. Please ensure that the private SSH key is correct."
+     exit 1
+   rescue Net::SSH::HostKeyMismatch
+     puts
+     puts "Cannot continue: Unable to SSH into server with IP #{public_ip} because the existing fingerprint in the known_hosts file does not match that of the actual host key."
+     puts "This is due to a security check but can also happen when creating a new server that gets assigned the same IP address as another server you've owned in the past."
+     puts "If you are sure no security is being violated here and you're just creating new servers, you can either remove the relevant lines from your known_hosts (see IPs from the cloud console) or disable host key verification by setting the option 'verify_host_key' to false in the configuration file for the cluster."
+     exit 1
+   end
+ end
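Since `Cluster` now includes this module, every deploy step shells out through `run`, which writes the command to a temp script and executes it with `bash` under a `KUBECONFIG` environment override. A minimal standalone usage sketch, with illustrative paths:

```ruby
require "fileutils"
require "subprocess"
require_relative "lib/hetzner/utils"

class Demo
  include Utils
end

# Runs kubectl against the given kubeconfig; the process exits with
# status 1 if the command returns a non-zero exit code.
Demo.new.run("kubectl get nodes", kubeconfig_path: "./kubeconfig")
```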
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: hetzner-k3s
  version: !ruby/object:Gem::Version
-   version: 0.4.5
+   version: 0.4.9
  platform: ruby
  authors:
  - Vito Botta
- autorequire:
+ autorequire:
  bindir: exe
  cert_chain: []
- date: 2021-11-03 00:00:00.000000000 Z
+ date: 2022-01-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: thor
@@ -53,7 +53,7 @@ dependencies:
        - !ruby/object:Gem::Version
          version: '0'
  - !ruby/object:Gem::Dependency
-   name: k8s-ruby
+   name: sshkey
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - ">="
@@ -67,7 +67,7 @@ dependencies:
        - !ruby/object:Gem::Version
          version: '0'
  - !ruby/object:Gem::Dependency
-   name: sshkey
+   name: ed25519
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - ">="
@@ -81,7 +81,7 @@ dependencies:
        - !ruby/object:Gem::Version
          version: '0'
  - !ruby/object:Gem::Dependency
-   name: ed25519
+   name: bcrypt_pbkdf
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - ">="
@@ -95,7 +95,7 @@ dependencies:
        - !ruby/object:Gem::Version
          version: '0'
  - !ruby/object:Gem::Dependency
-   name: bcrypt_pbkdf
+   name: subprocess
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - ">="
@@ -119,6 +119,7 @@ extra_rdoc_files: []
  files:
  - ".gitignore"
  - ".rspec"
+ - ".ruby-version"
  - ".travis.yml"
  - CODE_OF_CONDUCT.md
  - Dockerfile
@@ -144,9 +145,9 @@ files:
  - lib/hetzner/infra/server.rb
  - lib/hetzner/infra/ssh_key.rb
  - lib/hetzner/k3s/cli.rb
- - lib/hetzner/k3s/client_patch.rb
  - lib/hetzner/k3s/cluster.rb
  - lib/hetzner/k3s/version.rb
+ - lib/hetzner/utils.rb
  homepage: https://github.com/vitobotta/hetzner-k3s
  licenses:
  - MIT
@@ -154,7 +155,7 @@ metadata:
    homepage_uri: https://github.com/vitobotta/hetzner-k3s
    source_code_uri: https://github.com/vitobotta/hetzner-k3s
    changelog_uri: https://github.com/vitobotta/hetzner-k3s
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -169,8 +170,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
      - !ruby/object:Gem::Version
        version: '0'
  requirements: []
- rubygems_version: 3.1.4
- signing_key:
+ rubygems_version: 3.3.3
+ signing_key:
  specification_version: 4
  summary: A CLI to create a Kubernetes cluster in Hetzner Cloud very quickly using
    k3s.
@@ -1,38 +0,0 @@
data/lib/hetzner/k3s/client_patch.rb DELETED
@@ -1,38 +0,0 @@
- module K8s
-   class ResourceClient
-     def initialize(transport, api_client, api_resource, namespace: nil, resource_class: K8s::Resource)
-       @transport = transport
-       @api_client = api_client
-       @api_resource = api_resource
-       @namespace = namespace
-       @resource_class = resource_class
- 
-       if @api_resource.name.include? '/'
-         @resource, @subresource = @api_resource.name.split('/', 2)
-       else
-         @resource = @api_resource.name
-         @subresource = nil
-       end
- 
-       # fail "Resource #{api_resource.name} is not namespaced" unless api_resource.namespaced || !namespace
-     end
- 
-     def path(name = nil, subresource: @subresource, namespace: @namespace)
-       namespace_part = namespace ? ['namespaces', namespace] : []
- 
-       if namespaced?
-         if name && subresource
-           @api_client.path(*namespace_part, @resource, name, subresource)
-         elsif name
-           @api_client.path(*namespace_part, @resource, name)
-         else namespaced?
-           @api_client.path(*namespace_part, @resource)
-         end
-       elsif name
-         @api_client.path(@resource, name)
-       else
-         @api_client.path(@resource)
-       end
-     end
-   end
- end
- end