porkadot 0.21.0 → 0.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/lib/porkadot/assets/bootstrap/manifests/kube-apiserver.bootstrap.yaml.erb +32 -0
  3. data/lib/porkadot/assets/bootstrap/manifests/kube-controller-manager.bootstrap.yaml.erb +23 -0
  4. data/lib/porkadot/assets/bootstrap/manifests/kube-scheduler.bootstrap.yaml.erb +23 -0
  5. data/lib/porkadot/assets/kubelet/install-deps.sh.erb +9 -0
  6. data/lib/porkadot/assets/kubelet/setup-containerd.sh.erb +8 -1
  7. data/lib/porkadot/assets/kubelet-default/install.sh.erb +14 -0
  8. data/lib/porkadot/assets/kubelet.rb +32 -0
  9. data/lib/porkadot/assets/kubernetes/install.secrets.sh.erb +8 -0
  10. data/lib/porkadot/assets/kubernetes/install.sh.erb +8 -1
  11. data/lib/porkadot/assets/kubernetes/kustomization.yaml.erb +7 -0
  12. data/lib/porkadot/assets/kubernetes/manifests/{coredns.yaml.erb → addons/coredns/coredns.yaml.erb} +2 -1
  13. data/lib/porkadot/assets/kubernetes/manifests/{dns-horizontal-autoscaler.yaml.erb → addons/coredns/dns-horizontal-autoscaler.yaml.erb} +0 -2
  14. data/lib/porkadot/assets/kubernetes/manifests/addons/coredns/kustomization.yaml.erb +3 -0
  15. data/lib/porkadot/assets/kubernetes/manifests/{flannel.yaml.erb → addons/flannel/flannel.yaml.erb} +39 -10
  16. data/lib/porkadot/assets/kubernetes/manifests/addons/flannel/kustomization.yaml.erb +2 -0
  17. data/lib/porkadot/assets/kubernetes/manifests/{kubelet-rubber-stamp.yaml.erb → addons/kubelet-rubber-stamp/kubelet-rubber-stamp.yaml.erb} +1 -1
  18. data/lib/porkadot/assets/kubernetes/manifests/addons/kubelet-rubber-stamp/kustomization.yaml.erb +2 -0
  19. data/lib/porkadot/assets/kubernetes/manifests/addons/kustomization.yaml.erb +4 -0
  20. data/lib/porkadot/assets/kubernetes/manifests/{000-metallb.yaml.erb → addons/metallb/000-metallb.yaml.erb} +0 -0
  21. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/kustomization.yaml.erb +6 -0
  22. data/lib/porkadot/assets/kubernetes/manifests/{metallb.config.yaml.erb → addons/metallb/metallb.config.yaml.erb} +1 -2
  23. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/metallb.yaml +480 -0
  24. data/lib/porkadot/assets/kubernetes/manifests/{metallb.yaml.erb → addons/metallb/metallb.yaml.erb} +41 -4
  25. data/lib/porkadot/assets/kubernetes/manifests/addons/storage-version-migrator/kustomization.yaml.erb +2 -0
  26. data/lib/porkadot/assets/kubernetes/manifests/{storage-version-migrator.yaml.erb → addons/storage-version-migrator/storage-version-migrator.yaml.erb} +0 -0
  27. data/lib/porkadot/assets/kubernetes/manifests/kube-apiserver.yaml.erb +32 -0
  28. data/lib/porkadot/assets/kubernetes/manifests/kube-controller-manager.yaml.erb +20 -6
  29. data/lib/porkadot/assets/kubernetes/manifests/kube-scheduler.yaml.erb +20 -6
  30. data/lib/porkadot/assets/kubernetes/manifests/kubelet.yaml.erb +0 -1
  31. data/lib/porkadot/assets/kubernetes/manifests/kustomization.yaml.erb +8 -0
  32. data/lib/porkadot/assets/kubernetes.rb +91 -18
  33. data/lib/porkadot/assets.rb +13 -3
  34. data/lib/porkadot/cmd/cli.rb +27 -0
  35. data/lib/porkadot/cmd/etcd.rb +68 -0
  36. data/lib/porkadot/cmd/install.rb +15 -0
  37. data/lib/porkadot/config.rb +9 -5
  38. data/lib/porkadot/configs/addons.rb +21 -0
  39. data/lib/porkadot/configs/certs.rb +3 -0
  40. data/lib/porkadot/configs/etcd.rb +35 -2
  41. data/lib/porkadot/configs/kubelet.rb +26 -0
  42. data/lib/porkadot/configs/kubernetes.rb +27 -10
  43. data/lib/porkadot/const.rb +3 -0
  44. data/lib/porkadot/default.yaml +24 -6
  45. data/lib/porkadot/install/kubelet.rb +137 -0
  46. data/lib/porkadot/install/kubernetes.rb +2 -2
  47. data/lib/porkadot/version.rb +1 -1
  48. data/lib/porkadot.rb +2 -2
  49. data/porkadot.gemspec +1 -0
  50. metadata +38 -14
  51. data/lib/porkadot/assets/kubernetes/manifests/metallb.secrets.yaml.erb +0 -13
  52. data/lib/porkadot/configs/cni.rb +0 -22
  53. data/lib/porkadot/configs/loadbalancer.rb +0 -26

data/lib/porkadot/assets/kubernetes/manifests/kube-scheduler.yaml.erb
@@ -1,6 +1,6 @@
 <% k8s = global_config.k8s -%>
 ---
-apiVersion: policy/v1beta1
+apiVersion: policy/v1
 kind: PodDisruptionBudget
 metadata:
   name: kube-scheduler
@@ -113,6 +113,11 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+        runAsNonRoot: true
+        runAsUser: 65534
       affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
@@ -141,17 +146,26 @@ spec:
         - <%= k %><% if v ;%>=<%= v %><%; end %>
         <%- end -%>
         livenessProbe:
+          failureThreshold: 8
          httpGet:
            path: /healthz
-            port: 10251 # Note: Using default port. Update if --port option is set differently.
-          initialDelaySeconds: 15
+            port: 10259
+            scheme: HTTPS
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          timeoutSeconds: 15
+        startupProbe:
+          failureThreshold: 24
+          httpGet:
+            path: /healthz
+            port: 10259
+            scheme: HTTPS
+          initialDelaySeconds: 10
+          periodSeconds: 10
           timeoutSeconds: 15
       priorityClassName: system-cluster-critical
       nodeSelector:
         k8s.unstable.cloud/master: ""
-      securityContext:
-        runAsNonRoot: true
-        runAsUser: 65534
       serviceAccountName: kube-scheduler
       tolerations:
       - key: CriticalAddonsOnly

data/lib/porkadot/assets/kubernetes/manifests/kubelet.yaml.erb
@@ -15,7 +15,6 @@ roleRef:
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: auto-approve-csrs-for-group
   name: porkadot:node-autoapprove-bootstrap
 subjects:
 - kind: Group

data/lib/porkadot/assets/kubernetes/manifests/kustomization.yaml.erb
@@ -0,0 +1,8 @@
+resources:
+- addons
+- kube-apiserver.yaml
+- kube-controller-manager.yaml
+- kube-proxy.yaml
+- kube-scheduler.yaml
+- kubelet.yaml
+- porkadot.yaml

data/lib/porkadot/assets/kubernetes.rb
@@ -17,34 +17,107 @@ module Porkadot; module Assets
 
     def render
       logger.info "--> Rendering kubernetes manifests"
-      unless File.directory?(config.manifests_path)
-        FileUtils.mkdir_p(config.manifests_path)
-      end
-      unless File.directory?(config.manifests_secrets_path)
-        FileUtils.mkdir_p(config.manifests_secrets_path)
-      end
-      lb = global_config.lb
-      cni = global_config.cni
       render_erb 'manifests/porkadot.yaml'
       render_erb 'manifests/kubelet.yaml'
-      render_erb "manifests/000-#{lb.type}.yaml"
-      render_erb "manifests/#{lb.type}.yaml"
-      render_erb "manifests/#{lb.type}.config.yaml"
-      render_secrets_erb "manifests/#{lb.type}.secrets.yaml"
-      render_erb "manifests/#{cni.type}.yaml"
-      render_erb "manifests/coredns.yaml"
-      render_erb "manifests/dns-horizontal-autoscaler.yaml"
       render_erb "manifests/kube-apiserver.yaml"
       render_secrets_erb "manifests/kube-apiserver.secrets.yaml"
       render_erb "manifests/kube-proxy.yaml"
       render_erb "manifests/kube-scheduler.yaml"
       render_erb "manifests/kube-controller-manager.yaml"
       render_secrets_erb "manifests/kube-controller-manager.secrets.yaml"
-      render_erb "manifests/kubelet-rubber-stamp.yaml"
-      render_erb "manifests/storage-version-migrator.yaml"
       render_secrets_erb "kubeconfig.yaml"
-      render_erb 'install.sh'
+      render_erb 'manifests/kustomization.yaml'
+      render_erb 'kustomization.yaml', force: false
+      render_erb 'install.sh', prune_allowlist: prune_allowlist
+      render_secrets_erb 'install.secrets.sh'
+
+      addons = Addons.new(global_config)
+      addons.render
+    end
+
+    def prune_allowlist
+      return %w[
+        apiextensions.k8s.io/v1/customresourcedefinition
+        apps/v1/daemonset
+        apps/v1/deployment
+        core/v1/configmap
+        core/v1/namespace
+        core/v1/service
+        core/v1/serviceaccount
+        policy/v1/poddisruptionbudget
+        policy/v1beta1/podsecuritypolicy
+        rbac.authorization.k8s.io/v1/clusterrole
+        rbac.authorization.k8s.io/v1/clusterrolebinding
+        rbac.authorization.k8s.io/v1/role
+        rbac.authorization.k8s.io/v1/rolebinding
+      ]
     end
+  end
+
+  class Addons
+    include Porkadot::Assets
+    TEMPLATE_DIR = File.join(File.dirname(__FILE__), "kubernetes", "manifests", "addons")
+    attr_reader :global_config
+    attr_reader :config
+    attr_reader :logger
+
+    def initialize global_config
+      @global_config = global_config
+      @config = global_config.addons
+      @logger = global_config.logger
+    end
+
+    def render
+      logger.info "--> Rendering kubernetes addons"
+      render_erb "kustomization.yaml"
+
+      self.config.enabled.each do |name|
+        manifests = @@manifests[name]
+        manifests.each do |m|
+          render_erb(m)
+        end
+        secrets = @@secrets_manifests[name]
+        secrets.each do |m|
+          render_secrets_erb(m)
+        end
+      end
+    end
+
+    def self.register_manifests name, manifests, secrets: []
+      @@manifests ||= {}
+      @@manifests[name] = manifests
+      @@secrets_manifests ||= {}
+      @@secrets_manifests[name] = secrets
+    end
+
+    register_manifests('flannel', [
+      'flannel/flannel.yaml',
+      'flannel/kustomization.yaml'
+    ])
+
+    register_manifests('coredns', [
+      'coredns/coredns.yaml',
+      'coredns/dns-horizontal-autoscaler.yaml',
+      'coredns/kustomization.yaml'
+    ])
+
+    register_manifests('metallb', [
+      'metallb/000-metallb.yaml',
+      'metallb/metallb.yaml',
+      'metallb/metallb.config.yaml',
+      'metallb/kustomization.yaml'
+    ])
+
+
+    register_manifests('kubelet-rubber-stamp', [
+      'kubelet-rubber-stamp/kubelet-rubber-stamp.yaml',
+      'kubelet-rubber-stamp/kustomization.yaml'
+    ])
+
+    register_manifests('storage-version-migrator', [
+      'storage-version-migrator/storage-version-migrator.yaml',
+      'storage-version-migrator/kustomization.yaml'
+    ])
 
   end
 end; end
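
The new Addons class above registers each addon's templates with register_manifests and renders only the names listed under addons.enabled in the configuration (see the default.yaml hunk further down). As a rough illustration of that registration hook, and not something shipped in the gem, an extra addon could be wired in the same way. The addon name and template paths below are invented; matching .erb templates would have to exist under kubernetes/manifests/addons/ for render to find them.

# Hypothetical sketch only: reopening Porkadot::Assets::Addons to register an
# extra addon exactly like the built-ins above. The name and paths are invented,
# and the addon would also need to appear in addons.enabled to be rendered.
module Porkadot; module Assets
  class Addons
    register_manifests('local-path-provisioner', [
      'local-path-provisioner/local-path-provisioner.yaml',
      'local-path-provisioner/kustomization.yaml'
    ])
  end
end; end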

data/lib/porkadot/assets.rb
@@ -15,7 +15,7 @@ module Porkadot::Assets
     end
   end
 
-  def render_erb file, opts={}
+  def render_erb file, **opts
     file = file.to_s
     opts[:config] = self.config
     opts[:global_config] = self.global_config
@@ -23,8 +23,15 @@ module Porkadot::Assets
     opts[:u] = ErbUtils.new
 
     logger.info "----> #{file}"
+    asset = config.asset_path(file)
+    if opts[:force] != nil && File.file?(asset)
+      logger.debug "------> Already exists: skipping #{file}"
+      return
+    end
+    asset_dir = File.dirname(asset)
+    FileUtils.mkdir_p(asset_dir) unless File.directory?(asset_dir)
     open(File.join(self.class::TEMPLATE_DIR, "#{file}.erb")) do |io|
-      open(config.asset_path(file), 'w') do |out|
+      open(asset, 'w') do |out|
         out.write ERB.new(io.read, trim_mode: '-').result_with_hash(opts)
       end
     end
@@ -38,8 +45,11 @@ module Porkadot::Assets
     opts[:u] = ErbUtils.new
 
     logger.info "----> #{file}"
+    secret = config.secrets_path(file)
+    secret_dir = File.dirname(secret)
+    FileUtils.mkdir_p(secret_dir) unless File.directory?(secret_dir)
     open(File.join(self.class::TEMPLATE_DIR, "#{file}.erb")) do |io|
-      open(config.secrets_path(file), 'w') do |out|
+      open(secret, 'w') do |out|
         out.write ERB.new(io.read, trim_mode: '-').result_with_hash(opts)
       end
     end

data/lib/porkadot/cmd/cli.rb
@@ -13,15 +13,22 @@ module Porkadot; module Cmd
     desc "install", "Install kubernetes"
     subcommand "install", Porkadot::Cmd::Install::Cli
 
+    desc "etcd", "Interact with etcd"
+    subcommand "etcd", Porkadot::Cmd::Etcd::Cli
+
     desc "setup-containerd", "Setup containerd"
     option :node, type: :string
     option :force, type: :boolean, default: false
+    option :bootstrap, type: :boolean, default: false
     def setup_containerd
       logger.info "Setup containerd"
       kubelets = Porkadot::Install::KubeletList.new(self.config)
       nodes = []
       if node = options[:node]
         nodes = kubelets[node]
+      elsif options[:bootstrap]
+        bootstrap = Porkadot::Install::Bootstrap.new(self.config)
+        nodes = bootstrap.host
       else
         nodes = kubelets.kubelets.values
       end
@@ -29,6 +36,26 @@ module Porkadot; module Cmd
       ""
     end
 
+    desc "setup-node", "Setup node default settings"
+    option :node, type: :string
+    option :force, type: :boolean, default: false
+    option :bootstrap, type: :boolean, default: false
+    def setup_node
+      logger.info "Setup node default"
+      kubelets = Porkadot::Install::KubeletList.new(self.config)
+      nodes = []
+      if node = options[:node]
+        nodes = kubelets[node]
+      elsif options[:bootstrap]
+        bootstrap = Porkadot::Install::Bootstrap.new(self.config)
+        nodes = bootstrap.host
+      else
+        nodes = kubelets.kubelets.values
+      end
+      kubelets.setup_default hosts: nodes, force: options[:force]
+      ""
+    end
+
     desc "set-config", "Set cluster to kubeconfig"
     def set_config
       name = config.k8s.cluster_name

data/lib/porkadot/cmd/etcd.rb
@@ -0,0 +1,68 @@
+
+module Porkadot; module Cmd; module Etcd
+  class Cli < Porkadot::SubCommandBase
+    include Porkadot::Utils
+
+    default_task :all
+    desc "all", "Interact with etcd"
+    def all
+      "Use restore or backup sub commands."
+    end
+
+    desc "backup", "Backup etcd data"
+    option :node, type: :string
+    option :path, type: :string, default: "./backup", desc: "Directory where etcd backup data will be stored."
+    def backup
+      require 'date'
+
+      filename = "etcd-#{DateTime.now.to_s}.db"
+      path = File.join(options[:path], filename)
+
+      logger.info "Backing up etcd data to #{path}"
+      kubelets = Porkadot::Install::KubeletList.new(self.config)
+      kubelets.backup_etcd host: options[:node], path: path
+      ""
+    end
+
+    desc "start", "Start etcd"
+    option :node, type: :string
+    def start
+      logger.info "Start etcd"
+      kubelets = Porkadot::Install::KubeletList.new(self.config)
+      kubelets.start_etcd hosts: options[:node]
+      ""
+    end
+
+    desc "stop", "Stop etcd"
+    option :node, type: :string
+    def stop
+      logger.info "Start etcd"
+      kubelets = Porkadot::Install::KubeletList.new(self.config)
+      kubelets.stop_etcd hosts: options[:node]
+      ""
+    end
+
+    desc "restore", "Restore etcd data"
+    option :path, type: :string, default: "./backup", desc: "Directory where etcd backup data is stored."
+    def restore
+      invoke :stop, [], options
+
+      path = Dir.glob(File.join(options[:path], "etcd-*.db")).sort.reverse[0]
+      unless path
+        return "No backup data found...: #{options[:path]}"
+      end
+
+      logger.info "Restore etcd from #{path}"
+      kubelets = Porkadot::Install::KubeletList.new(self.config)
+      kubelets.restore_etcd path: path
+
+      invoke :start, [], options
+      ""
+    end
+
+    def self.subcommand_prefix
+      'etcd'
+    end
+  end
+end; end; end
+
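
A note on the restore selection above: backup filenames embed DateTime.now.to_s (an ISO 8601 timestamp), so a plain lexicographic sort is also a chronological sort and sort.reverse[0] yields the newest file. A standalone sketch of that rule; the file names below are invented examples, not real backups.

# Standalone illustration of the newest-backup selection used by `restore` above.
backups = [
  'backup/etcd-2024-03-01T10:00:00+00:00.db',
  'backup/etcd-2024-03-02T09:30:00+00:00.db',
  'backup/etcd-2024-02-28T23:59:59+00:00.db'
]
newest = backups.sort.reverse[0]
puts newest  # => backup/etcd-2024-03-02T09:30:00+00:00.db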

data/lib/porkadot/cmd/install.rb
@@ -26,6 +26,21 @@ module Porkadot; module Cmd; module Install
       ""
     end
 
+    desc "kubernetes", "Install kubernetes"
+    option :node, type: :string
+    def kubernetes
+      logger.info "Installing kubernetes"
+      kubelets = Porkadot::Install::KubeletList.new(self.config)
+      if node = options[:node]
+        nodes = kubelets[node]
+      else
+        nodes = Porkadot::Install::Bootstrap.new(self.config).host
+      end
+      k8s = Porkadot::Install::Kubernetes.new(self.config)
+      k8s.install(nodes)
+      ""
+    end
+
     desc "bootstrap", "Install bootstrap components"
     subcommand "bootstrap", Porkadot::Cmd::Install::Bootstrap::Cli
 

data/lib/porkadot/config.rb
@@ -31,16 +31,15 @@ module Porkadot
       self.raw.connection
     end
 
+    def addons
+      @addons ||= Porkadot::Configs::Addons.new(self)
+    end
+
     def lb
       @lb ||= Porkadot::Configs::Lb.new(self)
       return @lb
     end
 
-    def cni
-      @cni ||= Porkadot::Configs::Cni.new(self)
-      return @cni
-    end
-
     def bootstrap
       @bootstrap ||= Porkadot::Configs::Bootstrap.new(self)
       return @bootstrap
@@ -57,6 +56,11 @@ module Porkadot
       return @etcd
     end
 
+    def kubelet_default
+      @kubelet_default ||= Porkadot::Configs::KubeletDefault.new(self)
+      return @kubelet_default
+    end
+
     def nodes
       @nodes ||= {}.tap do |nodes|
         self.raw.nodes.each do |k, v|

data/lib/porkadot/configs/addons.rb
@@ -0,0 +1,21 @@
+
+module Porkadot; module Configs
+  class Addons
+    include Porkadot::ConfigUtils
+
+    def initialize config
+      @config = config
+      @raw = config.raw.addons
+    end
+
+    def target_path
+      File.join(self.config.assets_dir, 'kubernetes', 'manifests', 'addons')
+    end
+
+    def target_secrets_path
+      File.join(self.config.secrets_root_dir, 'kubernetes', 'manifests', 'addons')
+    end
+
+  end
+end; end
+

data/lib/porkadot/configs/certs.rb
@@ -9,6 +9,9 @@ module Porkadot; module Configs
     end
 
     def ipaddr?(addr)
+      if addr.nil?
+        return false
+      end
       IPAddr.new(addr)
       return true
     rescue IPAddr::InvalidAddressError

data/lib/porkadot/configs/etcd.rb
@@ -39,6 +39,33 @@ module Porkadot; module Configs
       return (self.raw.labels && self.raw.labels[Porkadot::ETCD_ADDRESS_LABEL]) || self.raw.hostname || self.name
     end
 
+    def listen_address label_key
+      listen_address = nil
+      if self.raw.labels
+        listen_address = self.raw.labels[label_key] || self.raw.labels[Porkadot::ETCD_LISTEN_ADDRESS_LABEL]
+      end
+
+      if !listen_adress
+        if self.ipaddr?(self.raw.hostname)
+          listen_address = self.raw.hostname
+        elsif self.ipaddr?(self.raw.name)
+          listen_address = self.raw.name
+        else
+          listen_address = '0.0.0.0'
+        end
+      end
+
+      return listen_address
+    end
+
+    def listen_client_address
+      return self.listen_address(Porkadot::ETCD_LISTEN_CLIENT_ADDRESS_LABEL)
+    end
+
+    def listen_peer_address
+      return self.listen_address(Porkadot::ETCD_LISTEN_PEER_ADDRESS_LABEL)
+    end
+
     def advertise_client_urls
       ["https://#{member_address}:2379"]
     end
@@ -48,11 +75,16 @@ module Porkadot; module Configs
     end
 
     def listen_client_urls
-      self.advertise_client_urls + ["https://127.0.0.1:2379"]
+      address = self.listen_client_address
+      if address != '0.0.0.0'
+        return ["https://#{address}:2379", "https://127.0.0.1:2379"]
+      else
+        return ["https://#{address}:2379"]
+      end
     end
 
     def listen_peer_urls
-      self.advertise_peer_urls
+      ["https://#{self.listen_client_address}:2380"]
     end
 
     def initial_cluster
@@ -72,6 +104,7 @@ module Porkadot; module Configs
           sans << "DNS:#{san}"
         end
       end
+      sans << "IP:127.0.0.1"
       return sans
     end
 

data/lib/porkadot/configs/kubelet.rb
@@ -1,4 +1,30 @@
 module Porkadot; module Configs
+  class KubeletDefault
+    include Porkadot::ConfigUtils
+
+    def initialize config
+      @config = config
+      @raw = ::Porkadot::Raw.new
+    end
+
+    def target_path
+      File.join(self.config.assets_dir, 'kubelet-default')
+    end
+
+    def target_secrets_path
+      File.join(self.config.secrets_root_dir, 'kubelet-default')
+    end
+
+    def addon_path
+      File.join(self.target_path, 'addons')
+    end
+
+    def addon_secrets_path
+      File.join(self.target_secrets_path, 'addons')
+    end
+
+  end
+
   class Kubelet
     include Porkadot::ConfigUtils
     attr_reader :name

data/lib/porkadot/configs/kubernetes.rb
@@ -1,4 +1,3 @@
-
 module Porkadot; module Configs
   class Kubernetes
     include Porkadot::ConfigUtils
@@ -35,10 +34,6 @@ module Porkadot; module Configs
       File.join(self.target_path, 'manifests')
     end
 
-    def manifests_secrets_path
-      File.join(self.target_secrets_path, 'manifests')
-    end
-
     def control_plane_endpoint_host_and_port
       endpoint = self.config.k8s.control_plane_endpoint
       raise "kubernetes.control_plane_endpoint should not be nil" unless endpoint
@@ -196,9 +191,9 @@ module Porkadot; module Configs
         --cluster-signing-key-file=/etc/kubernetes/pki/kubernetes/ca.key
         --controllers=*,bootstrapsigner,tokencleaner
         --leader-elect=true
-        --node-cidr-mask-size=24
         --root-ca-file=/etc/kubernetes/pki/kubernetes/ca.crt
         --service-account-private-key-file=/etc/kubernetes/pki/kubernetes/sa.key
+        --service-cluster-ip-range=#{config.k8s.networking.service_subnet}
        --use-service-account-credentials=true
        --v=#{self.log_level}
       ).map {|i| i.split('=', 2)}.to_h
@@ -249,13 +244,35 @@ module Porkadot; module Configs
     end
 
     def kubernetes_ip
-      cluster_ip_range = IPAddr.new(self.service_subnet)
-      cluster_ip_range.to_range.first(2)[1].to_s
+      cluster_ip_range = IPAddr.new(self.default_service_subnet)
+      cluster_ip_range.to_range.first(2)[1]
     end
 
     def dns_ip
-      cluster_ip_range = IPAddr.new(self.service_subnet)
-      cluster_ip_range.to_range.first(11)[10].to_s
+      cluster_ip_range = IPAddr.new(self.default_service_subnet)
+      cluster_ip_range.to_range.first(11)[10]
+    end
+
+    def default_service_subnet
+      self.service_subnet.split(',')[0]
+    end
+
+    def pod_v4subnet
+      if ip = self._pod_subnet.find{ |net| net.ipv4? }
+        return "#{ip.to_s}/#{ip.prefix}"
+      end
+    end
+    alias enable_ipv4 pod_v4subnet
+
+    def pod_v6subnet
+      if ip = self._pod_subnet.find{ |net| net.ipv6? }
+        return "#{ip.to_s}/#{ip.prefix}"
+      end
+    end
+    alias enable_ipv6 pod_v6subnet
+
+    def _pod_subnet
+      self.pod_subnet.split(",").map{|net| IPAddr.new(net)}
     end
   end
 end
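
The new pod_v4subnet/pod_v6subnet helpers above let pod_subnet hold a comma-separated dual-stack pair and return the first subnet of each address family. A self-contained sketch of that splitting logic; the IPv6 value is an assumed example, not a porkadot default.

# Self-contained sketch of the dual-stack subnet helpers added above.
require 'ipaddr'

pod_subnet = '10.244.0.0/16,fd00:10:244::/56'
nets = pod_subnet.split(',').map { |net| IPAddr.new(net) }

v4 = nets.find(&:ipv4?)
v6 = nets.find(&:ipv6?)

puts "#{v4}/#{v4.prefix}"  # => 10.244.0.0/16
puts "#{v6}/#{v6.prefix}"  # => fd00:10:244::/56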

data/lib/porkadot/const.rb
@@ -5,4 +5,7 @@ module Porkadot
   K8S_MASTER_LABEL = "k8s.unstable.cloud/master"
   ETCD_MEMBER_LABEL = "etcd.unstable.cloud/member"
   ETCD_ADDRESS_LABEL = "etcd.unstable.cloud/address"
+  ETCD_LISTEN_ADDRESS_LABEL = "etcd.unstable.cloud/listen-address"
+  ETCD_LISTEN_CLIENT_ADDRESS_LABEL = "etcd.unstable.cloud/listen-client-address"
+  ETCD_LISTEN_PEER_ADDRESS_LABEL = "etcd.unstable.cloud/listen-client-address"
 end

data/lib/porkadot/default.yaml
@@ -10,13 +10,25 @@ nodes: {}
 
 bootstrap: {}
 
-cni:
-  type: flannel
+addons:
+  enabled: [flannel, coredns, metallb, kubelet-rubber-stamp, storage-version-migrator]
+
   flannel:
     backend: vxlan
+    plugin_image_repository: rancher/mirrored-flannelcni-flannel-cni-plugin
+    plugin_image_tag: v1.0.1
+    daemon_image_repository: rancher/mirrored-flannelcni-flannel
+    daemon_image_tag: v0.17.0
+    resources:
+      requests:
+        cpu: "100m"
+        memory: "50Mi"
+      limits:
+        cpu: "100m"
+        memory: "50Mi"
+
+  coredns: {}
 
-lb:
-  type: metallb
   metallb:
     config: |
       address-pools:
@@ -25,20 +37,26 @@ lb:
        addresses:
        - 192.168.1.240-192.168.1.250
 
+  kubelet-rubber-stamp: {}
+
+  storage-version-migrator: {}
+
 etcd:
   image_repository: gcr.io/etcd-development/etcd
   image_tag: v3.4.13
   extra_env: []
 
 kubernetes:
-  kubernetes_version: v1.21.3
+  kubernetes_version: v1.23.5
+  crictl_version: v1.23.0
   image_repository: k8s.gcr.io
 
   networking:
-    cni_version: v0.8.2
+    cni_version: v1.0.1
     service_subnet: '10.254.0.0/24'
     pod_subnet: '10.244.0.0/16'
     dns_domain: 'cluster.local'
+    additional_domains: []
 
   apiserver:
     bind_port: 6443