porkadot 0.21.0 → 0.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/lib/porkadot/assets/bootstrap/manifests/kube-apiserver.bootstrap.yaml.erb +32 -0
  3. data/lib/porkadot/assets/bootstrap/manifests/kube-controller-manager.bootstrap.yaml.erb +23 -0
  4. data/lib/porkadot/assets/bootstrap/manifests/kube-scheduler.bootstrap.yaml.erb +23 -0
  5. data/lib/porkadot/assets/kubelet/install-deps.sh.erb +9 -0
  6. data/lib/porkadot/assets/kubelet/setup-containerd.sh.erb +8 -1
  7. data/lib/porkadot/assets/kubelet-default/install.sh.erb +14 -0
  8. data/lib/porkadot/assets/kubelet.rb +32 -0
  9. data/lib/porkadot/assets/kubernetes/install.secrets.sh.erb +8 -0
  10. data/lib/porkadot/assets/kubernetes/install.sh.erb +8 -1
  11. data/lib/porkadot/assets/kubernetes/kustomization.yaml.erb +7 -0
  12. data/lib/porkadot/assets/kubernetes/manifests/{coredns.yaml.erb → addons/coredns/coredns.yaml.erb} +2 -1
  13. data/lib/porkadot/assets/kubernetes/manifests/{dns-horizontal-autoscaler.yaml.erb → addons/coredns/dns-horizontal-autoscaler.yaml.erb} +0 -2
  14. data/lib/porkadot/assets/kubernetes/manifests/addons/coredns/kustomization.yaml.erb +3 -0
  15. data/lib/porkadot/assets/kubernetes/manifests/{flannel.yaml.erb → addons/flannel/flannel.yaml.erb} +39 -10
  16. data/lib/porkadot/assets/kubernetes/manifests/addons/flannel/kustomization.yaml.erb +2 -0
  17. data/lib/porkadot/assets/kubernetes/manifests/{kubelet-rubber-stamp.yaml.erb → addons/kubelet-rubber-stamp/kubelet-rubber-stamp.yaml.erb} +1 -1
  18. data/lib/porkadot/assets/kubernetes/manifests/addons/kubelet-rubber-stamp/kustomization.yaml.erb +2 -0
  19. data/lib/porkadot/assets/kubernetes/manifests/addons/kustomization.yaml.erb +4 -0
  20. data/lib/porkadot/assets/kubernetes/manifests/{000-metallb.yaml.erb → addons/metallb/000-metallb.yaml.erb} +0 -0
  21. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/kustomization.yaml.erb +6 -0
  22. data/lib/porkadot/assets/kubernetes/manifests/{metallb.config.yaml.erb → addons/metallb/metallb.config.yaml.erb} +1 -2
  23. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/metallb.yaml +480 -0
  24. data/lib/porkadot/assets/kubernetes/manifests/{metallb.yaml.erb → addons/metallb/metallb.yaml.erb} +41 -4
  25. data/lib/porkadot/assets/kubernetes/manifests/addons/storage-version-migrator/kustomization.yaml.erb +2 -0
  26. data/lib/porkadot/assets/kubernetes/manifests/{storage-version-migrator.yaml.erb → addons/storage-version-migrator/storage-version-migrator.yaml.erb} +0 -0
  27. data/lib/porkadot/assets/kubernetes/manifests/kube-apiserver.yaml.erb +32 -0
  28. data/lib/porkadot/assets/kubernetes/manifests/kube-controller-manager.yaml.erb +20 -6
  29. data/lib/porkadot/assets/kubernetes/manifests/kube-scheduler.yaml.erb +20 -6
  30. data/lib/porkadot/assets/kubernetes/manifests/kubelet.yaml.erb +0 -1
  31. data/lib/porkadot/assets/kubernetes/manifests/kustomization.yaml.erb +8 -0
  32. data/lib/porkadot/assets/kubernetes.rb +91 -18
  33. data/lib/porkadot/assets.rb +13 -3
  34. data/lib/porkadot/cmd/cli.rb +27 -0
  35. data/lib/porkadot/cmd/etcd.rb +68 -0
  36. data/lib/porkadot/cmd/install.rb +15 -0
  37. data/lib/porkadot/config.rb +9 -5
  38. data/lib/porkadot/configs/addons.rb +21 -0
  39. data/lib/porkadot/configs/certs.rb +3 -0
  40. data/lib/porkadot/configs/etcd.rb +35 -2
  41. data/lib/porkadot/configs/kubelet.rb +26 -0
  42. data/lib/porkadot/configs/kubernetes.rb +27 -10
  43. data/lib/porkadot/const.rb +3 -0
  44. data/lib/porkadot/default.yaml +24 -6
  45. data/lib/porkadot/install/kubelet.rb +137 -0
  46. data/lib/porkadot/install/kubernetes.rb +2 -2
  47. data/lib/porkadot/version.rb +1 -1
  48. data/lib/porkadot.rb +2 -2
  49. data/porkadot.gemspec +1 -0
  50. metadata +38 -14
  51. data/lib/porkadot/assets/kubernetes/manifests/metallb.secrets.yaml.erb +0 -13
  52. data/lib/porkadot/configs/cni.rb +0 -22
  53. data/lib/porkadot/configs/loadbalancer.rb +0 -26
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 839afa115dc53563a391b710c14ab686f6c45a5420a6d1f6c6eee21ebdb1e6cf
-  data.tar.gz: 8f8fbc1099bebe03b5f994050e083c385baad03536d15c14ef6ed1f412ce278c
+  metadata.gz: 8e2a062cd96fa6e9c56b2fd70f7d0dd4709265e1b5cf58057048cc19bf46868e
+  data.tar.gz: 4a18c93e458b1a822fe35b73d7af0b12804d4d4c174cbb8a3efc0daf30bbbc1e
 SHA512:
-  metadata.gz: 20194aa567e21c0e7af5caa6deb7645c617d58240c5685b0a90e477ea9331ea522618c206718b97a4218ab9d939fa6bd7b557df698703fb8c45c240dbb025e95
-  data.tar.gz: 3fdd45b9a6132bf0167c4e30c939aa42db77331a6007a850baa1adb67335731e639ccc9483435aecc7c3411d49094698846592a712205b5377adda17965ea930
+  metadata.gz: fef441fe9dc698fa5e993ae9b7d5a4e6270590aa2f8fcdbb3bca4601266faed8d6b5c96f545d3347716915bdbe0f78ebc1898caef201c1be50cc006955dec44d
+  data.tar.gz: a7bfadba85de2c3d631ebab8f74c4afaa75fcfdabeccbae6d20d4f23e2817185efc7d4174d4e901e19da8dbabcf4bdf522e12706dbc025113e6b0480d9b32826
data/lib/porkadot/assets/bootstrap/manifests/kube-apiserver.bootstrap.yaml.erb CHANGED
@@ -12,6 +12,9 @@ metadata:
 <%- end -%>
 spec:
   hostNetwork: true
+  securityContext:
+    seccompProfile:
+      type: RuntimeDefault
   containers:
   - name: kube-apiserver
     resources:
@@ -23,6 +26,35 @@ spec:
 <%- k8s.apiserver.args(bootstrap: true).each do |k, v| -%>
     - <%= k %><% if v ;%>=<%= v %><%; end %>
 <%- end -%>
+    livenessProbe:
+      failureThreshold: 8
+      httpGet:
+        host: 127.0.0.1
+        path: /livez
+        port: 6443
+        scheme: HTTPS
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      timeoutSeconds: 15
+    readinessProbe:
+      failureThreshold: 3
+      httpGet:
+        host: 127.0.0.1
+        path: /readyz
+        port: 6443
+        scheme: HTTPS
+      periodSeconds: 1
+      timeoutSeconds: 15
+    startupProbe:
+      failureThreshold: 24
+      httpGet:
+        host: 127.0.0.1
+        path: /livez
+        port: 6443
+        scheme: HTTPS
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      timeoutSeconds: 15
     env:
     - name: POD_IP
       valueFrom:
data/lib/porkadot/assets/bootstrap/manifests/kube-controller-manager.bootstrap.yaml.erb CHANGED
@@ -10,6 +10,9 @@ metadata:
     <%= k.to_s %>: <%= v %>
 <%- end -%>
 spec:
+  securityContext:
+    seccompProfile:
+      type: RuntimeDefault
   containers:
   - name: kube-controller-manager
     image: <%= k8s.image_repository %>/kube-controller-manager:<%= k8s.kubernetes_version %>
@@ -18,6 +21,26 @@ spec:
 <%- k8s.controller_manager.args(bootstrap: true).each do |k, v| -%>
     - <%= k %><% if v ;%>=<%= v %><%; end %>
 <%- end -%>
+    livenessProbe:
+      failureThreshold: 8
+      httpGet:
+        host: 127.0.0.1
+        path: /healthz
+        port: 10257
+        scheme: HTTPS
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      timeoutSeconds: 15
+    startupProbe:
+      failureThreshold: 24
+      httpGet:
+        host: 127.0.0.1
+        path: /healthz
+        port: 10257
+        scheme: HTTPS
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      timeoutSeconds: 15
     volumeMounts:
     - name: var-run-kubernetes
       mountPath: /var/run/kubernetes
data/lib/porkadot/assets/bootstrap/manifests/kube-scheduler.bootstrap.yaml.erb CHANGED
@@ -10,6 +10,9 @@ metadata:
     <%= k.to_s %>: <%= v %>
 <%- end -%>
 spec:
+  securityContext:
+    seccompProfile:
+      type: RuntimeDefault
   containers:
   - name: kube-scheduler
     image: <%= k8s.image_repository %>/kube-scheduler:<%= k8s.kubernetes_version %>
@@ -18,6 +21,26 @@ spec:
 <%- k8s.scheduler.args(bootstrap: true).each do |k, v| -%>
     - <%= k %><% if v ;%>=<%= v %><%; end %>
 <%- end -%>
+    livenessProbe:
+      failureThreshold: 8
+      httpGet:
+        host: 127.0.0.1
+        path: /healthz
+        port: 10259
+        scheme: HTTPS
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      timeoutSeconds: 15
+    startupProbe:
+      failureThreshold: 24
+      httpGet:
+        host: 127.0.0.1
+        path: /healthz
+        port: 10259
+        scheme: HTTPS
+      initialDelaySeconds: 10
+      periodSeconds: 10
+      timeoutSeconds: 15
     volumeMounts:
     - name: kubernetes
       mountPath: /etc/kubernetes
data/lib/porkadot/assets/kubelet/install-deps.sh.erb CHANGED
@@ -37,3 +37,12 @@ chmod +x ${ETCD_TMP}/etcdctl
 rm -f /opt/bin/etcdctl
 mv ${ETCD_TMP}/etcdctl /opt/bin/etcdctl-${ETCD_VER}
 ln -s /opt/bin/etcdctl-${ETCD_VER} /opt/bin/etcdctl
+
+CRICTL_VER="<%= global_config.k8s.crictl_version %>"
+CRICTL_URL=https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VER}/crictl-${CRICTL_VER}-linux-${architecture}.tar.gz
+CRICTL_TMP=$(mktemp -d)
+curl -L ${CRICTL_URL} -o ${CRICTL_TMP}/crictl.tar.gz
+tar zxvf ${CRICTL_TMP}/crictl.tar.gz -C ${CRICTL_TMP}/
+rm -f /opt/bin/crictl
+mv ${CRICTL_TMP}/crictl /opt/bin/crictl-${CRICTL_VER}
+ln -s /opt/bin/crictl-${CRICTL_VER} /opt/bin/crictl
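Once installed, crictl has to be told where containerd listens; a minimal usage sketch (the socket path is containerd's stock default, not something porkadot configures in this diff):

cat <<'EOF' > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF

/opt/bin/crictl version   # sanity-check the connection to containerd
/opt/bin/crictl ps        # list running containers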
data/lib/porkadot/assets/kubelet/setup-containerd.sh.erb CHANGED
@@ -5,6 +5,13 @@ ROOT=$(dirname "${BASH_SOURCE}")
 
 mkdir -p /etc/containerd
 containerd config default | tee /etc/containerd/config.toml
-sed -i -e "/containerd.runtimes.runc.options/a SystemdCgroup = true" /etc/containerd/config.toml
+
+grep SystemdCgroup /etc/containerd/config.toml && :
+
+if [[ $? == 0 ]]; then
+  sed -i -e "s/SystemdCgroup.*$/SystemdCgroup = true/" /etc/containerd/config.toml
+else
+  sed -i -e "/containerd.runtimes.runc.options/a SystemdCgroup = true" /etc/containerd/config.toml
+fi
 
 systemctl restart containerd
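The `grep SystemdCgroup ... && :` line is what keeps the probe from killing the script: under `set -e`, a failing command inside an `&&` list does not trigger errexit, while `$?` on the next line still carries grep's exit status. A standalone sketch of the idiom (messages are placeholders):

#!/bin/bash
set -eu

# `&& :` exempts the grep from errexit; $? still holds its status.
grep -q SystemdCgroup /etc/containerd/config.toml && :

if [[ $? == 0 ]]; then
  echo "key present: rewrite it in place"               # the s/.../ sed branch above
else
  echo "key absent: append it after the runc options"   # the /a sed branch above
fi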
data/lib/porkadot/assets/kubelet-default/install.sh.erb ADDED
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -eu
+export LC_ALL=C
+ROOT=$(dirname "${BASH_SOURCE}")
+
+# Install addons
+for addon in $(ls ${ROOT}/addons/); do
+  install_sh="${ROOT}/addons/${addon}/install.sh"
+  if [[ -f ${install_sh} ]]; then
+    echo "Install: ${install_sh}"
+    bash ${install_sh}
+  fi
+done
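The loop only looks one directory deep and runs an addon's install.sh when one exists; a self-contained sketch of that behavior against a scratch tree (addon names hypothetical):

#!/bin/bash
set -eu
ROOT=$(mktemp -d)
mkdir -p ${ROOT}/addons/foo ${ROOT}/addons/bar
# Only foo ships an install.sh; bar is silently skipped.
echo 'echo "hello from foo"' > ${ROOT}/addons/foo/install.sh
for addon in $(ls ${ROOT}/addons/); do
  install_sh="${ROOT}/addons/${addon}/install.sh"
  if [[ -f ${install_sh} ]]; then
    echo "Install: ${install_sh}"
    bash ${install_sh}
  fi
done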
data/lib/porkadot/assets/kubelet.rb CHANGED
@@ -7,11 +7,13 @@ module Porkadot; module Assets
   class KubeletList
     attr_reader :global_config
     attr_reader :logger
+    attr_reader :kubelet_default
     attr_reader :kubelets
 
     def initialize global_config
       @global_config = global_config
       @logger = global_config.logger
+      @kubelet_default = KubeletDefault.new(global_config.kubelet_default)
       @kubelets = {}
       global_config.nodes.each do |k, config|
         @kubelets[k] = Kubelet.new(config)
@@ -19,6 +21,7 @@ module Porkadot; module Assets
     end
 
     def render
+      self.kubelet_default.render
       self.kubelets.each do |_, v|
         v.render
       end
@@ -29,6 +32,35 @@ module Porkadot; module Assets
     end
   end
 
+  class KubeletDefault
+    include Porkadot::Assets
+    TEMPLATE_DIR = File.join(File.dirname(__FILE__), "kubelet-default")
+
+    attr_reader :global_config
+    attr_reader :config
+    attr_reader :logger
+    attr_reader :certs
+
+    def initialize config
+      @config = config
+      @logger = config.logger
+      @global_config = config.config
+      @certs = Porkadot::Assets::Certs::Kubernetes.new(global_config)
+    end
+
+    def render
+      logger.info "--> Rendering Kubelet default configs"
+      unless File.directory?(config.addon_path)
+        FileUtils.mkdir_p(config.addon_path)
+      end
+      unless File.directory?(config.addon_secrets_path)
+        FileUtils.mkdir_p(config.addon_secrets_path)
+      end
+
+      render_erb 'install.sh'
+    end
+  end
+
   class Kubelet
     include Porkadot::Assets
     TEMPLATE_DIR = File.join(File.dirname(__FILE__), "kubelet")
data/lib/porkadot/assets/kubernetes/install.secrets.sh.erb ADDED
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -eu
+export LC_ALL=C
+ROOT=$(dirname "${BASH_SOURCE}")
+
+/opt/bin/kubectl apply -R -f ${ROOT}/manifests
+
data/lib/porkadot/assets/kubernetes/install.sh.erb CHANGED
@@ -3,5 +3,12 @@
 set -eu
 export LC_ALL=C
 ROOT=$(dirname "${BASH_SOURCE}")
+KUBECTL_OPTS=${KUBECTL_OPTS:-""}
 
-/opt/bin/kubectl apply -f ${ROOT}/manifests/
+KUBECTL_OPTS="${KUBECTL_OPTS} --server-side --force-conflicts --prune"
+KUBECTL_OPTS="${KUBECTL_OPTS} -l kubernetes.unstable.cloud/installed-by=porkadot"
+<%- prune_allowlist.each do |a| -%>
+KUBECTL_OPTS="${KUBECTL_OPTS} --prune-whitelist=<%= a %>"
+<%- end -%>
+
+/opt/bin/kubectl apply ${KUBECTL_OPTS} -k ${ROOT}
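For illustration only, the rendered script might look like this, assuming a hypothetical prune_allowlist with a single entry core/v1/ConfigMap (the actual allowlist is rendered from the cluster config):

#!/bin/bash
set -eu
export LC_ALL=C
ROOT=$(dirname "${BASH_SOURCE}")
KUBECTL_OPTS=${KUBECTL_OPTS:-""}

# Server-side apply of the kustomization; --prune deletes objects that carry
# the installed-by label but no longer appear in the rendered manifests.
KUBECTL_OPTS="${KUBECTL_OPTS} --server-side --force-conflicts --prune"
KUBECTL_OPTS="${KUBECTL_OPTS} -l kubernetes.unstable.cloud/installed-by=porkadot"
KUBECTL_OPTS="${KUBECTL_OPTS} --prune-whitelist=core/v1/ConfigMap"

/opt/bin/kubectl apply ${KUBECTL_OPTS} -k ${ROOT}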
data/lib/porkadot/assets/kubernetes/kustomization.yaml.erb ADDED
@@ -0,0 +1,7 @@
+# Modify this file if you want to kustomize generated manifests
+# This file will not be overridden by Porkadot.
+labels:
+- pairs:
+    'kubernetes.unstable.cloud/installed-by': 'porkadot'
+resources:
+- manifests
data/lib/porkadot/assets/kubernetes/manifests/{coredns.yaml.erb → addons/coredns/coredns.yaml.erb} RENAMED
@@ -75,7 +75,7 @@ data:
             lameduck 5s
         }
         ready
-        kubernetes <%= k8s.networking.dns_domain %> in-addr.arpa ip6.arpa {
+        kubernetes <%= k8s.networking.dns_domain %> <%= k8s.networking.additional_domains.join(" ") %> in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
@@ -193,6 +193,7 @@ metadata:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
     kubernetes.io/name: "CoreDNS"
+    app.kubernetes.io/name: kube-dns
 spec:
   selector:
     k8s-app: kube-dns
data/lib/porkadot/assets/kubernetes/manifests/{dns-horizontal-autoscaler.yaml.erb → addons/coredns/dns-horizontal-autoscaler.yaml.erb} RENAMED
@@ -82,8 +82,6 @@ spec:
       securityContext:
         supplementalGroups: [ 65534 ]
         fsGroup: 65534
-      nodeSelector:
-        kubernetes.io/os: linux
       containers:
       - name: autoscaler
         image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.7.1
data/lib/porkadot/assets/kubernetes/manifests/addons/coredns/kustomization.yaml.erb ADDED
@@ -0,0 +1,3 @@
+resources:
+- coredns.yaml
+- dns-horizontal-autoscaler.yaml
data/lib/porkadot/assets/kubernetes/manifests/{flannel.yaml.erb → addons/flannel/flannel.yaml.erb} RENAMED
@@ -1,3 +1,5 @@
+<% cni = config.flannel -%>
+<% k8s = global_config.k8s -%>
 ---
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
@@ -125,9 +127,15 @@ data:
     }
   net-conf.json: |
     {
-      "Network": "<%= global_config.k8s.networking.pod_subnet %>",
+      <%- if k8s.networking.enable_ipv4 -%>
+      "Network": "<%= k8s.networking.pod_v4subnet %>",
+      <%- end -%>
+      <%- if k8s.networking.enable_ipv6 -%>
+      "EnableIPv6": true,
+      "IPv6Network": "<%= k8s.networking.pod_v6subnet %>",
+      <%- end -%>
       "Backend": {
-        "Type": "<%= global_config.cni.backend %>"
+        "Type": "<%= cni.backend %>"
       }
     }
 ---
@@ -165,8 +173,20 @@ spec:
         effect: NoSchedule
       serviceAccountName: flannel
       initContainers:
+      - name: install-cni-plugin
+        #image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply)
+        image: <%= cni.plugin_image_repository %>:<%= cni.plugin_image_tag %>
+        command:
+        - cp
+        args:
+        - -f
+        - /flannel
+        - /opt/cni/bin/flannel
+        volumeMounts:
+        - name: cni-plugin
+          mountPath: /opt/cni/bin
       - name: install-cni
-        image: quay.io/coreos/flannel:v0.14.0
+        image: <%= cni.daemon_image_repository %>:<%= cni.daemon_image_tag %>
         command:
         - cp
         args:
@@ -180,19 +200,14 @@ spec:
           mountPath: /etc/kube-flannel/
       containers:
       - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.14.0
+        image: <%= cni.daemon_image_repository %>:<%= cni.daemon_image_tag %>
         command:
         - /opt/bin/flanneld
         args:
         - --ip-masq
         - --kube-subnet-mgr
         resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
+          <%= u.to_yaml(cni.resources, 10)%>
         securityContext:
           privileged: false
           capabilities:
@@ -211,13 +226,27 @@ spec:
           mountPath: /run/flannel
         - name: flannel-cfg
           mountPath: /etc/kube-flannel/
+        - name: ipam-data
+          mountPath: /var/lib/cni/networks
+        - name: xtables-lock
+          mountPath: /run/xtables.lock
       volumes:
       - name: run
         hostPath:
          path: /run/flannel
+      - name: cni-plugin
+        hostPath:
+          path: /opt/cni/bin
       - name: cni
         hostPath:
           path: /etc/cni/net.d
+      - name: ipam-data
+        hostPath:
+          path: /var/lib/cni/networks
       - name: flannel-cfg
         configMap:
           name: kube-flannel-cfg
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
data/lib/porkadot/assets/kubernetes/manifests/{kubelet-rubber-stamp.yaml.erb → addons/kubelet-rubber-stamp/kubelet-rubber-stamp.yaml.erb} RENAMED
@@ -24,7 +24,7 @@ spec:
       - name: kubelet-rubber-stamp
         # image: quay.io/kontena/kubelet-rubber-stamp-amd64:0.2
         # Use following image until issue is fixed
-        image: yuanying/kubelet-rubber-stamp:0.3.0.y01
+        image: ghcr.io/porkadot/kubelet-rubber-stamp:0.22.0
         args:
         - "--v=2"
         imagePullPolicy: Always
data/lib/porkadot/assets/kubernetes/manifests/addons/kubelet-rubber-stamp/kustomization.yaml.erb ADDED
@@ -0,0 +1,2 @@
+resources:
+- kubelet-rubber-stamp.yaml
data/lib/porkadot/assets/kubernetes/manifests/addons/kustomization.yaml.erb ADDED
@@ -0,0 +1,4 @@
+resources:
+<%- config.enabled.each do |a| -%>
+- <%= a %>
+<%- end %>
data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/kustomization.yaml.erb ADDED
@@ -0,0 +1,6 @@
+namespace: metallb-system
+
+resources:
+- 000-metallb.yaml
+- metallb.config.yaml
+- metallb.yaml
data/lib/porkadot/assets/kubernetes/manifests/{metallb.config.yaml.erb → addons/metallb/metallb.config.yaml.erb} RENAMED
@@ -1,4 +1,3 @@
-<% k8s = global_config.k8s -%>
 ---
 apiVersion: v1
 kind: ConfigMap
@@ -9,5 +8,5 @@ metadata:
   namespace: metallb-system
 data:
   config: |
-    <%= u.indent(global_config.lb.lb_config, 4) %>
+    <%= u.indent(config.metallb.config, 4) %>
 