porkadot 0.19.1 → 0.22.2

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/lib/porkadot/assets/bootstrap/manifests/kube-apiserver.bootstrap.yaml.erb +32 -0
  3. data/lib/porkadot/assets/bootstrap/manifests/kube-controller-manager.bootstrap.yaml.erb +23 -0
  4. data/lib/porkadot/assets/bootstrap/manifests/kube-scheduler.bootstrap.yaml.erb +23 -0
  5. data/lib/porkadot/assets/kubelet/install-deps.sh.erb +9 -0
  6. data/lib/porkadot/assets/kubelet/setup-containerd.sh.erb +8 -1
  7. data/lib/porkadot/assets/kubelet-default/install.sh.erb +14 -0
  8. data/lib/porkadot/assets/kubelet.rb +32 -0
  9. data/lib/porkadot/assets/kubernetes/install.secrets.sh.erb +8 -0
  10. data/lib/porkadot/assets/kubernetes/install.sh.erb +8 -1
  11. data/lib/porkadot/assets/kubernetes/kubeconfig.yaml.erb +19 -0
  12. data/lib/porkadot/assets/kubernetes/kustomization.yaml.erb +7 -0
  13. data/lib/porkadot/assets/kubernetes/manifests/{coredns.yaml.erb → addons/coredns/coredns.yaml.erb} +2 -1
  14. data/lib/porkadot/assets/kubernetes/manifests/{dns-horizontal-autoscaler.yaml.erb → addons/coredns/dns-horizontal-autoscaler.yaml.erb} +0 -2
  15. data/lib/porkadot/assets/kubernetes/manifests/addons/coredns/kustomization.yaml.erb +3 -0
  16. data/lib/porkadot/assets/kubernetes/manifests/{flannel.yaml.erb → addons/flannel/flannel.yaml.erb} +39 -10
  17. data/lib/porkadot/assets/kubernetes/manifests/addons/flannel/kustomization.yaml.erb +2 -0
  18. data/lib/porkadot/assets/kubernetes/manifests/{kubelet-rubber-stamp.yaml.erb → addons/kubelet-rubber-stamp/kubelet-rubber-stamp.yaml.erb} +1 -1
  19. data/lib/porkadot/assets/kubernetes/manifests/addons/kubelet-rubber-stamp/kustomization.yaml.erb +2 -0
  20. data/lib/porkadot/assets/kubernetes/manifests/addons/kustomization.yaml.erb +4 -0
  21. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/000-metallb.yaml.erb +7 -0
  22. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/kustomization.yaml.erb +4 -0
  23. data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/metallb.config.yaml.erb +12 -0
  24. data/lib/porkadot/assets/kubernetes/manifests/{metallb.secrets.yaml.erb → addons/metallb/metallb.secrets.yaml.erb} +0 -0
  25. data/lib/porkadot/assets/kubernetes/manifests/{metallb.yaml.erb → addons/metallb/metallb.yaml.erb} +71 -41
  26. data/lib/porkadot/assets/kubernetes/manifests/addons/storage-version-migrator/kustomization.yaml.erb +2 -0
  27. data/lib/porkadot/assets/kubernetes/manifests/{storage-version-migrator.yaml.erb → addons/storage-version-migrator/storage-version-migrator.yaml.erb} +0 -0
  28. data/lib/porkadot/assets/kubernetes/manifests/kube-apiserver.secrets.yaml.erb +1 -0
  29. data/lib/porkadot/assets/kubernetes/manifests/kube-apiserver.yaml.erb +32 -0
  30. data/lib/porkadot/assets/kubernetes/manifests/kube-controller-manager.yaml.erb +20 -6
  31. data/lib/porkadot/assets/kubernetes/manifests/kube-scheduler.yaml.erb +20 -6
  32. data/lib/porkadot/assets/kubernetes/manifests/kubelet.yaml.erb +0 -1
  33. data/lib/porkadot/assets/kubernetes/manifests/kustomization.yaml.erb +8 -0
  34. data/lib/porkadot/assets/kubernetes.rb +94 -16
  35. data/lib/porkadot/assets.rb +13 -3
  36. data/lib/porkadot/cmd/cli.rb +16 -0
  37. data/lib/porkadot/cmd/install.rb +15 -0
  38. data/lib/porkadot/config.rb +9 -5
  39. data/lib/porkadot/configs/addons.rb +21 -0
  40. data/lib/porkadot/configs/kubelet.rb +26 -0
  41. data/lib/porkadot/configs/kubernetes.rb +29 -10
  42. data/lib/porkadot/default.yaml +25 -7
  43. data/lib/porkadot/install/kubelet.rb +25 -0
  44. data/lib/porkadot/install/kubernetes.rb +3 -2
  45. data/lib/porkadot/version.rb +1 -1
  46. data/lib/porkadot.rb +1 -2
  47. metadata +23 -11
  48. data/lib/porkadot/configs/cni.rb +0 -22
  49. data/lib/porkadot/configs/loadbalancer.rb +0 -26
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 834e1f31cbbf8c7c8766162945572512fc0311dbf772df008f85ef2a00b3ea3d
- data.tar.gz: f453e7a4899673f08a550b69c41b30c908eb5bf0906a67eee79a21ccd5fc1dcd
+ metadata.gz: 89c9072a82772720ff6d492d2dcaf475ef31460bc108886be716b1b7b0e0a3d7
+ data.tar.gz: edcc58e0f9e5a616020caa2348a46ecb06e796930fb565efcc6dfad25244d69b
  SHA512:
- metadata.gz: e13576f10e90eb2d277302bfcfe7cecb7feb681d25070052c50b6060b95e0b455061d093d8ddde84a69c650e2fa6f2b3775e25913cf743023c2ee036bdef0764
- data.tar.gz: 5714fefdd57b9683974ea42f6d111ac87b66f723221f5f0852c95b758d92cf19f6eac32c9ddd2e98e1f77dcbac4db70c735eddf4999cb39be71ad2717efb5d5d
+ metadata.gz: aa12a3f43721a233b17f46708cced2989430da72ccd3e90be46c72b4d2d01b675372f07f609050d0c49cb966d600c40b2eb209a591715e62c411d148a9ace680
+ data.tar.gz: 2cacb639c73ecb17300b48ba50e923a15a02f95b619fec2c05d3c3ae50eef7f9b5ae5a62fc784c4957bcf8a04850384d1bca7ba6c623101d812d669cd8939423
data/lib/porkadot/assets/bootstrap/manifests/kube-apiserver.bootstrap.yaml.erb CHANGED
@@ -12,6 +12,9 @@ metadata:
  <%- end -%>
  spec:
    hostNetwork: true
+   securityContext:
+     seccompProfile:
+       type: RuntimeDefault
    containers:
    - name: kube-apiserver
      resources:
@@ -23,6 +26,35 @@ spec:
      <%- k8s.apiserver.args(bootstrap: true).each do |k, v| -%>
      - <%= k %><% if v ;%>=<%= v %><%; end %>
      <%- end -%>
+     livenessProbe:
+       failureThreshold: 8
+       httpGet:
+         host: 127.0.0.1
+         path: /livez
+         port: 6443
+         scheme: HTTPS
+       initialDelaySeconds: 10
+       periodSeconds: 10
+       timeoutSeconds: 15
+     readinessProbe:
+       failureThreshold: 3
+       httpGet:
+         host: 127.0.0.1
+         path: /readyz
+         port: 6443
+         scheme: HTTPS
+       periodSeconds: 1
+       timeoutSeconds: 15
+     startupProbe:
+       failureThreshold: 24
+       httpGet:
+         host: 127.0.0.1
+         path: /livez
+         port: 6443
+         scheme: HTTPS
+       initialDelaySeconds: 10
+       periodSeconds: 10
+       timeoutSeconds: 15
      env:
      - name: POD_IP
        valueFrom:
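All three probes target the API server's own health endpoints over the host network. A hedged spot-check from the node itself, with port and paths exactly as in the hunk above (-k skips certificate verification):

    # Verify the endpoints the new probes poll:
    curl -k https://127.0.0.1:6443/livez
    curl -k https://127.0.0.1:6443/readyz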
data/lib/porkadot/assets/bootstrap/manifests/kube-controller-manager.bootstrap.yaml.erb CHANGED
@@ -10,6 +10,9 @@ metadata:
      <%= k.to_s %>: <%= v %>
  <%- end -%>
  spec:
+   securityContext:
+     seccompProfile:
+       type: RuntimeDefault
    containers:
    - name: kube-controller-manager
      image: <%= k8s.image_repository %>/kube-controller-manager:<%= k8s.kubernetes_version %>
@@ -18,6 +21,26 @@ spec:
      <%- k8s.controller_manager.args(bootstrap: true).each do |k, v| -%>
      - <%= k %><% if v ;%>=<%= v %><%; end %>
      <%- end -%>
+     livenessProbe:
+       failureThreshold: 8
+       httpGet:
+         host: 127.0.0.1
+         path: /healthz
+         port: 10257
+         scheme: HTTPS
+       initialDelaySeconds: 10
+       periodSeconds: 10
+       timeoutSeconds: 15
+     startupProbe:
+       failureThreshold: 24
+       httpGet:
+         host: 127.0.0.1
+         path: /healthz
+         port: 10257
+         scheme: HTTPS
+       initialDelaySeconds: 10
+       periodSeconds: 10
+       timeoutSeconds: 15
      volumeMounts:
      - name: var-run-kubernetes
        mountPath: /var/run/kubernetes
data/lib/porkadot/assets/bootstrap/manifests/kube-scheduler.bootstrap.yaml.erb CHANGED
@@ -10,6 +10,9 @@ metadata:
      <%= k.to_s %>: <%= v %>
  <%- end -%>
  spec:
+   securityContext:
+     seccompProfile:
+       type: RuntimeDefault
    containers:
    - name: kube-scheduler
      image: <%= k8s.image_repository %>/kube-scheduler:<%= k8s.kubernetes_version %>
@@ -18,6 +21,26 @@ spec:
      <%- k8s.scheduler.args(bootstrap: true).each do |k, v| -%>
      - <%= k %><% if v ;%>=<%= v %><%; end %>
      <%- end -%>
+     livenessProbe:
+       failureThreshold: 8
+       httpGet:
+         host: 127.0.0.1
+         path: /healthz
+         port: 10259
+         scheme: HTTPS
+       initialDelaySeconds: 10
+       periodSeconds: 10
+       timeoutSeconds: 15
+     startupProbe:
+       failureThreshold: 24
+       httpGet:
+         host: 127.0.0.1
+         path: /healthz
+         port: 10259
+         scheme: HTTPS
+       initialDelaySeconds: 10
+       periodSeconds: 10
+       timeoutSeconds: 15
      volumeMounts:
      - name: kubernetes
        mountPath: /etc/kubernetes
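kube-controller-manager and kube-scheduler get the same liveness/startup probe pair, pointed at their secure health ports. The equivalent manual check from the node:

    curl -k https://127.0.0.1:10257/healthz   # kube-controller-manager
    curl -k https://127.0.0.1:10259/healthz   # kube-scheduler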
data/lib/porkadot/assets/kubelet/install-deps.sh.erb CHANGED
@@ -37,3 +37,12 @@ chmod +x ${ETCD_TMP}/etcdctl
  rm -f /opt/bin/etcdctl
  mv ${ETCD_TMP}/etcdctl /opt/bin/etcdctl-${ETCD_VER}
  ln -s /opt/bin/etcdctl-${ETCD_VER} /opt/bin/etcdctl
+
+ CRICTL_VER="<%= global_config.k8s.crictl_version %>"
+ CRICTL_URL=https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VER}/crictl-${CRICTL_VER}-linux-${architecture}.tar.gz
+ CRICTL_TMP=$(mktemp -d)
+ curl -L ${CRICTL_URL} -o ${CRICTL_TMP}/crictl.tar.gz
+ tar zxvf ${CRICTL_TMP}/crictl.tar.gz -C ${CRICTL_TMP}/
+ rm -f /opt/bin/crictl
+ mv ${CRICTL_TMP}/crictl /opt/bin/crictl-${CRICTL_VER}
+ ln -s /opt/bin/crictl-${CRICTL_VER} /opt/bin/crictl
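crictl is installed with the same pattern as etcdctl above: a version-suffixed binary next to a symlink, so an upgrade is an atomic symlink flip. A hedged sanity check after a run (the actual version comes from crictl_version in the config):

    readlink /opt/bin/crictl      # -> /opt/bin/crictl-<CRICTL_VER>
    /opt/bin/crictl --version     # should report the same pinned version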
data/lib/porkadot/assets/kubelet/setup-containerd.sh.erb CHANGED
@@ -5,6 +5,13 @@ ROOT=$(dirname "${BASH_SOURCE}")
 
  mkdir -p /etc/containerd
  containerd config default | tee /etc/containerd/config.toml
- sed -i -e "/containerd.runtimes.runc.options/a SystemdCgroup = true" /etc/containerd/config.toml
+
+ grep SystemdCgroup /etc/containerd/config.toml && :
+
+ if [[ $? == 0 ]]; then
+   sed -i -e "s/SystemdCgroup.*$/SystemdCgroup = true/" /etc/containerd/config.toml
+ else
+   sed -i -e "/containerd.runtimes.runc.options/a SystemdCgroup = true" /etc/containerd/config.toml
+ fi
 
  systemctl restart containerd
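The rewritten block makes the SystemdCgroup tweak idempotent: newer containerd releases already emit the key in their default config, so it is now rewritten in place and only appended under the runc options table when absent. The trailing && : lets a non-matching grep leave its status in $? without aborting a script running under set -e. To confirm the result after a run:

    grep -n 'SystemdCgroup' /etc/containerd/config.toml   # expect: SystemdCgroup = true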
data/lib/porkadot/assets/kubelet-default/install.sh.erb ADDED
@@ -0,0 +1,14 @@
+ #!/bin/bash
+
+ set -eu
+ export LC_ALL=C
+ ROOT=$(dirname "${BASH_SOURCE}")
+
+ # Install addons
+ for addon in $(ls ${ROOT}/addons/); do
+   install_sh="${ROOT}/addons/${addon}/install.sh"
+   if [[ -f ${install_sh} ]]; then
+     echo "Install: ${install_sh}"
+     bash ${install_sh}
+   fi
+ done
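This new entry point simply walks ${ROOT}/addons/ and runs each per-addon install.sh it finds, so a rendered tree is expected to look roughly like the sketch below (addon names are illustrative):

    # ${ROOT}/addons/<addon>/install.sh   # optional, one per addon
    bash ${ROOT}/install.sh               # runs every install.sh that exists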
data/lib/porkadot/assets/kubelet.rb CHANGED
@@ -7,11 +7,13 @@ module Porkadot; module Assets
    class KubeletList
      attr_reader :global_config
      attr_reader :logger
+     attr_reader :kubelet_default
      attr_reader :kubelets
 
      def initialize global_config
        @global_config = global_config
        @logger = global_config.logger
+       @kubelet_default = KubeletDefault.new(global_config.kubelet_default)
        @kubelets = {}
        global_config.nodes.each do |k, config|
          @kubelets[k] = Kubelet.new(config)
@@ -19,6 +21,7 @@ module Porkadot; module Assets
      end
 
      def render
+       self.kubelet_default.render
        self.kubelets.each do |_, v|
          v.render
        end
@@ -29,6 +32,35 @@ module Porkadot; module Assets
      end
    end
 
+   class KubeletDefault
+     include Porkadot::Assets
+     TEMPLATE_DIR = File.join(File.dirname(__FILE__), "kubelet-default")
+
+     attr_reader :global_config
+     attr_reader :config
+     attr_reader :logger
+     attr_reader :certs
+
+     def initialize config
+       @config = config
+       @logger = config.logger
+       @global_config = config.config
+       @certs = Porkadot::Assets::Certs::Kubernetes.new(global_config)
+     end
+
+     def render
+       logger.info "--> Rendering Kubelet default configs"
+       unless File.directory?(config.addon_path)
+         FileUtils.mkdir_p(config.addon_path)
+       end
+       unless File.directory?(config.addon_secrets_path)
+         FileUtils.mkdir_p(config.addon_secrets_path)
+       end
+
+       render_erb 'install.sh'
+     end
+   end
+
    class Kubelet
      include Porkadot::Assets
      TEMPLATE_DIR = File.join(File.dirname(__FILE__), "kubelet")
data/lib/porkadot/assets/kubernetes/install.secrets.sh.erb ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+
+ set -eu
+ export LC_ALL=C
+ ROOT=$(dirname "${BASH_SOURCE}")
+
+ /opt/bin/kubectl apply -R -f ${ROOT}/manifests
+
data/lib/porkadot/assets/kubernetes/install.sh.erb CHANGED
@@ -3,5 +3,12 @@
  set -eu
  export LC_ALL=C
  ROOT=$(dirname "${BASH_SOURCE}")
+ KUBECTL_OPTS=${KUBECTL_OPTS:-""}
 
- /opt/bin/kubectl apply -f ${ROOT}/manifests/
+ KUBECTL_OPTS="${KUBECTL_OPTS} --server-side --force-conflicts --prune"
+ KUBECTL_OPTS="${KUBECTL_OPTS} -l kubernetes.unstable.cloud/installed-by=porkadot"
+ <%- prune_allowlist.each do |a| -%>
+ KUBECTL_OPTS="${KUBECTL_OPTS} --prune-whitelist=<%= a %>"
+ <%- end -%>
+
+ /opt/bin/kubectl apply ${KUBECTL_OPTS} -k ${ROOT}
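After ERB expansion, the apply becomes a server-side, label-scoped prune of the whole kustomization. A sketch of the rendered command; the --prune-whitelist entries are illustrative, the real ones come from prune_allowlist:

    /opt/bin/kubectl apply \
      --server-side --force-conflicts \
      --prune -l kubernetes.unstable.cloud/installed-by=porkadot \
      --prune-whitelist=core/v1/ConfigMap \
      --prune-whitelist=apps/v1/Deployment \
      -k ${ROOT}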
data/lib/porkadot/assets/kubernetes/kubeconfig.yaml.erb ADDED
@@ -0,0 +1,19 @@
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - name: kubernetes
+   cluster:
+     certificate-authority-data: <%= certs.kubernetes.to_base64(:ca_cert) %>
+     server: https://127.0.0.1:<%= global_config.k8s.apiserver.bind_port %>
+ users:
+ - name: admin
+   user:
+     client-certificate-data: <%= certs.kubernetes.to_base64(:client_cert) %>
+     client-key-data: <%= certs.kubernetes.to_base64(:client_key) %>
+ contexts:
+ - context:
+     cluster: kubernetes
+     user: admin
+   name: admin-context
+ current-context: admin-context
+
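The generated kubeconfig embeds the CA and the admin client pair inline, so it can be smoke-tested as-is (the path here is hypothetical; use wherever porkadot renders it):

    kubectl --kubeconfig kubeconfig.yaml config current-context   # admin-context
    kubectl --kubeconfig kubeconfig.yaml get nodes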
data/lib/porkadot/assets/kubernetes/kustomization.yaml.erb ADDED
@@ -0,0 +1,7 @@
+ # Modify this file if you want to kustomize generated manifests
+ # This file will not be overridden by Porkadot.
+ labels:
+ - pairs:
+     'kubernetes.unstable.cloud/installed-by': 'porkadot'
+ resources:
+ - manifests
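Since install.sh now applies with -k, this user-editable kustomization is the entry point: it stamps the installed-by label (the same one the prune above selects on) onto every generated object. The output can be previewed offline:

    kubectl kustomize ${ROOT} | grep 'installed-by'   # the label should appear on each object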
data/lib/porkadot/assets/kubernetes/manifests/{coredns.yaml.erb → addons/coredns/coredns.yaml.erb} RENAMED
@@ -75,7 +75,7 @@ data:
          lameduck 5s
        }
        ready
-       kubernetes <%= k8s.networking.dns_domain %> in-addr.arpa ip6.arpa {
+       kubernetes <%= k8s.networking.dns_domain %> <%= k8s.networking.additional_domains.join(" ") %> in-addr.arpa ip6.arpa {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
          ttl 30
@@ -193,6 +193,7 @@ metadata:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
      kubernetes.io/name: "CoreDNS"
+     app.kubernetes.io/name: kube-dns
  spec:
    selector:
      k8s-app: kube-dns
data/lib/porkadot/assets/kubernetes/manifests/{dns-horizontal-autoscaler.yaml.erb → addons/coredns/dns-horizontal-autoscaler.yaml.erb} RENAMED
@@ -82,8 +82,6 @@ spec:
      securityContext:
        supplementalGroups: [ 65534 ]
        fsGroup: 65534
-     nodeSelector:
-       kubernetes.io/os: linux
      containers:
      - name: autoscaler
        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.7.1
data/lib/porkadot/assets/kubernetes/manifests/addons/coredns/kustomization.yaml.erb ADDED
@@ -0,0 +1,3 @@
+ resources:
+ - coredns.yaml
+ - dns-horizontal-autoscaler.yaml
data/lib/porkadot/assets/kubernetes/manifests/{flannel.yaml.erb → addons/flannel/flannel.yaml.erb} RENAMED
@@ -1,3 +1,5 @@
+ <% cni = config.flannel -%>
+ <% k8s = global_config.k8s -%>
  ---
  apiVersion: policy/v1beta1
  kind: PodSecurityPolicy
@@ -125,9 +127,15 @@ data:
      }
    net-conf.json: |
      {
-       "Network": "<%= global_config.k8s.networking.pod_subnet %>",
+       <%- if k8s.networking.enable_ipv4 -%>
+       "Network": "<%= k8s.networking.pod_v4subnet %>",
+       <%- end -%>
+       <%- if k8s.networking.enable_ipv6 -%>
+       "EnableIPv6": true,
+       "IPv6Network": "<%= k8s.networking.pod_v6subnet %>",
+       <%- end -%>
        "Backend": {
-         "Type": "<%= global_config.cni.backend %>"
+         "Type": "<%= cni.backend %>"
        }
      }
  ---
@@ -165,8 +173,20 @@ spec:
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
+     - name: install-cni-plugin
+       #image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply)
+       image: <%= cni.plugin_image_repository %>:<%= cni.plugin_image_tag %>
+       command:
+       - cp
+       args:
+       - -f
+       - /flannel
+       - /opt/cni/bin/flannel
+       volumeMounts:
+       - name: cni-plugin
+         mountPath: /opt/cni/bin
      - name: install-cni
-       image: quay.io/coreos/flannel:v0.13.0
+       image: <%= cni.daemon_image_repository %>:<%= cni.daemon_image_tag %>
        command:
        - cp
        args:
@@ -180,19 +200,14 @@ spec:
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
-       image: quay.io/coreos/flannel:v0.13.0
+       image: <%= cni.daemon_image_repository %>:<%= cni.daemon_image_tag %>
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
-         requests:
-           cpu: "100m"
-           memory: "50Mi"
-         limits:
-           cpu: "100m"
-           memory: "50Mi"
+         <%= u.to_yaml(cni.resources, 10)%>
        securityContext:
          privileged: false
          capabilities:
@@ -211,13 +226,27 @@ spec:
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
+       - name: ipam-data
+         mountPath: /var/lib/cni/networks
+       - name: xtables-lock
+         mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
+     - name: cni-plugin
+       hostPath:
+         path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
+     - name: ipam-data
+       hostPath:
+         path: /var/lib/cni/networks
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
+     - name: xtables-lock
+       hostPath:
+         path: /run/xtables.lock
+         type: FileOrCreate
data/lib/porkadot/assets/kubernetes/manifests/{kubelet-rubber-stamp.yaml.erb → addons/kubelet-rubber-stamp/kubelet-rubber-stamp.yaml.erb} RENAMED
@@ -24,7 +24,7 @@ spec:
      - name: kubelet-rubber-stamp
        # image: quay.io/kontena/kubelet-rubber-stamp-amd64:0.2
        # Use following image until issue is fixed
-       image: yuanying/kubelet-rubber-stamp:0.3.0.y01
+       image: ghcr.io/porkadot/kubelet-rubber-stamp:0.22.0
        args:
        - "--v=2"
        imagePullPolicy: Always
data/lib/porkadot/assets/kubernetes/manifests/addons/kubelet-rubber-stamp/kustomization.yaml.erb ADDED
@@ -0,0 +1,2 @@
+ resources:
+ - kubelet-rubber-stamp.yaml
data/lib/porkadot/assets/kubernetes/manifests/addons/kustomization.yaml.erb ADDED
@@ -0,0 +1,4 @@
+ resources:
+ <%- config.enabled.each do |a| -%>
+ - <%= a %>
+ <%- end %>
data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/000-metallb.yaml.erb ADDED
@@ -0,0 +1,7 @@
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+   labels:
+     app: metallb
+   name: metallb-system
+
data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/kustomization.yaml.erb ADDED
@@ -0,0 +1,4 @@
+ resources:
+ - 000-metallb.yaml
+ - metallb.config.yaml
+ - metallb.yaml
data/lib/porkadot/assets/kubernetes/manifests/addons/metallb/metallb.config.yaml.erb ADDED
@@ -0,0 +1,12 @@
+ ---
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+   labels:
+     app: metallb
+   name: config
+   namespace: metallb-system
+ data:
+   config: |
+     <%= u.indent(config.metallb.config, 4) %>
+
data/lib/porkadot/assets/kubernetes/manifests/{metallb.yaml.erb → addons/metallb/metallb.yaml.erb} RENAMED
@@ -1,11 +1,3 @@
- <% k8s = global_config.k8s -%>
- apiVersion: v1
- kind: Namespace
- metadata:
-   labels:
-     app: metallb
-   name: metallb-system
- ---
  apiVersion: policy/v1beta1
  kind: PodSecurityPolicy
  metadata:
@@ -58,9 +50,7 @@ metadata:
  spec:
    allowPrivilegeEscalation: false
    allowedCapabilities:
-   - NET_ADMIN
    - NET_RAW
-   - SYS_ADMIN
    allowedHostPaths: []
    defaultAddCapabilities: []
    defaultAllowPrivilegeEscalation: false
@@ -72,6 +62,8 @@ spec:
    hostPorts:
    - max: 7472
      min: 7472
+   - max: 7946
+     min: 7946
    privileged: true
    readOnlyRootFilesystem: true
    requiredDropCapabilities:
@@ -118,7 +110,6 @@ rules:
    - get
    - list
    - watch
-   - update
  - apiGroups:
    - ''
    resources:
@@ -158,6 +149,13 @@ rules:
    - get
    - list
    - watch
+ - apiGroups: ["discovery.k8s.io"]
+   resources:
+   - endpointslices
+   verbs:
+   - get
+   - list
+   - watch
  - apiGroups:
    - ''
    resources:
@@ -207,6 +205,37 @@ rules:
    - list
  ---
  apiVersion: rbac.authorization.k8s.io/v1
+ kind: Role
+ metadata:
+   labels:
+     app: metallb
+   name: controller
+   namespace: metallb-system
+ rules:
+ - apiGroups:
+   - ''
+   resources:
+   - secrets
+   verbs:
+   - create
+ - apiGroups:
+   - ''
+   resources:
+   - secrets
+   resourceNames:
+   - memberlist
+   verbs:
+   - list
+ - apiGroups:
+   - apps
+   resources:
+   - deployments
+   resourceNames:
+   - controller
+   verbs:
+   - get
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    labels:
@@ -268,6 +297,21 @@ subjects:
  - kind: ServiceAccount
    name: speaker
  ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+   labels:
+     app: metallb
+   name: controller
+   namespace: metallb-system
+ roleRef:
+   apiGroup: rbac.authorization.k8s.io
+   kind: Role
+   name: controller
+ subjects:
+ - kind: ServiceAccount
+   name: controller
+ ---
  apiVersion: apps/v1
  kind: DaemonSet
  metadata:
@@ -308,47 +352,44 @@ spec:
            fieldRef:
              fieldPath: status.podIP
        # needed when another software is also using memberlist / port 7946
+       # when changing this default you also need to update the container ports definition
+       # and the PodSecurityPolicy hostPorts definition
        #- name: METALLB_ML_BIND_PORT
        #  value: "7946"
        - name: METALLB_ML_LABELS
          value: "app=metallb,component=speaker"
-       - name: METALLB_ML_NAMESPACE
-         valueFrom:
-           fieldRef:
-             fieldPath: metadata.namespace
        - name: METALLB_ML_SECRET_KEY
          valueFrom:
            secretKeyRef:
              name: memberlist
              key: secretkey
-       image: metallb/speaker:v0.9.4
-       imagePullPolicy: Always
+       image: quay.io/metallb/speaker:v0.10.2
        name: speaker
        ports:
        - containerPort: 7472
          name: monitoring
-       resources:
-         limits:
-           cpu: 100m
-           memory: 100Mi
+       - containerPort: 7946
+         name: memberlist-tcp
+       - containerPort: 7946
+         name: memberlist-udp
+         protocol: UDP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
-           - NET_ADMIN
            - NET_RAW
-           - SYS_ADMIN
            drop:
            - ALL
          readOnlyRootFilesystem: true
      hostNetwork: true
      nodeSelector:
-       beta.kubernetes.io/os: linux
+       kubernetes.io/os: linux
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 2
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
+       operator: Exists
@@ -377,16 +418,16 @@ spec:
      - args:
        - --port=7472
        - --config=config
-       image: metallb/controller:v0.9.5
-       imagePullPolicy: Always
+       env:
+       - name: METALLB_ML_SECRET_NAME
+         value: memberlist
+       - name: METALLB_DEPLOYMENT
+         value: controller
+       image: quay.io/metallb/controller:v0.10.2
        name: controller
        ports:
        - containerPort: 7472
          name: monitoring
-       resources:
-         limits:
-           cpu: 100m
-           memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
@@ -400,14 +441,3 @@ spec:
        runAsUser: 65534
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
- ---
- apiVersion: v1
- kind: ConfigMap
- metadata:
-   labels:
-     app: metallb
-   name: config
-   namespace: metallb-system
- data:
-   config: |
-     <%= u.indent(global_config.lb.lb_config, 4) %>
data/lib/porkadot/assets/kubernetes/manifests/addons/storage-version-migrator/kustomization.yaml.erb ADDED
@@ -0,0 +1,2 @@
+ resources:
+ - storage-version-migrator.yaml