underpost 2.8.7 → 2.8.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.vscode/extensions.json +34 -2
  2. package/README.md +7 -5
  3. package/bin/db.js +1 -0
  4. package/bin/deploy.js +259 -74
  5. package/cli.md +88 -9
  6. package/conf.js +4 -0
  7. package/docker-compose.yml +1 -1
  8. package/manifests/deployment/adminer/service.yaml +1 -1
  9. package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
  10. package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
  11. package/manifests/deployment/fastapi/initial_data.sh +56 -0
  12. package/manifests/deployment/spark/spark-pi-py.yaml +21 -0
  13. package/manifests/envoy-service-nodeport.yaml +23 -0
  14. package/manifests/kubelet-config.yaml +65 -0
  15. package/manifests/lxd/lxd-admin-profile.yaml +17 -0
  16. package/manifests/lxd/lxd-preseed.yaml +30 -0
  17. package/manifests/lxd/underpost-setup.sh +163 -0
  18. package/manifests/maas/lxd-preseed.yaml +32 -0
  19. package/manifests/maas/maas-setup.sh +82 -0
  20. package/manifests/mariadb/statefulset.yaml +2 -1
  21. package/manifests/mariadb/storage-class.yaml +10 -0
  22. package/manifests/mongodb/kustomization.yaml +1 -1
  23. package/manifests/mongodb/statefulset.yaml +12 -11
  24. package/manifests/mongodb/storage-class.yaml +9 -0
  25. package/manifests/mongodb-4.4/service-deployment.yaml +2 -2
  26. package/manifests/mysql/kustomization.yaml +7 -0
  27. package/manifests/mysql/pv-pvc.yaml +27 -0
  28. package/manifests/mysql/statefulset.yaml +55 -0
  29. package/manifests/postgresql/statefulset.yaml +1 -1
  30. package/manifests/valkey/service.yaml +3 -9
  31. package/manifests/valkey/statefulset.yaml +12 -15
  32. package/package.json +1 -1
  33. package/src/cli/baremetal.js +60 -0
  34. package/src/cli/cluster.js +506 -207
  35. package/src/cli/deploy.js +47 -14
  36. package/src/cli/env.js +2 -2
  37. package/src/cli/image.js +83 -9
  38. package/src/cli/index.js +68 -61
  39. package/src/cli/lxd.js +395 -0
  40. package/src/cli/repository.js +9 -6
  41. package/src/index.js +17 -1
  42. package/src/runtime/lampp/Dockerfile +1 -1
  43. package/src/server/conf.js +58 -0
  44. package/src/server/logger.js +3 -3
  45. package/src/server/runtime.js +1 -1
  46. package/src/server/valkey.js +3 -3
  47. package/manifests/calico-custom-resources.yaml +0 -25
package/src/cli/cluster.js

@@ -3,17 +3,51 @@ import { loggerFactory } from '../server/logger.js';
  import { shellExec } from '../server/process.js';
  import UnderpostDeploy from './deploy.js';
  import UnderpostTest from './test.js';
+ import os from 'os';

  const logger = loggerFactory(import.meta);

  class UnderpostCluster {
  static API = {
+ /**
+ * @method init
+ * @description Initializes and configures the Kubernetes cluster based on provided options.
+ * This method handles host prerequisites, cluster initialization (Kind, Kubeadm, or K3s),
+ * and optional component deployments.
+ * @param {string} [podName] - Optional name of a pod for specific operations (e.g., listing).
+ * @param {object} [options] - Configuration options for cluster initialization.
+ * @param {boolean} [options.mongodb=false] - Deploy MongoDB.
+ * @param {boolean} [options.mongodb4=false] - Deploy MongoDB 4.4.
+ * @param {boolean} [options.mariadb=false] - Deploy MariaDB.
+ * @param {boolean} [options.mysql=false] - Deploy MySQL.
+ * @param {boolean} [options.postgresql=false] - Deploy PostgreSQL.
+ * @param {boolean} [options.valkey=false] - Deploy Valkey.
+ * @param {boolean} [options.full=false] - Deploy a full set of common components.
+ * @param {boolean} [options.info=false] - Display extensive Kubernetes cluster information.
+ * @param {boolean} [options.certManager=false] - Deploy Cert-Manager for certificate management.
+ * @param {boolean} [options.listPods=false] - List Kubernetes pods.
+ * @param {boolean} [options.reset=false] - Perform a comprehensive reset of Kubernetes and container environments.
+ * @param {boolean} [options.dev=false] - Run in development mode (adjusts paths).
+ * @param {string} [options.nsUse=''] - Set the current kubectl namespace.
+ * @param {boolean} [options.infoCapacity=false] - Display resource capacity information for the cluster.
+ * @param {boolean} [options.infoCapacityPod=false] - Display resource capacity information for pods.
+ * @param {boolean} [options.istio=false] - Deploy Istio service mesh.
+ * @param {boolean} [options.pullImage=false] - Pull necessary Docker images before deployment.
+ * @param {boolean} [options.dedicatedGpu=false] - Configure for dedicated GPU usage (e.g., NVIDIA GPU Operator).
+ * @param {boolean} [options.kubeadm=false] - Initialize the cluster using Kubeadm.
+ * @param {boolean} [options.k3s=false] - Initialize the cluster using K3s.
+ * @param {boolean} [options.initHost=false] - Perform initial host setup (install Docker, Podman, Kind, Kubeadm, Helm).
+ * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
+ * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm or K3s join).
+ * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
+ */
  async init(
  podName,
  options = {
  mongodb: false,
  mongodb4: false,
  mariadb: false,
+ mysql: false,
  postgresql: false,
  valkey: false,
  full: false,
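
The new JSDoc above doubles as the option reference for the rewritten bootstrapper. As a rough usage sketch (the import path and option combinations are illustrative assumptions, not taken from the package's docs):

  // Hypothetical driver script against the API shown in this diff.
  import UnderpostCluster from 'underpost/src/cli/cluster.js';

  // One-time host preparation, then a single-node K3s control plane
  // with the full add-on set and pre-pulled images; each flag below
  // short-circuits or branches inside init() as the following hunks show.
  await UnderpostCluster.API.init(undefined, { initHost: true });
  await UnderpostCluster.API.init(undefined, { config: true });
  await UnderpostCluster.API.init(undefined, { k3s: true, full: true, pullImage: true });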
@@ -27,36 +61,48 @@ class UnderpostCluster {
  infoCapacityPod: false,
  istio: false,
  pullImage: false,
+ dedicatedGpu: false,
+ kubeadm: false,
+ k3s: false,
+ initHost: false,
+ config: false,
+ worker: false,
+ chown: false,
  },
  ) {
- // 1) Install kind, kubeadm, docker, podman
- // 2) Check kubectl, kubelet, containerd.io
- // 3) Install Nvidia drivers from Rocky Linux docs
- // 4) Install LXD with MAAS from Rocky Linux docs
- // 5) Install MAAS src from snap
+ // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
+ if (options.initHost === true) return UnderpostCluster.API.initHost();
+
+ // Applies general host configuration (SELinux, containerd, sysctl)
+ if (options.config === true) return UnderpostCluster.API.config();
+
+ // Sets up kubectl configuration for the current user
+ if (options.chown === true) return UnderpostCluster.API.chown();
+
  const npmRoot = getNpmRootPath();
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+
+ // Information gathering options
  if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
- if (options.infoCapacity === true) return logger.info('', UnderpostCluster.API.getResourcesCapacity());
- if (options.reset === true) return await UnderpostCluster.API.reset();
+ if (options.infoCapacity === true)
+ return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm || options.k3s)); // Adjust for k3s
  if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
-
  if (options.nsUse && typeof options.nsUse === 'string') {
  shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
  return;
  }
  if (options.info === true) {
- shellExec(`kubectl config get-contexts`); // config env persisente for manage multiple clusters
+ shellExec(`kubectl config get-contexts`);
  shellExec(`kubectl config get-clusters`);
- shellExec(`kubectl get nodes -o wide`); // set of nodes of a cluster
+ shellExec(`kubectl get nodes -o wide`);
  shellExec(`kubectl config view | grep namespace`);
- shellExec(`kubectl get ns -o wide`); // A namespace can have pods of different nodes
- shellExec(`kubectl get pvc --all-namespaces -o wide`); // PersistentVolumeClaim -> request storage service
- shellExec(`kubectl get pv --all-namespaces -o wide`); // PersistentVolume -> real storage
+ shellExec(`kubectl get ns -o wide`);
+ shellExec(`kubectl get pvc --all-namespaces -o wide`);
+ shellExec(`kubectl get pv --all-namespaces -o wide`);
  shellExec(`kubectl get cronjob --all-namespaces -o wide`);
- shellExec(`kubectl get svc --all-namespaces -o wide`); // proxy dns gate way -> deployments, statefulsets, pods
- shellExec(`kubectl get statefulsets --all-namespaces -o wide`); // set pods with data/volume persistence
- shellExec(`kubectl get deployments --all-namespaces -o wide`); // set pods
+ shellExec(`kubectl get svc --all-namespaces -o wide`);
+ shellExec(`kubectl get statefulsets --all-namespaces -o wide`);
+ shellExec(`kubectl get deployments --all-namespaces -o wide`);
  shellExec(`kubectl get configmap --all-namespaces -o wide`);
  shellExec(`kubectl get pods --all-namespaces -o wide`);
  shellExec(
@@ -65,6 +111,7 @@ class UnderpostCluster {
  shellExec(
  `kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\\n"}{.metadata.name}{":\\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}'`,
  );
+ shellExec(`sudo crictl images`);
  console.log();
  logger.info('contour -------------------------------------------------');
  for (const _k of ['Cluster', 'HTTPProxy', 'ClusterIssuer', 'Certificate']) {
@@ -78,78 +125,200 @@ class UnderpostCluster {
  return;
  }

- if (
- (!options.istio && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
- (options.istio === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0])
- ) {
- shellExec(`sudo setenforce 0`);
- shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
- // sudo systemctl disable kubelet
- // shellExec(`sudo systemctl enable --now kubelet`);
- shellExec(`containerd config default > /etc/containerd/config.toml`);
- shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
- // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
- // shellExec(`sudo systemctl restart kubelet`);
- shellExec(`sudo service docker restart`);
- shellExec(`sudo systemctl enable --now containerd.service`);
- shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
- if (options.istio === true) {
- shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
- shellExec(`sudo kubeadm init --pod-network-cidr=192.168.0.0/16`);
- shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
- shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
- // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+ // Reset Kubernetes cluster components (Kind/Kubeadm/K3s) and container runtimes
+ if (options.reset === true) return await UnderpostCluster.API.reset();
+
+ // Check if a cluster (Kind, Kubeadm, or K3s) is already initialized
+ const alreadyKubeadmCluster = UnderpostDeploy.API.get('calico-kube-controllers')[0];
+ const alreadyKindCluster = UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0];
+ // K3s pods often contain 'svclb-traefik' in the kube-system namespace
+ const alreadyK3sCluster = UnderpostDeploy.API.get('svclb-traefik')[0];
+
+ // --- Kubeadm/Kind/K3s Cluster Initialization ---
+ // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
+ // It prevents re-initialization if a cluster is already detected.
+ if (!options.worker && !alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
+ if (options.k3s === true) {
+ logger.info('Initializing K3s control plane...');
+ // Install K3s
+ console.log('Installing K3s...');
+ shellExec(`curl -sfL https://get.k3s.io | sh -`);
+ console.log('K3s installation completed.');
+
+ // Move k3s binary to /bin/k3s and make it executable
+ shellExec(`sudo mv /usr/local/bin/k3s /bin/k3s`);
+ shellExec(`sudo chmod +x /bin/k3s`);
+ console.log('K3s binary moved to /bin/k3s and made executable.');
+
+ // Configure kubectl for the current user for K3s *before* checking readiness
+ // This ensures kubectl can find the K3s kubeconfig immediately after K3s installation.
+ UnderpostCluster.API.chown('k3s');
+
+ // Wait for K3s to be ready
+ logger.info('Waiting for K3s to be ready...');
+ let k3sReady = false;
+ let retries = 0;
+ const maxRetries = 20; // Increased retries for K3s startup
+ const delayMs = 5000; // 5 seconds
+
+ while (!k3sReady && retries < maxRetries) {
+ try {
+ // Explicitly use KUBECONFIG for kubectl commands to ensure it points to K3s config
+ const nodes = shellExec(`KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get nodes -o json`, {
+ stdout: true,
+ silent: true,
+ });
+ const parsedNodes = JSON.parse(nodes);
+ if (
+ parsedNodes.items.some((node) =>
+ node.status.conditions.some((cond) => cond.type === 'Ready' && cond.status === 'True'),
+ )
+ ) {
+ k3sReady = true;
+ logger.info('K3s cluster is ready.');
+ } else {
+ logger.info(`K3s not yet ready. Retrying in ${delayMs / 1000} seconds...`);
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
+ }
+ } catch (error) {
+ logger.info(`Error checking K3s status: ${error.message}. Retrying in ${delayMs / 1000} seconds...`);
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
+ }
+ retries++;
+ }
+
+ if (!k3sReady) {
+ logger.error('K3s cluster did not become ready in time. Please check the K3s logs.');
+ return;
+ }
+
+ // K3s includes local-path-provisioner by default, so no need to install explicitly.
+ logger.info('K3s comes with local-path-provisioner by default. Skipping explicit installation.');
+ } else if (options.kubeadm === true) {
+ logger.info('Initializing Kubeadm control plane...');
+ // Initialize kubeadm control plane
+ shellExec(
+ `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
+ );
+ // Configure kubectl for the current user
+ UnderpostCluster.API.chown('kubeadm'); // Pass 'kubeadm' to chown
+
+ // Install Calico CNI
+ logger.info('Installing Calico CNI...');
  shellExec(
  `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
  );
- // shellExec(
- // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
- // );
- shellExec(`sudo kubectl apply -f ./manifests/kubeadm-calico-config.yaml`);
- shellExec(`sudo systemctl restart containerd`);
- } else {
- shellExec(`sudo systemctl restart containerd`);
+ shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
+ // Untaint control plane node to allow scheduling pods
+ const nodeName = os.hostname();
+ shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
+ // Install local-path-provisioner for dynamic PVCs (optional but recommended)
+ logger.info('Installing local-path-provisioner...');
  shellExec(
- `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
- options?.dev === true ? '-dev' : ''
- }.yaml`,
+ `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
  );
- shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ } else {
+ // Kind cluster initialization (if not using kubeadm or k3s)
+ logger.info('Initializing Kind cluster...');
+ if (options.full === true || options.dedicatedGpu === true) {
+ shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
+ } else {
+ shellExec(
+ `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
+ options?.dev === true ? '-dev' : ''
+ }.yaml`,
+ );
+ }
+ UnderpostCluster.API.chown('kind'); // Pass 'kind' to chown
  }
- } else logger.warn('Cluster already initialized');
+ } else if (options.worker === true) {
+ // Worker node specific configuration (kubeadm join command needs to be executed separately)
+ logger.info('Worker node configuration applied. Awaiting join command...');
+ // No direct cluster initialization here for workers. The `kubeadm join` or `k3s agent` command
+ // needs to be run on the worker after the control plane is up and a token is created.
+ // This part of the script is for general worker setup, not the join itself.
+ } else {
+ logger.warn('Cluster already initialized or worker flag not set for worker node.');
+ }
+
+ // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
+ // These deployments happen after the base cluster is up.
+
+ if (options.full === true || options.dedicatedGpu === true) {
+ shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
+ shellExec(
+ `node ${underpostRoot}/bin/deploy kubeflow-spark-operator${options.kubeadm === true ? ' kubeadm' : ''}`,
+ );
+ }

  if (options.full === true || options.valkey === true) {
  if (options.pullImage === true) {
- shellExec(`docker pull valkey/valkey`);
- shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+ shellExec(`docker pull valkey/valkey:latest`);
+ shellExec(`sudo podman pull valkey/valkey:latest`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull valkey/valkey:latest`);
  }
- shellExec(`kubectl delete statefulset service-valkey`);
+ shellExec(`kubectl delete statefulset valkey-service --ignore-not-found`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
  }
  if (options.full === true || options.mariadb === true) {
  shellExec(
- `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password`,
+ `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
+ shellExec(`kubectl delete statefulset mariadb-statefulset --ignore-not-found`);
+
+ if (options.pullImage === true) {
+ shellExec(`docker pull mariadb:latest`);
+ shellExec(`sudo podman pull mariadb:latest`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image mariadb:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull mariadb:latest`);
+ }
+ if (options.kubeadm === true)
+ // This storage class is specific to kubeadm setup
+ shellExec(`kubectl apply -f ${underpostRoot}/manifests/mariadb/storage-class.yaml`);
+ shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
+ }
+ if (options.full === true || options.mysql === true) {
  shellExec(
- `sudo kubectl create secret generic github-secret --from-literal=GITHUB_TOKEN=${process.env.GITHUB_TOKEN}`,
+ `sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
- shellExec(`kubectl delete statefulset mariadb-statefulset`);
- shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
+ shellExec(`sudo mkdir -p /mnt/data`);
+ shellExec(`sudo chmod 777 /mnt/data`);
+ shellExec(`sudo chown -R root:root /mnt/data`);
+ shellExec(`kubectl apply -k ${underpostRoot}/manifests/mysql`);
  }
  if (options.full === true || options.postgresql === true) {
  if (options.pullImage === true) {
  shellExec(`docker pull postgres:latest`);
- shellExec(`sudo kind load docker-image postgres:latest`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image postgres:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull postgres:latest`);
  }
  shellExec(
- `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+ `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
- shellExec(`kubectl apply -k ./manifests/postgresql`);
+ shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql`);
  }
  if (options.mongodb4 === true) {
  if (options.pullImage === true) {
  shellExec(`docker pull mongo:4.4`);
- shellExec(`sudo kind load docker-image mongo:4.4`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image mongo:4.4`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull mongo:4.4`);
  }
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);

@@ -160,7 +329,7 @@ class UnderpostCluster {
  if (successInstance) {
  const mongoConfig = {
  _id: 'rs0',
- members: [{ _id: 0, host: '127.0.0.1:27017' }],
+ members: [{ _id: 0, host: 'mongodb-service:27017' }],
  };

  const [pod] = UnderpostDeploy.API.get(deploymentName);
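
Each database block in the hunks above repeats the same pull-and-load pattern, branching on the cluster flavor. A condensed sketch of that pattern (the helper name is ours, purely illustrative; shellExec is the module's own import):

  // Hypothetical helper summarizing the repeated image-preload logic.
  function preloadImage(image, options) {
    shellExec(`docker pull ${image}`);
    if (!options.kubeadm && !options.k3s)
      shellExec(`sudo kind load docker-image ${image}`); // Kind: copy the image into the node containers
    else shellExec(`sudo crictl pull ${image}`); // kubeadm/k3s: fetch directly into containerd
  }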
@@ -170,16 +339,26 @@ class UnderpostCluster {
  --eval 'rs.initiate(${JSON.stringify(mongoConfig)})'`,
  );
  }
-
- // await UnderpostTest.API.statusMonitor('mongodb-1');
  } else if (options.full === true || options.mongodb === true) {
+ if (options.pullImage === true) {
+ shellExec(`docker pull mongo:latest`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image mongo:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull mongo:latest`);
+ }
  shellExec(
- `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile`,
+ `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile --dry-run=client -o yaml | kubectl apply -f -`,
  );
  shellExec(
- `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password`,
+ `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
- shellExec(`kubectl delete statefulset mongodb`);
+ shellExec(`kubectl delete statefulset mongodb --ignore-not-found`);
+ if (options.kubeadm === true)
+ // This storage class is specific to kubeadm setup
+ shellExec(`kubectl apply -f ${underpostRoot}/manifests/mongodb/storage-class.yaml`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb`);

  const successInstance = await UnderpostTest.API.statusMonitor('mongodb-1');
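
The secret commands in the hunks above all move to the create, then --dry-run=client -o yaml, then kubectl apply idiom, which makes re-runs idempotent instead of failing with AlreadyExists. The same shape works for any secret (the name and file path here are illustrative):

  // Hypothetical example of the idempotent-secret idiom used above.
  shellExec(
    `sudo kubectl create secret generic my-secret --from-file=password=/path/to/password --dry-run=client -o yaml | kubectl apply -f -`,
  );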
@@ -202,8 +381,15 @@ class UnderpostCluster {
  }
  }

- if (options.full === true || options.contour === true)
+ if (options.full === true || options.contour === true) {
  shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
+ if (options.kubeadm === true) {
+ // Envoy service might need NodePort for kubeadm
+ shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/envoy-service-nodeport.yaml`);
+ }
+ // K3s has a built-in LoadBalancer (Klipper-lb) that can expose services,
+ // so a specific NodePort service might not be needed or can be configured differently.
+ }

  if (options.full === true || options.certManager === true) {
  if (!UnderpostDeploy.API.get('cert-manager').find((p) => p.STATUS === 'Running')) {
@@ -218,153 +404,200 @@ class UnderpostCluster {
  }

  const letsEncName = 'letsencrypt-prod';
- shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName}`);
+ shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName} --ignore-not-found`);
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
  }
  },
- // This function performs a comprehensive reset of Kubernetes and container environments
- // on the host machine. Its primary goal is to clean up cluster components, temporary files,
- // and container data, ensuring a clean state for re-initialization or fresh deployments,
- // while also preventing the loss of the host machine's internet connectivity.
-
- reset() {
- // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
- // 'kind get clusters' lists all Kind clusters.
- // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
- // and executes 'kind delete cluster --name <cluster_name>' to remove them.
- shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
-
- // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
- // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
- // configuration files, and associated network rules (like iptables entries created by kubeadm).
- // The '-f' flag bypasses confirmation prompts.
- shellExec(`sudo kubeadm reset -f`);
-
- // Step 3: Remove specific CNI (Container Network Interface) configuration files.
- // This command targets and removes the configuration file for Flannel,
- // a common CNI plugin, which might be left behind after a reset.
- shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
-
- // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
- // This command would flush all iptables rules, including those crucial for the host's general
- // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
- // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
- // default network configuration.
-
- // Step 4: Remove the kubectl configuration file from the current user's home directory.
- // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
- // providing a clean slate for connecting to a new or re-initialized cluster.
- shellExec('sudo rm -f $HOME/.kube/config');
-
- // Step 5: Clear trash files from the root user's trash directory.
- // This is a general cleanup step to remove temporary or deleted files.
- shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
-
- // Step 6: Prune all unused Docker data.
- // 'docker system prune -a -f' removes:
- // - All stopped containers
- // - All unused networks
- // - All dangling images
- // - All build cache
- // - All unused volumes
- // This aggressively frees up disk space and removes temporary Docker artifacts.
- shellExec('sudo docker system prune -a -f');
-
- // Step 7: Stop the Docker daemon service.
- // This step is often necessary to ensure that Docker's files and directories
- // can be safely manipulated or moved in subsequent steps without conflicts.
- shellExec('sudo service docker stop');
-
- // Step 8: Aggressively remove container storage data for containerd and Docker.
- // These commands target the default storage locations for containerd and Docker,
- // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
- // This ensures a complete wipe of all container images, layers, and volumes.
- shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
- shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
- shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
- shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
- shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
-
- // Step 9: Re-configure Docker's default storage location (if desired).
- // These commands effectively move Docker's data directory from its default `/var/lib/docker`
- // to a new location (`/home/docker`) and create a symbolic link.
- // This is a specific customization to relocate Docker's storage.
- shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
- shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
- shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
- shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
-
- // Step 10: Prune all unused Podman data.
- // Similar to Docker pruning, these commands remove:
- // - All stopped containers
- // - All unused networks
- // - All unused images
- // - All unused volumes ('--volumes')
- // - The '--force' flag bypasses confirmation.
- // '--external' prunes external content not managed by Podman's default storage backend.
- shellExec(`sudo podman system prune -a -f`);
- shellExec(`sudo podman system prune --all --volumes --force`);
- shellExec(`sudo podman system prune --external --force`);
- shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
-
- // Step 11: Create and set permissions for Podman's custom storage directory.
- // This ensures the custom path `/home/containers/storage` exists and has correct permissions
- // before Podman attempts to use it.
- shellExec(`sudo mkdir -p /home/containers/storage`);
- shellExec('sudo chmod 0711 /home/containers/storage');
-
- // Step 12: Update Podman's storage configuration file.
- // This command uses 'sed' to modify `/etc/containers/storage.conf`,
- // changing the default storage path from `/var/lib/containers/storage`
- // to the customized `/home/containers/storage`.
- shellExec(
- `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
- );

- // Step 13: Reset Podman system settings.
- // This command resets Podman's system-wide configuration to its default state.
- shellExec(`sudo podman system reset -f`);
-
- // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
- // were previously removed. These sysctl settings (bridge-nf-call-iptables,
- // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
- // network traffic through Linux bridges to be processed by iptables.
- // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
- // Re-initializing Kubernetes will typically set these as needed, and leaving them
- // at their system default (or '1' if already configured) is safer for host
- // connectivity during a reset operation.
-
- // https://github.com/kubernetes-sigs/kind/issues/2886
- // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
- // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
- // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
-
- // Step 14: Remove the 'kind' Docker network.
- // This cleans up any network bridges or configurations specifically created by Kind.
- shellExec(`docker network rm kind`);
+ /**
+ * @method config
+ * @description Configures host-level settings required for Kubernetes.
+ * This method ensures proper SELinux, Docker, Containerd, and Sysctl settings
+ * are applied for a healthy Kubernetes environment. It explicitly avoids
+ * iptables flushing commands to prevent conflicts with Kubernetes' own network management.
+ */
+ config() {
+ console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
+ // Disable SELinux (permissive mode)
+ shellExec(`sudo setenforce 0`);
+ shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+
+ // Enable and start Docker and Kubelet services
+ shellExec(`sudo systemctl enable --now docker || true`); // Docker might not be needed for K3s
+ shellExec(`sudo systemctl enable --now kubelet || true`); // Kubelet might not be needed for K3s (K3s uses its own agent)
+
+ // Configure containerd for SystemdCgroup
+ // This is crucial for kubelet/k3s to interact correctly with containerd
+ shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
+ shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
+ shellExec(`sudo service docker restart || true`); // Restart docker after containerd config changes
+ shellExec(`sudo systemctl enable --now containerd.service`);
+ shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
+
+ // Disable swap (required by Kubernetes)
+ shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+
+ // Reload systemd daemon to pick up new unit files/changes
+ shellExec(`sudo systemctl daemon-reload`);
+
+ // Enable bridge-nf-call-iptables for Kubernetes networking
+ // This ensures traffic through Linux bridges is processed by iptables (crucial for CNI)
+ for (const iptableConfPath of [
+ `/etc/sysctl.d/k8s.conf`,
+ `/etc/sysctl.d/99-k8s-ipforward.conf`,
+ `/etc/sysctl.d/99-k8s.conf`,
+ ])
+ shellExec(`echo 'net.bridge.bridge-nf-call-iptables = 1
+ net.bridge.bridge-nf-call-ip6tables = 1
+ net.bridge.bridge-nf-call-arptables = 1
+ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
+ shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
+
+ // Disable firewalld (common cause of network issues in Kubernetes)
+ shellExec(`sudo systemctl stop firewalld || true`); // Stop if running
+ shellExec(`sudo systemctl disable firewalld || true`); // Disable from starting on boot
  },

- getResourcesCapacity() {
+ /**
+ * @method chown
+ * @description Sets up kubectl configuration for the current user based on the cluster type.
+ * @param {string} clusterType - The type of Kubernetes cluster ('kubeadm', 'k3s', or 'kind').
+ */
+ chown(clusterType) {
+ console.log(`Setting up kubectl configuration for ${clusterType} cluster...`);
+ shellExec(`mkdir -p ~/.kube`);
+
+ let kubeconfigPath;
+ if (clusterType === 'k3s') {
+ kubeconfigPath = '/etc/rancher/k3s/k3s.yaml';
+ } else if (clusterType === 'kubeadm') {
+ kubeconfigPath = '/etc/kubernetes/admin.conf';
+ } else {
+ // Default to kind if not specified or unknown
+ kubeconfigPath = ''; // Kind's kubeconfig is usually managed by kind itself, or merged
+ }
+
+ if (kubeconfigPath) {
+ shellExec(`sudo -E cp -i ${kubeconfigPath} ~/.kube/config`);
+ shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+ } else if (clusterType === 'kind') {
+ // For Kind, the kubeconfig is usually merged automatically or can be explicitly exported
+ // This command ensures it's merged into the default kubeconfig
+ shellExec(`kind get kubeconfig > ~/.kube/config || true`);
+ shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+ } else {
+ logger.warn('No specific kubeconfig path defined for this cluster type, or it is managed automatically.');
+ }
+ console.log('kubectl config set up successfully.');
+ },
+
+ /**
+ * @method reset
+ * @description Performs a comprehensive reset of Kubernetes and container environments.
+ * This function is for cleaning up a node, reverting changes made by 'kubeadm init', 'kubeadm join', or 'k3s install'.
+ * It includes deleting Kind clusters, resetting kubeadm, removing CNI configs,
+ * cleaning Docker and Podman data, persistent volumes, and resetting kubelet components.
+ * It avoids aggressive iptables flushing that would break host connectivity, relying on kube-proxy's
+ * control loop to eventually clean up rules if the cluster is not re-initialized.
+ */
+ async reset() {
+ logger.info('Starting comprehensive reset of Kubernetes and container environments...');
+
+ try {
+ // Phase 1: Pre-reset Kubernetes Cleanup (while API server is still up)
+ logger.info('Phase 1/6: Cleaning up Kubernetes resources (PVCs, PVs) while API server is accessible...');
+
+ // Get all Persistent Volumes and identify their host paths for data deletion.
+ const pvListJson = shellExec(`kubectl get pv -o json || echo '{"items":[]}'`, { stdout: true, silent: true });
+ const pvList = JSON.parse(pvListJson);
+
+ if (pvList.items && pvList.items.length > 0) {
+ for (const pv of pvList.items) {
+ // Check if the PV uses hostPath and delete its contents
+ if (pv.spec.hostPath && pv.spec.hostPath.path) {
+ const hostPath = pv.spec.hostPath.path;
+ logger.info(`Removing data from host path for PV '${pv.metadata.name}': ${hostPath}`);
+ shellExec(`sudo rm -rf ${hostPath}/* || true`);
+ }
+ }
+ } else {
+ logger.info('No Persistent Volumes found with hostPath to clean up.');
+ }
+
+ // Phase 2: Stop Kubelet/K3s agent and remove CNI configuration
+ logger.info('Phase 2/6: Stopping Kubelet/K3s agent and removing CNI configurations...');
+ shellExec(`sudo systemctl stop kubelet || true`); // Stop kubelet if it's running (kubeadm)
+ shellExec(`sudo /usr/local/bin/k3s-uninstall.sh || true`); // Run K3s uninstall script if it exists
+
+ // CNI plugins use /etc/cni/net.d to store their configuration.
+ shellExec('sudo rm -rf /etc/cni/net.d/* || true');
+
+ // Phase 3: Kind Cluster Cleanup
+ logger.info('Phase 3/6: Cleaning up Kind clusters...');
+ shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster || true`);
+
+ // Phase 4: Kubeadm Reset (if applicable)
+ logger.info('Phase 4/6: Performing kubeadm reset (if applicable)...');
+ shellExec(`sudo kubeadm reset --force || true`); // Use || true to prevent script from failing if kubeadm is not installed
+
+ // Phase 5: Post-reset File System Cleanup (Local Storage, Kubeconfig)
+ logger.info('Phase 5/6: Cleaning up local storage provisioner data and kubeconfig...');
+ shellExec('rm -rf $HOME/.kube || true');
+ shellExec(`sudo rm -rf /opt/local-path-provisioner/* || true`);
+
+ // Phase 6: Container Runtime Cleanup (Docker and Podman)
+ logger.info('Phase 6/6: Cleaning up Docker and Podman data...');
+ shellExec('sudo docker system prune -a -f || true');
+ shellExec('sudo service docker stop || true');
+ shellExec(`sudo rm -rf /var/lib/containers/storage/* || true`);
+ shellExec(`sudo rm -rf /var/lib/docker/volumes/* || true`);
+ shellExec(`sudo rm -rf /var/lib/docker~/* || true`);
+ shellExec(`sudo rm -rf /home/containers/storage/* || true`);
+ shellExec(`sudo rm -rf /home/docker/* || true`);
+ shellExec('sudo mkdir -p /home/docker || true');
+ shellExec('sudo chmod 777 /home/docker || true');
+ shellExec('sudo ln -sf /home/docker /var/lib/docker || true');
+
+ shellExec(`sudo podman system prune -a -f || true`);
+ shellExec(`sudo podman system prune --all --volumes --force || true`);
+ shellExec(`sudo podman system prune --external --force || true`);
+ shellExec(`sudo mkdir -p /home/containers/storage || true`);
+ shellExec('sudo chmod 0711 /home/containers/storage || true');
+ shellExec(
+ `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf || true`,
+ );
+ shellExec(`sudo podman system reset -f || true`);
+
+ // Final Kubelet and System Cleanup (after all other operations)
+ logger.info('Finalizing Kubelet and system file cleanup...');
+ shellExec(`sudo rm -rf /etc/kubernetes/* || true`);
+ shellExec(`sudo rm -rf /var/lib/kubelet/* || true`);
+ shellExec(`sudo rm -rf /root/.local/share/Trash/files/* || true`);
+ shellExec(`sudo systemctl daemon-reload`);
+ shellExec(`sudo systemctl start kubelet || true`); // Attempt to start kubelet; might fail if fully reset
+
+ logger.info('Comprehensive reset completed successfully.');
+ } catch (error) {
+ logger.error(`Error during reset: ${error.message}`);
+ console.error(error);
+ }
+ },
+
+ /**
+ * @method getResourcesCapacity
+ * @description Retrieves and returns the allocatable CPU and memory resources
+ * of the Kubernetes node.
+ * @param {boolean} [isKubeadmOrK3s=false] - If true, assumes a kubeadm or k3s-managed node;
+ * otherwise, assumes a Kind worker node.
+ * @returns {object} An object containing CPU and memory resources with values and units.
+ */
+ getResourcesCapacity(isKubeadmOrK3s = false) {
  const resources = {};
- const info = false
- ? `Capacity:
- cpu: 8
- ephemeral-storage: 153131976Ki
- hugepages-1Gi: 0
- hugepages-2Mi: 0
- memory: 11914720Ki
- pods: 110
- Allocatable:
- cpu: 8
- ephemeral-storage: 153131976Ki
- hugepages-1Gi: 0
- hugepages-2Mi: 0
- memory: 11914720Ki
- pods: `
- : shellExec(`kubectl describe node kind-worker | grep -E '(Allocatable:|Capacity:)' -A 6`, {
- stdout: true,
- silent: true,
- });
+ const nodeName = isKubeadmOrK3s ? os.hostname() : 'kind-worker';
+ const info = shellExec(`kubectl describe node ${nodeName} | grep -E '(Allocatable:|Capacity:)' -A 6`, {
+ stdout: true,
+ silent: true,
+ });
  info
  .split('Allocatable:')[1]
  .split('\n')
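
The monolithic reset() is now split into config(), chown(), and a phased async reset(), as the hunk above shows. A hedged sketch of the teardown-and-rebuild cycle this enables (nothing here beyond what the diff itself exposes):

  // Tear down Kind/kubeadm/K3s state and container runtimes...
  await UnderpostCluster.API.reset();
  // ...re-apply host settings, then rebuild; init() itself calls
  // chown('kubeadm') once the control plane is up.
  UnderpostCluster.API.config();
  await UnderpostCluster.API.init(undefined, { kubeadm: true });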
@@ -384,6 +617,72 @@ Allocatable:

  return resources;
  },
+ /**
+ * @method initHost
+ * @description Installs essential host-level prerequisites for Kubernetes,
+ * including Docker, Podman, Kind, Kubeadm, and Helm.
+ *
+ * Quick-Start Guide for K3s Installation:
+ * This guide will help you quickly launch a cluster with default options. Make sure your nodes meet the requirements before proceeding.
+ * Consult the Installation page for greater detail on installing and configuring K3s.
+ * For information on how K3s components work together, refer to the Architecture page.
+ * If you are new to Kubernetes, the official Kubernetes docs have great tutorials covering basics that all cluster administrators should be familiar with.
+ *
+ * Install Script:
+ * K3s provides an installation script that is a convenient way to install it as a service on systemd or openrc based systems. This script is available at https://get.k3s.io. To install K3s using this method, just run:
+ * curl -sfL https://get.k3s.io | sh -
+ *
+ * After running this installation:
+ * - The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed
+ * - Additional utilities will be installed, including kubectl, crictl, ctr, k3s-killall.sh, and k3s-uninstall.sh
+ * - A kubeconfig file will be written to /etc/rancher/k3s/k3s.yaml and the kubectl installed by K3s will automatically use it
+ *
+ * A single-node server installation is a fully-functional Kubernetes cluster, including all the datastore, control-plane, kubelet, and container runtime components necessary to host workload pods. It is not necessary to add additional server or agents nodes, but you may want to do so to add additional capacity or redundancy to your cluster.
+ *
+ * To install additional agent nodes and add them to the cluster, run the installation script with the K3S_URL and K3S_TOKEN environment variables. Here is an example showing how to join an agent:
+ * curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh -
+ *
+ * Setting the K3S_URL parameter causes the installer to configure K3s as an agent, instead of a server. The K3s agent will register with the K3s server listening at the supplied URL. The value to use for K3S_TOKEN is stored at /var/lib/rancher/k3s/server/node-token on your server node.
+ *
+ * Note: Each machine must have a unique hostname. If your machines do not have unique hostnames, pass the K3S_NODE_NAME environment variable and provide a value with a valid and unique hostname for each node.
+ * If you are interested in having more server nodes, see the High Availability Embedded etcd and High Availability External DB pages for more information.
+ */
+ initHost() {
+ console.log(
+ 'Installing essential host-level prerequisites for Kubernetes (Docker, Podman, Kind, Kubeadm, Helm) and providing K3s Quick-Start Guide information...',
+ );
+ // Install docker
+ shellExec(`sudo dnf -y install dnf-plugins-core`);
+ shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
+ shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
+
+ // Install podman
+ shellExec(`sudo dnf -y install podman`);
+
+ // Install kind
+ shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
+ chmod +x ./kind
+ sudo mv ./kind /bin/kind`);
+ // Install kubeadm, kubelet, kubectl (these are also useful for K3s for kubectl command)
+ shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/
+ enabled=1
+ gpgcheck=1
+ gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
+ exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
+ EOF`);
+ shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
+
+ // Install helm
+ shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`);
+ shellExec(`chmod 700 get_helm.sh`);
+ shellExec(`./get_helm.sh`);
+ shellExec(`chmod +x /usr/local/bin/helm`);
+ shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
+ console.log('Host prerequisites installed successfully.');
+ },
  };
  }
  export default UnderpostCluster;
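
The K3s quick-start notes embedded in the initHost JSDoc also cover joining worker nodes. Expressed with the same shellExec helper, a worker join might look like this (the server URL and token are placeholders; the real token is read from /var/lib/rancher/k3s/server/node-token on the server node):

  // Hypothetical worker-join sketch following the K3s docs quoted above.
  const K3S_URL = 'https://myserver:6443'; // placeholder control-plane endpoint
  const K3S_TOKEN = 'mynodetoken'; // placeholder; copy from the server node
  shellExec(`curl -sfL https://get.k3s.io | K3S_URL=${K3S_URL} K3S_TOKEN=${K3S_TOKEN} sh -`);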