underpost 2.8.79 → 2.8.84

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/.github/workflows/ghpkg.yml +22 -20
  2. package/.github/workflows/npmpkg.yml +15 -10
  3. package/.github/workflows/pwa-microservices-template.page.yml +12 -3
  4. package/.github/workflows/pwa-microservices-template.test.yml +20 -17
  5. package/.vscode/extensions.json +2 -3
  6. package/.vscode/settings.json +2 -42
  7. package/Dockerfile +14 -33
  8. package/README.md +43 -25
  9. package/bin/db.js +1 -0
  10. package/bin/deploy.js +104 -797
  11. package/bin/file.js +18 -1
  12. package/bin/vs.js +18 -3
  13. package/cli.md +367 -207
  14. package/conf.js +4 -0
  15. package/docker-compose.yml +1 -1
  16. package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
  17. package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
  18. package/manifests/deployment/tensorflow/tf-gpu-test.yaml +65 -0
  19. package/manifests/lxd/lxd-admin-profile.yaml +1 -0
  20. package/manifests/lxd/lxd-preseed.yaml +9 -37
  21. package/manifests/lxd/underpost-setup.sh +98 -81
  22. package/manifests/maas/device-scan.sh +43 -0
  23. package/manifests/maas/gpu-diag.sh +19 -0
  24. package/manifests/maas/lxd-preseed.yaml +32 -0
  25. package/manifests/maas/maas-setup.sh +120 -0
  26. package/manifests/maas/nat-iptables.sh +26 -0
  27. package/manifests/maas/snap-clean.sh +26 -0
  28. package/manifests/mariadb/statefulset.yaml +2 -1
  29. package/manifests/mariadb/storage-class.yaml +10 -0
  30. package/manifests/mongodb-4.4/service-deployment.yaml +2 -2
  31. package/manifests/valkey/service.yaml +3 -9
  32. package/manifests/valkey/statefulset.yaml +10 -12
  33. package/package.json +1 -1
  34. package/src/cli/baremetal.js +1280 -0
  35. package/src/cli/cloud-init.js +537 -0
  36. package/src/cli/cluster.js +506 -243
  37. package/src/cli/deploy.js +41 -3
  38. package/src/cli/env.js +2 -2
  39. package/src/cli/image.js +57 -9
  40. package/src/cli/index.js +271 -232
  41. package/src/cli/lxd.js +314 -81
  42. package/src/cli/repository.js +7 -4
  43. package/src/cli/run.js +262 -0
  44. package/src/cli/test.js +1 -1
  45. package/src/index.js +28 -1
  46. package/src/runtime/lampp/Dockerfile +41 -47
  47. package/src/server/conf.js +61 -0
  48. package/src/server/logger.js +3 -3
  49. package/src/server/process.js +16 -19
  50. package/src/server/runtime.js +1 -6
  51. package/src/server/ssl.js +1 -12
  52. package/src/server/valkey.js +3 -3
  53. package/supervisord-openssh-server.conf +0 -5
@@ -1,6 +1,7 @@
  import { getNpmRootPath } from '../server/conf.js';
  import { loggerFactory } from '../server/logger.js';
  import { shellExec } from '../server/process.js';
+ import UnderpostBaremetal from './baremetal.js';
  import UnderpostDeploy from './deploy.js';
  import UnderpostTest from './test.js';
  import os from 'os';
@@ -9,6 +10,39 @@ const logger = loggerFactory(import.meta);
 
  class UnderpostCluster {
  static API = {
+ /**
+ * @method init
+ * @description Initializes and configures the Kubernetes cluster based on provided options.
+ * This method handles host prerequisites, cluster initialization (Kind, Kubeadm, or K3s),
+ * and optional component deployments.
+ * @param {string} [podName] - Optional name of a pod for specific operations (e.g., listing).
+ * @param {object} [options] - Configuration options for cluster initialization.
+ * @param {boolean} [options.mongodb=false] - Deploy MongoDB.
+ * @param {boolean} [options.mongodb4=false] - Deploy MongoDB 4.4.
+ * @param {boolean} [options.mariadb=false] - Deploy MariaDB.
+ * @param {boolean} [options.mysql=false] - Deploy MySQL.
+ * @param {boolean} [options.postgresql=false] - Deploy PostgreSQL.
+ * @param {boolean} [options.valkey=false] - Deploy Valkey.
+ * @param {boolean} [options.full=false] - Deploy a full set of common components.
+ * @param {boolean} [options.info=false] - Display extensive Kubernetes cluster information.
+ * @param {boolean} [options.certManager=false] - Deploy Cert-Manager for certificate management.
+ * @param {boolean} [options.listPods=false] - List Kubernetes pods.
+ * @param {boolean} [options.reset=false] - Perform a comprehensive reset of Kubernetes and container environments.
+ * @param {boolean} [options.dev=false] - Run in development mode (adjusts paths).
+ * @param {string} [options.nsUse=''] - Set the current kubectl namespace.
+ * @param {boolean} [options.infoCapacity=false] - Display resource capacity information for the cluster.
+ * @param {boolean} [options.infoCapacityPod=false] - Display resource capacity information for pods.
+ * @param {boolean} [options.istio=false] - Deploy Istio service mesh.
+ * @param {boolean} [options.pullImage=false] - Pull necessary Docker images before deployment.
+ * @param {boolean} [options.dedicatedGpu=false] - Configure for dedicated GPU usage (e.g., NVIDIA GPU Operator).
+ * @param {boolean} [options.kubeadm=false] - Initialize the cluster using Kubeadm.
+ * @param {boolean} [options.k3s=false] - Initialize the cluster using K3s.
+ * @param {boolean} [options.initHost=false] - Perform initial host setup (install Docker, Podman, Kind, Kubeadm, Helm).
+ * @param {boolean} [options.uninstallHost=false] - Uninstall all host components.
+ * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
+ * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm or K3s join).
+ * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
+ */
  async init(
  podName,
  options = {
@@ -31,45 +65,50 @@ class UnderpostCluster {
  pullImage: false,
  dedicatedGpu: false,
  kubeadm: false,
+ k3s: false,
  initHost: false,
+ uninstallHost: false,
  config: false,
  worker: false,
  chown: false,
  },
  ) {
- // sudo dnf update
- // 1) Install kind, kubeadm, docker, podman, helm
- // 2) Check kubectl, kubelet, containerd.io
- // 3) Install Nvidia drivers from Rocky Linux docs
- // 4) Install LXD with MAAS from Rocky Linux docs
- // 5) Install MAAS src from snap
+ // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
  if (options.initHost === true) return UnderpostCluster.API.initHost();
- if (options.config === true) UnderpostCluster.API.config();
- if (options.chown === true) UnderpostCluster.API.chown();
+
+ // Handles host component uninstallation (removing docker, podman, kind, kubeadm, helm)
+ if (options.uninstallHost === true) return UnderpostCluster.API.uninstallHost();
+
+ // Applies general host configuration (SELinux, containerd, sysctl)
+ if (options.config === true) return UnderpostCluster.API.config();
+
+ // Sets up kubectl configuration for the current user
+ if (options.chown === true) return UnderpostCluster.API.chown();
+
  const npmRoot = getNpmRootPath();
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+
+ // Information gathering options
  if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
  if (options.infoCapacity === true)
- return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
- if (options.reset === true) return await UnderpostCluster.API.reset();
+ return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm || options.k3s)); // Adjust for k3s
  if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
-
  if (options.nsUse && typeof options.nsUse === 'string') {
  shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
  return;
  }
  if (options.info === true) {
- shellExec(`kubectl config get-contexts`); // config env persisente for manage multiple clusters
+ shellExec(`kubectl config get-contexts`);
  shellExec(`kubectl config get-clusters`);
- shellExec(`kubectl get nodes -o wide`); // set of nodes of a cluster
+ shellExec(`kubectl get nodes -o wide`);
  shellExec(`kubectl config view | grep namespace`);
- shellExec(`kubectl get ns -o wide`); // A namespace can have pods of different nodes
- shellExec(`kubectl get pvc --all-namespaces -o wide`); // PersistentVolumeClaim -> request storage service
- shellExec(`kubectl get pv --all-namespaces -o wide`); // PersistentVolume -> real storage
+ shellExec(`kubectl get ns -o wide`);
+ shellExec(`kubectl get pvc --all-namespaces -o wide`);
+ shellExec(`kubectl get pv --all-namespaces -o wide`);
  shellExec(`kubectl get cronjob --all-namespaces -o wide`);
- shellExec(`kubectl get svc --all-namespaces -o wide`); // proxy dns gate way -> deployments, statefulsets, pods
- shellExec(`kubectl get statefulsets --all-namespaces -o wide`); // set pods with data/volume persistence
- shellExec(`kubectl get deployments --all-namespaces -o wide`); // set pods
+ shellExec(`kubectl get svc --all-namespaces -o wide`);
+ shellExec(`kubectl get statefulsets --all-namespaces -o wide`);
+ shellExec(`kubectl get deployments --all-namespaces -o wide`);
  shellExec(`kubectl get configmap --all-namespaces -o wide`);
  shellExec(`kubectl get pods --all-namespaces -o wide`);
  shellExec(
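To make the option surface documented above concrete, here is a minimal usage sketch based only on the signature and JSDoc shown; the import path and flag combination are illustrative, not a recommended configuration:

```js
// Hypothetical direct invocation of the documented API (import path assumed).
import UnderpostCluster from './src/cli/cluster.js';

// Initialize a single-node K3s control plane, pre-pull images,
// and deploy Valkey once the cluster is up.
await UnderpostCluster.API.init(undefined, {
  k3s: true,
  pullImage: true,
  valkey: true,
});
```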
@@ -91,40 +130,105 @@ class UnderpostCluster {
  shellExec(`sudo kubectl api-resources`);
  return;
  }
- const alrreadyCluster =
- UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0] ||
- UnderpostDeploy.API.get('calico-kube-controllers')[0];
-
- if (
- !options.worker &&
- !alrreadyCluster &&
- ((!options.kubeadm && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
- (options.kubeadm === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0]))
- ) {
+
+ // Reset Kubernetes cluster components (Kind/Kubeadm/K3s) and container runtimes
+ if (options.reset === true) return await UnderpostCluster.API.safeReset({ underpostRoot });
+
+ // Check if a cluster (Kind, Kubeadm, or K3s) is already initialized
+ const alreadyKubeadmCluster = UnderpostDeploy.API.get('calico-kube-controllers')[0];
+ const alreadyKindCluster = UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0];
+ // K3s pods often contain 'svclb-traefik' in the kube-system namespace
+ const alreadyK3sCluster = UnderpostDeploy.API.get('svclb-traefik')[0];
+
+ // --- Kubeadm/Kind/K3s Cluster Initialization ---
+ // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
+ // It prevents re-initialization if a cluster is already detected.
+ if (!options.worker && !alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
  UnderpostCluster.API.config();
- if (options.kubeadm === true) {
+ if (options.k3s === true) {
+ logger.info('Initializing K3s control plane...');
+ // Install K3s
+ console.log('Installing K3s...');
+ shellExec(`curl -sfL https://get.k3s.io | sh -`);
+ console.log('K3s installation completed.');
+
+ // Move k3s binary to /bin/k3s and make it executable
+ shellExec(`sudo mv /usr/local/bin/k3s /bin/k3s`);
+ shellExec(`sudo chmod +x /bin/k3s`);
+ console.log('K3s binary moved to /bin/k3s and made executable.');
+
+ // Configure kubectl for the current user for K3s *before* checking readiness
+ // This ensures kubectl can find the K3s kubeconfig immediately after K3s installation.
+ UnderpostCluster.API.chown('k3s');
+
+ // Wait for K3s to be ready
+ logger.info('Waiting for K3s to be ready...');
+ let k3sReady = false;
+ let retries = 0;
+ const maxRetries = 20; // Increased retries for K3s startup
+ const delayMs = 5000; // 5 seconds
+
+ while (!k3sReady && retries < maxRetries) {
+ try {
+ // Explicitly use KUBECONFIG for kubectl commands to ensure it points to K3s config
+ const nodes = shellExec(`KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get nodes -o json`, {
+ stdout: true,
+ silent: true,
+ });
+ const parsedNodes = JSON.parse(nodes);
+ if (
+ parsedNodes.items.some((node) =>
+ node.status.conditions.some((cond) => cond.type === 'Ready' && cond.status === 'True'),
+ )
+ ) {
+ k3sReady = true;
+ logger.info('K3s cluster is ready.');
+ } else {
+ logger.info(`K3s not yet ready. Retrying in ${delayMs / 1000} seconds...`);
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
+ }
+ } catch (error) {
+ logger.info(`Error checking K3s status: ${error.message}. Retrying in ${delayMs / 1000} seconds...`);
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
+ }
+ retries++;
+ }
+
+ if (!k3sReady) {
+ logger.error('K3s cluster did not become ready in time. Please check the K3s logs.');
+ return;
+ }
+
+ // K3s includes local-path-provisioner by default, so no need to install it explicitly.
+ logger.info('K3s comes with local-path-provisioner by default. Skipping explicit installation.');
+ } else if (options.kubeadm === true) {
+ logger.info('Initializing Kubeadm control plane...');
+ // Initialize kubeadm control plane
  shellExec(
  `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
  );
- UnderpostCluster.API.chown();
- // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+ // Configure kubectl for the current user
+ UnderpostCluster.API.chown('kubeadm'); // Pass 'kubeadm' to chown
+
+ // Install Calico CNI
+ logger.info('Installing Calico CNI...');
  shellExec(
  `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
  );
- // shellExec(
- // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
- // );
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
+ // Untaint control plane node to allow scheduling pods
  const nodeName = os.hostname();
  shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
+ // Install local-path-provisioner for dynamic PVCs (optional but recommended)
+ logger.info('Installing local-path-provisioner...');
  shellExec(
  `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
  );
  } else {
+ // Kind cluster initialization (if not using kubeadm or k3s)
+ logger.info('Initializing Kind cluster...');
  if (options.full === true || options.dedicatedGpu === true) {
- // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
  shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
- UnderpostCluster.API.chown();
  } else {
  shellExec(
  `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
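The K3s branch above polls `kubectl get nodes -o json` until some node reports the `Ready` condition, sleeping five seconds between attempts. The same pattern, condensed into a self-contained helper for reference (the `waitForNodeReady` name and the use of `node:child_process` instead of the project's `shellExec` are illustrative assumptions):

```js
import { execSync } from 'node:child_process';

// Poll the API server until a node reports Ready, or give up after maxRetries.
async function waitForNodeReady(kubeconfig, maxRetries = 20, delayMs = 5000) {
  for (let retry = 0; retry < maxRetries; retry++) {
    try {
      const out = execSync(`KUBECONFIG=${kubeconfig} kubectl get nodes -o json`, { encoding: 'utf8' });
      const { items } = JSON.parse(out);
      if (items.some((n) => n.status.conditions.some((c) => c.type === 'Ready' && c.status === 'True')))
        return true;
    } catch {
      // kubectl can fail while the API server is still starting; fall through and retry.
    }
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
  return false;
}
```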
@@ -132,10 +236,20 @@ class UnderpostCluster {
  }.yaml`,
  );
  }
+ UnderpostCluster.API.chown('kind'); // Pass 'kind' to chown
  }
- } else logger.warn('Cluster already initialized');
+ } else if (options.worker === true) {
+ // Worker node specific configuration (kubeadm join command needs to be executed separately)
+ logger.info('Worker node configuration applied. Awaiting join command...');
+ // No direct cluster initialization here for workers. The `kubeadm join` or `k3s agent` command
+ // needs to be run on the worker after the control plane is up and a token is created.
+ // This part of the script is for general worker setup, not the join itself.
+ } else {
+ logger.warn('Cluster already initialized or worker flag not set for worker node.');
+ }
 
- // shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubelet-config.yaml`);
+ // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
+ // These deployments happen after the base cluster is up.
 
  if (options.full === true || options.dedicatedGpu === true) {
  shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
@@ -148,26 +262,40 @@ class UnderpostCluster {
  if (options.pullImage === true) {
  shellExec(`docker pull valkey/valkey:latest`);
  shellExec(`sudo podman pull valkey/valkey:latest`);
- if (!options.kubeadm)
- shellExec(
- `sudo ${
- options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
- } valkey/valkey:latest`,
- );
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull valkey/valkey:latest`);
  }
- shellExec(`kubectl delete statefulset service-valkey`);
+ shellExec(`kubectl delete statefulset valkey-service --ignore-not-found`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
  }
  if (options.full === true || options.mariadb === true) {
  shellExec(
- `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password`,
+ `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
- shellExec(`kubectl delete statefulset mariadb-statefulset`);
+ shellExec(`kubectl delete statefulset mariadb-statefulset --ignore-not-found`);
+
+ if (options.pullImage === true) {
+ shellExec(`docker pull mariadb:latest`);
+ shellExec(`sudo podman pull mariadb:latest`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image mariadb:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull mariadb:latest`);
+ }
+ if (options.kubeadm === true)
+ // This storage class is specific to kubeadm setup
+ shellExec(`kubectl apply -f ${underpostRoot}/manifests/mariadb/storage-class.yaml`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
  }
  if (options.full === true || options.mysql === true) {
  shellExec(
- `sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password`,
+ `sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
  shellExec(`sudo mkdir -p /mnt/data`);
  shellExec(`sudo chmod 777 /mnt/data`);
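A pattern worth noting throughout this hunk: appending `--dry-run=client -o yaml | kubectl apply -f -` makes secret creation idempotent, since a bare `kubectl create secret` fails when the secret already exists, while rendering the manifest client-side and piping it into `kubectl apply` creates or updates it. Sketched as a reusable helper (the `applySecretFromFiles` name and signature are illustrative, not part of the package):

```js
import { execSync } from 'node:child_process';

// Create or update a generic secret from local files, idempotently.
function applySecretFromFiles(name, files) {
  // files: { key: '/path/to/file', ... }
  const fromFile = Object.entries(files)
    .map(([key, path]) => `--from-file=${key}=${path}`)
    .join(' ');
  execSync(
    `kubectl create secret generic ${name} ${fromFile} --dry-run=client -o yaml | kubectl apply -f -`,
    { stdio: 'inherit' },
  );
}
```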
@@ -177,27 +305,27 @@ class UnderpostCluster {
  if (options.full === true || options.postgresql === true) {
  if (options.pullImage === true) {
  shellExec(`docker pull postgres:latest`);
- if (!options.kubeadm)
- shellExec(
- `sudo ${
- options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
- } docker-image postgres:latest`,
- );
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image postgres:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull postgres:latest`);
  }
  shellExec(
- `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+ `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql`);
  }
  if (options.mongodb4 === true) {
  if (options.pullImage === true) {
  shellExec(`docker pull mongo:4.4`);
- if (!options.kubeadm)
- shellExec(
- `sudo ${
- options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
- } docker-image mongo:4.4`,
- );
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image mongo:4.4`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull mongo:4.4`);
  }
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);
 
@@ -208,7 +336,7 @@ class UnderpostCluster {
  if (successInstance) {
  const mongoConfig = {
  _id: 'rs0',
- members: [{ _id: 0, host: '127.0.0.1:27017' }],
+ members: [{ _id: 0, host: 'mongodb-service:27017' }],
  };
 
  const [pod] = UnderpostDeploy.API.get(deploymentName);
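The host change here matters: `127.0.0.1:27017` is only reachable from inside the mongod pod itself, so registering the replica-set member under the in-cluster service DNS name (`mongodb-service:27017`) is what allows other pods to resolve and connect to it.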
@@ -218,20 +346,25 @@ class UnderpostCluster {
  --eval 'rs.initiate(${JSON.stringify(mongoConfig)})'`,
  );
  }
-
- // await UnderpostTest.API.statusMonitor('mongodb-1');
  } else if (options.full === true || options.mongodb === true) {
  if (options.pullImage === true) {
  shellExec(`docker pull mongo:latest`);
+ if (!options.kubeadm && !options.k3s)
+ // Only load if not kubeadm/k3s (Kind needs it)
+ shellExec(`sudo kind load docker-image mongo:latest`);
+ else if (options.kubeadm || options.k3s)
+ // For kubeadm/k3s, ensure it's available for containerd
+ shellExec(`sudo crictl pull mongo:latest`);
  }
  shellExec(
- `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile`,
+ `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile --dry-run=client -o yaml | kubectl apply -f -`,
  );
  shellExec(
- `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password`,
+ `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password --dry-run=client -o yaml | kubectl apply -f -`,
  );
- shellExec(`kubectl delete statefulset mongodb`);
+ shellExec(`kubectl delete statefulset mongodb --ignore-not-found`);
  if (options.kubeadm === true)
+ // This storage class is specific to kubeadm setup
  shellExec(`kubectl apply -f ${underpostRoot}/manifests/mongodb/storage-class.yaml`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb`);
 
@@ -258,8 +391,11 @@ class UnderpostCluster {
  if (options.full === true || options.contour === true) {
  shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
  if (options.kubeadm === true) {
+ // Envoy service might need NodePort for kubeadm
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/envoy-service-nodeport.yaml`);
  }
+ // K3s has a built-in LoadBalancer (Klipper-lb) that can expose services,
+ // so a specific NodePort service might not be needed or can be configured differently.
  }
 
  if (options.full === true || options.certManager === true) {
@@ -275,186 +411,239 @@ class UnderpostCluster {
  }
 
  const letsEncName = 'letsencrypt-prod';
- shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName}`);
+ shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName} --ignore-not-found`);
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
  }
  },
 
- config() {
+ /**
+ * @method config
+ * @description Configures host-level settings required for Kubernetes.
+ * This method ensures proper SELinux, Docker, Containerd, and Sysctl settings
+ * are applied for a healthy Kubernetes environment. It explicitly avoids
+ * iptables flushing commands to prevent conflicts with Kubernetes' own network management.
+ * @param {object} [options] - Configuration options.
+ * @param {string} [options.underpostRoot='.'] - The root directory of the underpost project.
+ */
+ config(options = { underpostRoot: '.' }) {
+ const { underpostRoot } = options;
+ console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
+ // Disable SELinux (permissive mode)
  shellExec(`sudo setenforce 0`);
  shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
- shellExec(`sudo systemctl enable --now docker`);
- shellExec(`sudo systemctl enable --now kubelet`);
- shellExec(`containerd config default > /etc/containerd/config.toml`);
- shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
- shellExec(`sudo service docker restart`);
+
+ // Enable and start Docker and Kubelet services
+ shellExec(`sudo systemctl enable --now docker || true`); // Docker might not be needed for K3s
+ shellExec(`sudo systemctl enable --now kubelet || true`); // Kubelet might not be needed for K3s (K3s uses its own agent)
+
+ // Configure containerd for SystemdCgroup and explicitly disable SELinux
+ // This is crucial for kubelet/k3s to interact correctly with containerd
+ shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
+ shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
+ // Add a new line to disable SELinux for the runc runtime
+ // shellExec(
+ // `sudo sed -i '/SystemdCgroup = true/a selinux_disabled = true' /etc/containerd/config.toml || true`,
+ // );
+ shellExec(`sudo service docker restart || true`); // Restart docker after containerd config changes
  shellExec(`sudo systemctl enable --now containerd.service`);
+ shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
+
+ // Disable swap (required by Kubernetes)
  shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+
+ // Reload systemd daemon to pick up new unit files/changes
  shellExec(`sudo systemctl daemon-reload`);
- shellExec(`sudo systemctl restart containerd`);
- shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+
+ // Enable bridge-nf-call-iptables for Kubernetes networking
+ // This ensures traffic through Linux bridges is processed by iptables (crucial for CNI)
+ for (const iptableConfPath of [
+ `/etc/sysctl.d/k8s.conf`,
+ `/etc/sysctl.d/99-k8s-ipforward.conf`,
+ `/etc/sysctl.d/99-k8s.conf`,
+ ])
+ shellExec(
+ `echo 'net.bridge.bridge-nf-call-iptables = 1
+ net.bridge.bridge-nf-call-ip6tables = 1
+ net.bridge.bridge-nf-call-arptables = 1
+ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
+ { silent: true },
+ );
+ // shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
+ // Apply NAT iptables rules.
+ shellExec(`${underpostRoot}/manifests/maas/nat-iptables.sh`, { silent: true });
+
+ // Disable firewalld (common cause of network issues in Kubernetes)
+ shellExec(`sudo systemctl stop firewalld || true`); // Stop if running
+ shellExec(`sudo systemctl disable firewalld || true`); // Disable from starting on boot
  },
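For reference, each of the three sysctl drop-in files written by the loop above ends up containing the same four keys:

```
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.ip_forward = 1
```

Since `sudo sysctl --system` remains commented out, these values are not applied by this method directly; they take effect on the next sysctl reload or reboot, unless the NAT script invoked afterwards happens to load them.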
- chown() {
+
+ /**
+ * @method chown
+ * @description Sets up kubectl configuration for the current user based on the cluster type.
+ * @param {string} clusterType - The type of Kubernetes cluster ('kubeadm', 'k3s', or 'kind').
+ */
+ chown(clusterType) {
+ console.log(`Setting up kubectl configuration for ${clusterType} cluster...`);
  shellExec(`mkdir -p ~/.kube`);
- shellExec(`sudo -E cp -i /etc/kubernetes/admin.conf ~/.kube/config`);
- shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
- },
- // This function performs a comprehensive reset of Kubernetes and container environments
- // on the host machine. Its primary goal is to clean up cluster components, temporary files,
- // and container data, ensuring a clean state for re-initialization or fresh deployments,
- // while also preventing the loss of the host machine's internet connectivity.
-
- reset() {
- // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
- // 'kind get clusters' lists all Kind clusters.
- // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
- // and executes 'kind delete cluster --name <cluster_name>' to remove them.
- shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
-
- // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
- // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
- // configuration files, and associated network rules (like iptables entries created by kubeadm).
- // The '-f' flag bypasses confirmation prompts.
- shellExec(`sudo kubeadm reset -f`);
-
- // Step 3: Remove specific CNI (Container Network Interface) configuration files.
- // This command targets and removes the configuration file for Flannel,
- // a common CNI plugin, which might be left behind after a reset.
- shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
-
- // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
- // This command would flush all iptables rules, including those crucial for the host's general
- // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
- // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
- // default network configuration.
-
- // Step 4: Remove the kubectl configuration file from the current user's home directory.
- // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
- // providing a clean slate for connecting to a new or re-initialized cluster.
- shellExec('sudo rm -f $HOME/.kube/config');
-
- // Step 5: Clear trash files from the root user's trash directory.
- // This is a general cleanup step to remove temporary or deleted files.
- shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
-
- // Step 6: Prune all unused Docker data.
- // 'docker system prune -a -f' removes:
- // - All stopped containers
- // - All unused networks
- // - All dangling images
- // - All build cache
- // - All unused volumes
- // This aggressively frees up disk space and removes temporary Docker artifacts.
- shellExec('sudo docker system prune -a -f');
-
- // Step 7: Stop the Docker daemon service.
- // This step is often necessary to ensure that Docker's files and directories
- // can be safely manipulated or moved in subsequent steps without conflicts.
- shellExec('sudo service docker stop');
-
- // Step 8: Aggressively remove container storage data for containerd and Docker.
- // These commands target the default storage locations for containerd and Docker,
- // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
- // This ensures a complete wipe of all container images, layers, and volumes.
- shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
- shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
- shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
- shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
- shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
-
- // Step 9: Re-configure Docker's default storage location (if desired).
- // These commands effectively move Docker's data directory from its default `/var/lib/docker`
- // to a new location (`/home/docker`) and create a symbolic link.
- // This is a specific customization to relocate Docker's storage.
- shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
- shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
- shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
- shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
-
- // Step 10: Prune all unused Podman data.
- // Similar to Docker pruning, these commands remove:
- // - All stopped containers
- // - All unused networks
- // - All unused images
- // - All unused volumes ('--volumes')
- // - The '--force' flag bypasses confirmation.
- // '--external' prunes external content not managed by Podman's default storage backend.
- shellExec(`sudo podman system prune -a -f`);
- shellExec(`sudo podman system prune --all --volumes --force`);
- shellExec(`sudo podman system prune --external --force`);
- shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
-
- // Step 11: Create and set permissions for Podman's custom storage directory.
- // This ensures the custom path `/home/containers/storage` exists and has correct permissions
- // before Podman attempts to use it.
- shellExec(`sudo mkdir -p /home/containers/storage`);
- shellExec('sudo chmod 0711 /home/containers/storage');
-
- // Step 12: Update Podman's storage configuration file.
- // This command uses 'sed' to modify `/etc/containers/storage.conf`,
- // changing the default storage path from `/var/lib/containers/storage`
- // to the customized `/home/containers/storage`.
- shellExec(
- `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
- );
 
- // Step 13: Reset Podman system settings.
- // This command resets Podman's system-wide configuration to its default state.
- shellExec(`sudo podman system reset -f`);
-
- // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
- // were previously removed. These sysctl settings (bridge-nf-call-iptables,
- // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
- // network traffic through Linux bridges to be processed by iptables.
- // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
- // Re-initializing Kubernetes will typically set these as needed, and leaving them
- // at their system default (or '1' if already configured) is safer for host
- // connectivity during a reset operation.
-
- // https://github.com/kubernetes-sigs/kind/issues/2886
- // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
- // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
- // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
-
- // Step 14: Remove the 'kind' Docker network.
- // This cleans up any network bridges or configurations specifically created by Kind.
- // shellExec(`docker network rm kind`);
-
- // Reset kubelet
- shellExec(`sudo systemctl stop kubelet`);
- shellExec(`sudo rm -rf /etc/kubernetes/*`);
- shellExec(`sudo rm -rf /var/lib/kubelet/*`);
- shellExec(`sudo rm -rf /etc/cni/net.d/*`);
- shellExec(`sudo systemctl daemon-reload`);
- shellExec(`sudo systemctl start kubelet`);
+ let kubeconfigPath;
+ if (clusterType === 'k3s') {
+ kubeconfigPath = '/etc/rancher/k3s/k3s.yaml';
+ } else if (clusterType === 'kubeadm') {
+ kubeconfigPath = '/etc/kubernetes/admin.conf';
+ } else {
+ // Default to kind if not specified or unknown
+ kubeconfigPath = ''; // Kind's kubeconfig is usually managed by kind itself, or merged
+ }
+
+ if (kubeconfigPath) {
+ shellExec(`sudo -E cp -i ${kubeconfigPath} ~/.kube/config`);
+ shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+ } else if (clusterType === 'kind') {
+ // For Kind, the kubeconfig is usually merged automatically or can be explicitly exported
+ // This command ensures it's merged into the default kubeconfig
+ shellExec(`kind get kubeconfig > ~/.kube/config || true`);
+ shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+ } else {
+ logger.warn('No specific kubeconfig path defined for this cluster type, or it is managed automatically.');
+ }
+ console.log('kubectl config set up successfully.');
  },
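A quick illustration of the cluster-aware dispatch above, using the kubeconfig paths defined in the method:

```js
UnderpostCluster.API.chown('k3s');     // copies /etc/rancher/k3s/k3s.yaml to ~/.kube/config
UnderpostCluster.API.chown('kubeadm'); // copies /etc/kubernetes/admin.conf instead
UnderpostCluster.API.chown('kind');    // exports the kubeconfig that kind manages itself
```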
 
- getResourcesCapacity(kubeadm = false) {
- const resources = {};
- const info = false
- ? `Capacity:
- cpu: 8
- ephemeral-storage: 153131976Ki
- hugepages-1Gi: 0
- hugepages-2Mi: 0
- memory: 11914720Ki
- pods: 110
- Allocatable:
- cpu: 8
- ephemeral-storage: 153131976Ki
- hugepages-1Gi: 0
- hugepages-2Mi: 0
- memory: 11914720Ki
- pods: `
- : shellExec(
- `kubectl describe node ${
- kubeadm === true ? os.hostname() : 'kind-worker'
- } | grep -E '(Allocatable:|Capacity:)' -A 6`,
+ /**
+ * @method safeReset
+ * @description Performs a complete reset of the Kubernetes cluster and its container environments.
+ * This version focuses on correcting persistent permission errors (such as 'permission denied'
+ * in coredns) by restoring SELinux security contexts and safely cleaning up cluster artifacts.
+ * @param {object} [options] - Configuration options for the reset.
+ * @param {string} [options.underpostRoot] - The root path of the underpost project.
+ */
+ async safeReset(options = { underpostRoot: '.' }) {
+ logger.info('Starting a safe and comprehensive reset of Kubernetes and container environments...');
+
+ try {
+ // Phase 0: Truncate large logs under /var/log to free up immediate space
+ logger.info('Phase 0/7: Truncating large log files under /var/log...');
+ try {
+ const cleanPath = `/var/log/`;
+ const largeLogsFiles = shellExec(
+ `sudo du -sh ${cleanPath}* | awk '{if ($1 ~ /G$/ && ($1+0) > 1) print}' | sort -rh`,
  {
  stdout: true,
- silent: true,
  },
  );
+ for (const pathLog of largeLogsFiles
+ .split(`\n`)
+ .map((p) => p.split(cleanPath)[1])
+ .filter((p) => p)) {
+ shellExec(`sudo rm -rf ${cleanPath}${pathLog}`);
+ }
+ } catch (err) {
+ logger.warn(` -> Error truncating log files: ${err.message}. Continuing with reset.`);
+ }
+
+ // Phase 1: Clean up Persistent Volumes with hostPath
+ // This targets data created by Kubernetes Persistent Volumes that use hostPath.
+ logger.info('Phase 1/7: Cleaning Kubernetes hostPath volumes...');
+ try {
+ const pvListJson = shellExec(`kubectl get pv -o json || echo '{"items":[]}'`, { stdout: true, silent: true });
+ const pvList = JSON.parse(pvListJson);
+
+ if (pvList.items && pvList.items.length > 0) {
+ for (const pv of pvList.items) {
+ // Check if the PV uses hostPath and delete its contents
+ if (pv.spec.hostPath && pv.spec.hostPath.path) {
+ const hostPath = pv.spec.hostPath.path;
+ logger.info(`Removing data from host path for PV '${pv.metadata.name}': ${hostPath}`);
+ shellExec(`sudo rm -rf ${hostPath}/* || true`);
+ }
+ }
+ } else {
+ logger.info('No Persistent Volumes found with hostPath to clean up.');
+ }
+ } catch (error) {
+ logger.error('Failed to clean up Persistent Volumes:', error);
+ }
+ // Phase 2: Restore SELinux and stop services
+ // This is critical for fixing persistent 'permission denied' errors (e.g., in coredns).
+ // Enable SELinux permissive mode and restore file contexts.
+ logger.info('Phase 2/7: Stopping services and fixing SELinux...');
+ logger.info(' -> Ensuring SELinux is in permissive mode...');
+ shellExec(`sudo setenforce 0 || true`);
+ shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config || true`);
+ logger.info(' -> Restoring SELinux contexts for container data directories...');
+ // The 'restorecon' command corrects file system security contexts.
+ shellExec(`sudo restorecon -Rv /var/lib/containerd || true`);
+ shellExec(`sudo restorecon -Rv /var/lib/kubelet || true`);
+
+ logger.info(' -> Stopping kubelet, docker, and podman services...');
+ shellExec('sudo systemctl stop kubelet || true');
+ shellExec('sudo systemctl stop docker || true');
+ shellExec('sudo systemctl stop podman || true');
+ // Safely unmount pod filesystems to avoid errors.
+ shellExec('sudo umount -f /var/lib/kubelet/pods/*/* || true');
+
+ // Phase 3: Execute official uninstallation commands
+ logger.info('Phase 3/7: Executing official reset and uninstallation commands...');
+ logger.info(' -> Executing kubeadm reset...');
+ shellExec('sudo kubeadm reset --force || true');
+ logger.info(' -> Executing K3s uninstallation script if it exists...');
+ shellExec('sudo /usr/local/bin/k3s-uninstall.sh || true');
+ logger.info(' -> Deleting Kind clusters...');
+ shellExec('kind get clusters | xargs -r -t -n1 kind delete cluster || true');
+
+ // Phase 4: File system cleanup
+ logger.info('Phase 4/7: Cleaning up remaining file system artifacts...');
+ // Remove any leftover configurations and data.
+ shellExec('sudo rm -rf /etc/kubernetes/* || true');
+ shellExec('sudo rm -rf /etc/cni/net.d/* || true');
+ shellExec('sudo rm -rf /var/lib/kubelet/* || true');
+ shellExec('sudo rm -rf /var/lib/cni/* || true');
+ shellExec('sudo rm -rf /var/lib/docker/* || true');
+ shellExec('sudo rm -rf /var/lib/containerd/* || true');
+ shellExec('sudo rm -rf /var/lib/containers/storage/* || true');
+ // Clean up the current user's kubeconfig.
+ shellExec('rm -rf $HOME/.kube || true');
+
+ // Phase 5: Host network cleanup
+ logger.info('Phase 5/7: Cleaning up host network configurations...');
+ // Remove iptables rules and CNI network interfaces.
+ shellExec('sudo iptables -F || true');
+ shellExec('sudo iptables -t nat -F || true');
+ // Restore iptables rules
+ shellExec(`chmod +x ${options.underpostRoot}/manifests/maas/nat-iptables.sh`);
+ shellExec(`${options.underpostRoot}/manifests/maas/nat-iptables.sh`, { silent: true });
+ shellExec('sudo ip link del cni0 || true');
+ shellExec('sudo ip link del flannel.1 || true');
+
+ logger.info('Phase 6/7: Clean up images');
+ shellExec(`podman rmi $(podman images -qa) --force`);
+
+ // Phase 7: Reload daemon and finalize
+ logger.info('Phase 7/7: Reloading the system daemon and finalizing...');
+ // shellExec('sudo systemctl daemon-reload');
+ UnderpostCluster.API.config();
+ logger.info('Safe and complete reset finished. The system is ready for a new cluster initialization.');
+ } catch (error) {
+ logger.error(`Error during reset: ${error.message}`);
+ console.error(error);
+ }
+ },
+
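As wired into `init` above, this reset path is reached via `options.reset`; calling it directly would look like the sketch below. Note that the operation is destructive by design, wiping cluster state, container storage, and the user's kubeconfig:

```js
// Equivalent to init(undefined, { reset: true }) with the project root resolved.
await UnderpostCluster.API.safeReset({ underpostRoot: '.' });
```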
+ /**
+ * @method getResourcesCapacity
+ * @description Retrieves and returns the allocatable CPU and memory resources
+ * of the Kubernetes node.
+ * @param {boolean} [isKubeadmOrK3s=false] - If true, assumes a kubeadm or k3s-managed node;
+ * otherwise, assumes a Kind worker node.
+ * @returns {object} An object containing CPU and memory resources with values and units.
+ */
+ getResourcesCapacity(isKubeadmOrK3s = false) {
+ const resources = {};
+ const nodeName = isKubeadmOrK3s ? os.hostname() : 'kind-worker';
+ const info = shellExec(`kubectl describe node ${nodeName} | grep -E '(Allocatable:|Capacity:)' -A 6`, {
+ stdout: true,
+ silent: true,
+ });
  info
  .split('Allocatable:')[1]
  .split('\n')
@@ -474,18 +663,26 @@ Allocatable:
 
  return resources;
  },
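A brief usage sketch for the refactored capacity helper; per the JSDoc, the boolean selects between describing the current host (kubeadm or K3s) and the `kind-worker` node, and the returned object carries values and units parsed from the node's Allocatable section:

```js
// true  -> describe the local hostname (kubeadm/K3s node)
// false -> describe the 'kind-worker' node (Kind cluster)
const capacity = UnderpostCluster.API.getResourcesCapacity(true);
logger.info('', capacity); // this is what init logs when options.infoCapacity is set
```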
+ /**
+ * @method initHost
+ * @description Installs essential host-level prerequisites for Kubernetes (Docker, Podman, Kind, Kubeadm, Helm).
+ */
  initHost() {
- // Install docker
- shellExec(`sudo dnf -y install dnf-plugins-core
- sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
+ const archData = UnderpostBaremetal.API.getHostArch();
+ logger.info('Installing essential host-level prerequisites for Kubernetes...', archData);
+ // Install Docker and its dependencies
+ shellExec(`sudo dnf -y install dnf-plugins-core dbus-x11`);
+ shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
  shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
- // Install podman
+
+ // Install Podman
  shellExec(`sudo dnf -y install podman`);
- // Install kind
- shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
+
+ // Install Kind (Kubernetes in Docker)
+ shellExec(`[ $(uname -m) = ${archData.name} ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-${archData.alias}
  chmod +x ./kind
  sudo mv ./kind /bin/kind`);
- // Install kubeadm
+ // Install Kubernetes tools: Kubeadm, Kubelet, and Kubectl
  shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
  [kubernetes]
  name=Kubernetes
@@ -496,12 +693,78 @@ gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
  exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
  EOF`);
  shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
- // Install helm
- shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
- chmod 700 get_helm.sh
- ./get_helm.sh
- chmod +x /usr/local/bin/helm
- sudo mv /usr/local/bin/helm /bin/helm`);
+
+ // Install Helm
+ shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`);
+ shellExec(`chmod 700 get_helm.sh`);
+ shellExec(`./get_helm.sh`);
+ shellExec(`chmod +x /usr/local/bin/helm`);
+ shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
+ shellExec(`sudo rm -rf get_helm.sh`);
+ console.log('Host prerequisites installed successfully.');
+ },
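The two host lifecycle methods are designed as a pair; a minimal round-trip sketch (direct API calls shown here; the corresponding `initHost`/`uninstallHost` options on `init` are documented in the JSDoc at the top of the class):

```js
// Install Docker, Podman, Kind, kubeadm/kubelet/kubectl, and Helm on the host.
UnderpostCluster.API.initHost();

// ...later, remove everything initHost installed.
UnderpostCluster.API.uninstallHost();
```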
+ /**
+ * @method uninstallHost
+ * @description Uninstalls all host components installed by initHost.
+ * This includes Docker, Podman, Kind, Kubeadm, Kubelet, Kubectl, and Helm.
+ */
+ uninstallHost() {
+ console.log('Uninstalling host components: Docker, Podman, Kind, Kubeadm, Kubelet, Kubectl, Helm.');
+
+ // Remove Kind
+ console.log('Removing Kind...');
+ shellExec(`sudo rm -f /bin/kind || true`);
+
+ // Remove Helm
+ console.log('Removing Helm...');
+ shellExec(`sudo rm -f /usr/local/bin/helm || true`);
+ shellExec(`sudo rm -f /usr/local/bin/helm.sh || true`); // clean up the install script if it exists
+
+ // Remove Docker and its dependencies
+ console.log('Removing Docker, containerd, and related packages...');
+ shellExec(
+ `sudo dnf -y remove docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin || true`,
+ );
+
+ // Remove Podman
+ console.log('Removing Podman...');
+ shellExec(`sudo dnf -y remove podman || true`);
+
+ // Remove Kubeadm, Kubelet, and Kubectl
+ console.log('Removing Kubernetes tools...');
+ shellExec(`sudo yum remove -y kubelet kubeadm kubectl || true`);
+
+ // Remove Kubernetes repo file
+ console.log('Removing Kubernetes repository configuration...');
+ shellExec(`sudo rm -f /etc/yum.repos.d/kubernetes.repo || true`);
+
+ // Clean up Kubeadm config and data directories
+ console.log('Cleaning up Kubernetes configuration directories...');
+ shellExec(`sudo rm -rf /etc/kubernetes/pki || true`);
+ shellExec(`sudo rm -rf ~/.kube || true`);
+
+ // Stop and disable services
+ console.log('Stopping and disabling services...');
+ shellExec(`sudo systemctl stop docker.service || true`);
+ shellExec(`sudo systemctl disable docker.service || true`);
+ shellExec(`sudo systemctl stop containerd.service || true`);
+ shellExec(`sudo systemctl disable containerd.service || true`);
+ shellExec(`sudo systemctl stop kubelet.service || true`);
+ shellExec(`sudo systemctl disable kubelet.service || true`);
+
+ // Clean up config files
+ console.log('Removing host configuration files...');
+ shellExec(`sudo rm -f /etc/containerd/config.toml || true`);
+ shellExec(`sudo rm -f /etc/sysctl.d/k8s.conf || true`);
+ shellExec(`sudo rm -f /etc/sysctl.d/99-k8s-ipforward.conf || true`);
+ shellExec(`sudo rm -f /etc/sysctl.d/99-k8s.conf || true`);
+
+ // Restore SELinux to enforcing (commands left commented out by default)
+ console.log('Restoring SELinux to enforcing mode...');
+ // shellExec(`sudo setenforce 1`);
+ // shellExec(`sudo sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config`);
+
+ console.log('Uninstall process completed.');
  },
  };
  }