@underpostnet/underpost 2.99.8 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/.env.development +2 -1
  2. package/.env.production +1 -0
  3. package/.env.test +2 -1
  4. package/.github/workflows/publish.ci.yml +18 -34
  5. package/.vscode/extensions.json +8 -50
  6. package/.vscode/settings.json +0 -77
  7. package/CHANGELOG.md +67 -1
  8. package/{cli.md → CLI-HELP.md} +48 -41
  9. package/README.md +3 -3
  10. package/bin/build.js +1 -15
  11. package/bin/deploy.js +4 -133
  12. package/bin/file.js +1 -5
  13. package/bin/zed.js +63 -2
  14. package/jsdoc.json +1 -2
  15. package/manifests/cronjobs/dd-cron/dd-cron-backup.yaml +1 -1
  16. package/manifests/cronjobs/dd-cron/dd-cron-dns.yaml +1 -1
  17. package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
  18. package/manifests/deployment/dd-test-development/deployment.yaml +2 -2
  19. package/manifests/deployment/fastapi/initial_data.sh +4 -52
  20. package/manifests/ipfs/configmap.yaml +57 -0
  21. package/manifests/ipfs/headless-service.yaml +35 -0
  22. package/manifests/ipfs/kustomization.yaml +8 -0
  23. package/manifests/ipfs/statefulset.yaml +149 -0
  24. package/manifests/ipfs/storage-class.yaml +9 -0
  25. package/package.json +5 -5
  26. package/scripts/k3s-node-setup.sh +89 -0
  27. package/scripts/lxd-vm-setup.sh +23 -0
  28. package/scripts/rocky-setup.sh +1 -13
  29. package/src/cli/baremetal.js +7 -9
  30. package/src/cli/cluster.js +72 -121
  31. package/src/cli/deploy.js +8 -5
  32. package/src/cli/index.js +31 -30
  33. package/src/cli/ipfs.js +184 -0
  34. package/src/cli/lxd.js +191 -236
  35. package/src/cli/repository.js +4 -1
  36. package/src/client/components/core/VanillaJs.js +0 -25
  37. package/src/client/services/user/user.management.js +0 -5
  38. package/src/client/services/user/user.service.js +1 -1
  39. package/src/index.js +12 -1
  40. package/src/server/client-build-docs.js +26 -7
  41. package/src/server/conf.js +1 -1
  42. package/src/server/logger.js +22 -10
  43. package/.vscode/zed.keymap.json +0 -39
  44. package/.vscode/zed.settings.json +0 -20
  45. package/manifests/lxd/underpost-setup.sh +0 -163
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "type": "module",
3
3
  "main": "src/index.js",
4
4
  "name": "@underpostnet/underpost",
5
- "version": "2.99.8",
5
+ "version": "3.0.0",
6
6
  "description": "pwa api rest template",
7
7
  "scripts": {
8
8
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -76,13 +76,13 @@
76
76
  "helmet": "^8.1.0",
77
77
  "html-minifier-terser": "^7.2.0",
78
78
  "http-proxy-middleware": "^2.0.6",
79
- "ignore-walk": "^6.0.4",
79
+ "ignore-walk": "^8.0.0",
80
80
  "iovalkey": "^0.2.1",
81
81
  "json-colorizer": "^2.2.2",
82
82
  "jsonwebtoken": "^9.0.2",
83
83
  "mariadb": "^3.2.2",
84
84
  "marked": "^12.0.2",
85
- "mocha": "^10.8.2",
85
+ "mocha": "^11.3.0",
86
86
  "mongoose": "^8.9.5",
87
87
  "morgan": "^1.10.0",
88
88
  "nodemailer": "^7.0.9",
@@ -92,12 +92,12 @@
92
92
  "prom-client": "^15.1.2",
93
93
  "read": "^2.1.0",
94
94
  "rrule": "^2.8.1",
95
- "shelljs": "^0.8.5",
95
+ "shelljs": "^0.10.0",
96
96
  "sitemap": "^7.1.1",
97
97
  "socket.io": "^4.8.0",
98
98
  "sortablejs": "^1.15.0",
99
99
  "split-file": "^2.3.0",
100
- "swagger-autogen": "^2.23.7",
100
+ "swagger-autogen": "^2.9.2",
101
101
  "swagger-ui-express": "^5.0.0",
102
102
  "uglify-js": "^3.17.4",
103
103
  "validator": "^13.11.0",
@@ -0,0 +1,89 @@
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ # ---------------------------------------------------------------------------
5
+ # Underpost K3s Node Setup
6
+ # Usage:
7
+ # --control Initialize as K3s control plane node (default)
8
+ # --worker Initialize as K3s worker node
9
+ # --control-ip=<ip> Control plane IP (required for --worker)
10
+ # --token=<token> K3s node token (required for --worker)
11
+ # ---------------------------------------------------------------------------
12
+
13
+ ROLE="control"
14
+ CONTROL_IP=""
15
+ K3S_TOKEN=""
16
+
17
+ for arg in "$@"; do
18
+ case $arg in
19
+ --worker) ROLE="worker" ;;
20
+ --control) ROLE="control" ;;
21
+ --control-ip=*) CONTROL_IP="${arg#*=}" ;;
22
+ --token=*) K3S_TOKEN="${arg#*=}" ;;
23
+ esac
24
+ done
25
+ # ---------------------------------------------------------------------------
26
+ # NVM and Node.js
27
+ # ---------------------------------------------------------------------------
28
+ echo "Installing NVM and Node.js v24.10.0..."
29
+
30
+ curl -o- https://cdn.jsdelivr.net/gh/nvm-sh/nvm@v0.40.1/install.sh | bash
31
+
32
+ export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
33
+ [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
34
+
35
+ nvm install 24.10.0
36
+ nvm use 24.10.0
37
+
38
+ echo "
39
+ ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
40
+ ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
41
+ ██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
42
+ ██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔═══╝░██║░░██║░╚═══██╗░░░██║░░░
43
+ ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
44
+ ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
45
+
46
+ Installing underpost VM node...
47
+ "
48
+
49
+ npm install -g underpost
50
+
51
+ cd /home/dd/engine
52
+
53
+ echo "Applying host configuration..."
54
+
55
+ underpost install
56
+
57
+ node bin run secret
58
+
59
+ node bin cluster --dev --config
60
+
61
+ if [ "$ROLE" = "control" ]; then
62
+ echo "Initializing K3s control plane..."
63
+ node bin cluster --dev --k3s
64
+
65
+ echo ""
66
+ echo "K3s control plane is ready."
67
+ echo "Node token (share with workers to join this cluster):"
68
+ sudo cat /var/lib/rancher/k3s/server/node-token
69
+ echo ""
70
+ echo "Control plane IP addresses:"
71
+ ip -4 addr show scope global | grep inet | awk '{print $2}' | cut -d/ -f1
72
+
73
+ elif [ "$ROLE" = "worker" ]; then
74
+ if [ -z "$CONTROL_IP" ] || [ -z "$K3S_TOKEN" ]; then
75
+ echo "ERROR: --control-ip and --token are required for worker role."
76
+ echo "Usage: bash k3s-node-setup.sh --worker --control-ip=<ip> --token=<token>"
77
+ exit 1
78
+ fi
79
+
80
+ echo "Joining K3s cluster at https://${CONTROL_IP}:6443..."
81
+ curl -sfL https://get.k3s.io | \
82
+ K3S_URL="https://${CONTROL_IP}:6443" \
83
+ K3S_TOKEN="${K3S_TOKEN}" \
84
+ sh -s - agent
85
+
86
+ echo ""
87
+ echo "K3s worker node joined the cluster at https://${CONTROL_IP}:6443 successfully."
88
+ sudo systemctl status k3s-agent --no-pager
89
+ fi
@@ -0,0 +1,23 @@
1
+
2
+ echo "Expanding /dev/sda2 and resizing filesystem..."
3
+
4
+ if ! command -v parted &>/dev/null; then
5
+ sudo dnf install -y parted
6
+ fi
7
+
8
+ sudo parted /dev/sda ---pretend-input-tty <<EOF
9
+ unit s
10
+ resizepart 2 100%
11
+ Yes
12
+ quit
13
+ EOF
14
+
15
+ sudo resize2fs /dev/sda2
16
+ echo "Disk resized."
17
+
18
+ echo "Installing essential packages..."
19
+ sudo dnf install -y tar bzip2 git curl jq epel-release
20
+ sudo dnf -y update
21
+
22
+ echo "Loading br_netfilter module..."
23
+ sudo modprobe br_netfilter
@@ -103,16 +103,4 @@ echo "[+] Cleanup: remove unnecessary packages and old metadata"
103
103
  dnf autoremove -y
104
104
  dnf clean all
105
105
 
106
- cat <<EOF
107
-
108
- Installation complete.
109
- - To allow SSH access (if this is a VM or server), open port 22 in firewalld:
110
- sudo firewall-cmd --add-service=ssh --permanent && sudo firewall-cmd --reload
111
- - If you installed Development Tools, you will have gcc, make, etc.
112
-
113
- Examples:
114
- sudo ./scripts/rocky-setup.sh --install-dev
115
- INSTALL_DEV=1 sudo ./scripts/rocky-setup.sh
116
-
117
- Customize PACKAGES=(...) inside this script according to your needs (docker, podman, kube, mssql-tools, etc.).
118
- EOF
106
+ echo "[+] Setup complete! Rocky Linux is now configured with the recommended base packages."
@@ -534,9 +534,6 @@ rm -rf ${artifacts.join(' ')}`);
534
534
  if (options.controlServerDbInstall === true) {
535
535
  // Deploy the database provider and manage MAAS database.
536
536
  shellExec(`node ${underpostRoot}/bin/deploy ${dbProviderId} install`);
537
- shellExec(
538
- `node ${underpostRoot}/bin/deploy pg-drop-db ${process.env.DB_PG_MAAS_NAME} ${process.env.DB_PG_MAAS_USER}`,
539
- );
540
537
  shellExec(`node ${underpostRoot}/bin/deploy maas-db`);
541
538
  return;
542
539
  }
@@ -1150,8 +1147,9 @@ rm -rf ${artifacts.join(' ')}`);
1150
1147
  machine: machine ? machine.system_id : null,
1151
1148
  });
1152
1149
 
1153
- const { discovery, machine: discoveredMachine } =
1154
- await Underpost.baremetal.commissionMonitor(commissionMonitorPayload);
1150
+ const { discovery, machine: discoveredMachine } = await Underpost.baremetal.commissionMonitor(
1151
+ commissionMonitorPayload,
1152
+ );
1155
1153
  if (discoveredMachine) machine = discoveredMachine;
1156
1154
  }
1157
1155
  },
@@ -2496,10 +2494,10 @@ fi
2496
2494
  const discoverHostname = discovery.hostname
2497
2495
  ? discovery.hostname
2498
2496
  : discovery.mac_organization
2499
- ? discovery.mac_organization
2500
- : discovery.domain
2501
- ? discovery.domain
2502
- : `generic-host-${s4()}${s4()}`;
2497
+ ? discovery.mac_organization
2498
+ : discovery.domain
2499
+ ? discovery.domain
2500
+ : `generic-host-${s4()}${s4()}`;
2503
2501
 
2504
2502
  console.log(discoverHostname.bgBlue.bold.white);
2505
2503
  console.log('ip target:'.green + ipAddress, 'ip discovered:'.green + discovery.ip);
@@ -36,6 +36,7 @@ class UnderpostCluster {
36
36
  * @param {boolean} [options.mysql=false] - Deploy MySQL.
37
37
  * @param {boolean} [options.postgresql=false] - Deploy PostgreSQL.
38
38
  * @param {boolean} [options.valkey=false] - Deploy Valkey.
39
+ * @param {boolean} [options.ipfs=false] - Deploy ipfs-cluster statefulset.
39
40
  * @param {boolean} [options.full=false] - Deploy a full set of common components.
40
41
  * @param {boolean} [options.info=false] - Display extensive Kubernetes cluster information.
41
42
  * @param {boolean} [options.certManager=false] - Deploy Cert-Manager for certificate management.
@@ -57,10 +58,10 @@ class UnderpostCluster {
57
58
  * @param {string} [options.prom=''] - Initialize the cluster with a Prometheus Operator deployment and monitor scrap for specified hosts.
58
59
  * @param {boolean} [options.uninstallHost=false] - Uninstall all host components.
59
60
  * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
60
- * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm or K3s join).
61
61
  * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
62
62
  * @param {boolean} [options.removeVolumeHostPaths=false] - Remove data from host paths used by Persistent Volumes.
63
63
  * @param {string} [options.hosts] - Set custom hosts entries.
64
+ * @param {string} [options.replicas] - Set the number of replicas for certain deployments.
64
65
  * @memberof UnderpostCluster
65
66
  */
66
67
  async init(
@@ -73,6 +74,7 @@ class UnderpostCluster {
73
74
  mysql: false,
74
75
  postgresql: false,
75
76
  valkey: false,
77
+ ipfs: false,
76
78
  full: false,
77
79
  info: false,
78
80
  certManager: false,
@@ -94,10 +96,10 @@ class UnderpostCluster {
94
96
  prom: '',
95
97
  uninstallHost: false,
96
98
  config: false,
97
- worker: false,
98
99
  chown: false,
99
100
  removeVolumeHostPaths: false,
100
101
  hosts: '',
102
+ replicas: '',
101
103
  },
102
104
  ) {
103
105
  // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
@@ -140,11 +142,14 @@ class UnderpostCluster {
140
142
  }
141
143
 
142
144
  // Reset Kubernetes cluster components (Kind/Kubeadm/K3s) and container runtimes
143
- if (options.reset === true)
145
+ if (options.reset === true) {
146
+ const clusterType = options.k3s === true ? 'k3s' : options.kubeadm === true ? 'kubeadm' : 'kind';
144
147
  return await Underpost.cluster.safeReset({
145
148
  underpostRoot,
146
149
  removeVolumeHostPaths: options.removeVolumeHostPaths,
150
+ clusterType,
147
151
  });
152
+ }
148
153
 
149
154
  // Check if a cluster (Kind, Kubeadm, or K3s) is already initialized
150
155
  const alreadyKubeadmCluster = Underpost.deploy.get('calico-kube-controllers')[0];
@@ -153,66 +158,20 @@ class UnderpostCluster {
153
158
  const alreadyK3sCluster = Underpost.deploy.get('svclb-traefik')[0];
154
159
 
155
160
  // --- Kubeadm/Kind/K3s Cluster Initialization ---
156
- // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
157
- // It prevents re-initialization if a cluster is already detected.
158
- if (!options.worker && !alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
161
+ if (!alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
159
162
  Underpost.cluster.config();
160
163
  if (options.k3s === true) {
161
164
  logger.info('Initializing K3s control plane...');
162
165
  // Install K3s
163
- console.log('Installing K3s...');
166
+ logger.info('Installing K3s...');
164
167
  shellExec(`curl -sfL https://get.k3s.io | sh -`);
165
- console.log('K3s installation completed.');
166
-
167
- // Move k3s binary to /bin/k3s and make it executable
168
- shellExec(`sudo mv /usr/local/bin/k3s /bin/k3s`);
169
- shellExec(`sudo chmod +x /bin/k3s`);
170
- console.log('K3s binary moved to /bin/k3s and made executable.');
168
+ logger.info('K3s installation completed.');
171
169
 
172
- // Configure kubectl for the current user for K3s *before* checking readiness
173
- // This ensures kubectl can find the K3s kubeconfig immediately after K3s installation.
174
170
  Underpost.cluster.chown('k3s');
175
171
 
176
- // Wait for K3s to be ready
177
172
  logger.info('Waiting for K3s to be ready...');
178
- let k3sReady = false;
179
- let retries = 0;
180
- const maxRetries = 20; // Increased retries for K3s startup
181
- const delayMs = 5000; // 5 seconds
182
-
183
- while (!k3sReady && retries < maxRetries) {
184
- try {
185
- // Explicitly use KUBECONFIG for kubectl commands to ensure it points to K3s config
186
- const nodes = shellExec(`KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get nodes -o json`, {
187
- stdout: true,
188
- silent: true,
189
- });
190
- const parsedNodes = JSON.parse(nodes);
191
- if (
192
- parsedNodes.items.some((node) =>
193
- node.status.conditions.some((cond) => cond.type === 'Ready' && cond.status === 'True'),
194
- )
195
- ) {
196
- k3sReady = true;
197
- logger.info('K3s cluster is ready.');
198
- } else {
199
- logger.info(`K3s not yet ready. Retrying in ${delayMs / 1000} seconds...`);
200
- await new Promise((resolve) => setTimeout(resolve, delayMs));
201
- }
202
- } catch (error) {
203
- logger.info(`Error checking K3s status: ${error.message}. Retrying in ${delayMs / 1000} seconds...`);
204
- await new Promise((resolve) => setTimeout(resolve, delayMs));
205
- }
206
- retries++;
207
- }
208
-
209
- if (!k3sReady) {
210
- logger.error('K3s cluster did not become ready in time. Please check the K3s logs.');
211
- return;
212
- }
213
-
214
- // K3s includes local-path-provisioner by default, so no need to install explicitly.
215
- logger.info('K3s comes with local-path-provisioner by default. Skipping explicit installation.');
173
+ shellExec(`sudo systemctl is-active --wait k3s || sudo systemctl wait --for=active k3s.service`);
174
+ logger.info('K3s service is active.');
216
175
  } else if (options.kubeadm === true) {
217
176
  logger.info('Initializing Kubeadm control plane...');
218
177
  // Set default values if not provided
@@ -254,14 +213,6 @@ class UnderpostCluster {
254
213
  );
255
214
  Underpost.cluster.chown('kind'); // Pass 'kind' to chown
256
215
  }
257
- } else if (options.worker === true) {
258
- // Worker node specific configuration (kubeadm join command needs to be executed separately)
259
- logger.info('Worker node configuration applied. Awaiting join command...');
260
- // No direct cluster initialization here for workers. The `kubeadm join` or `k3s agent` command
261
- // needs to be run on the worker after the control plane is up and a token is created.
262
- // This part of the script is for general worker setup, not the join itself.
263
- } else {
264
- logger.warn('Cluster already initialized or worker flag not set for worker node.');
265
216
  }
266
217
 
267
218
  // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
@@ -307,36 +258,21 @@ EOF
307
258
  }
308
259
 
309
260
  if (options.full === true || options.valkey === true) {
310
- if (options.pullImage === true) {
311
- // shellExec(`sudo podman pull valkey/valkey:latest`);
312
- if (!options.kubeadm && !options.k3s) {
313
- // Only load if not kubeadm/k3s (Kind needs it)
314
- shellExec(`docker pull valkey/valkey:latest`);
315
- shellExec(`sudo kind load docker-image valkey/valkey:latest`);
316
- } else if (options.kubeadm || options.k3s)
317
- // For kubeadm/k3s, ensure it's available for containerd
318
- shellExec(`sudo crictl pull valkey/valkey:latest`);
319
- }
261
+ if (options.pullImage === true) Underpost.cluster.pullImage('valkey/valkey:latest', options);
320
262
  shellExec(`kubectl delete statefulset valkey-service -n ${options.namespace} --ignore-not-found`);
321
263
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey -n ${options.namespace}`);
322
264
  await Underpost.test.statusMonitor('valkey-service', 'Running', 'pods', 1000, 60 * 10);
323
265
  }
266
+ if (options.ipfs) {
267
+ await Underpost.ipfs.deploy(options, underpostRoot);
268
+ }
324
269
  if (options.full === true || options.mariadb === true) {
325
270
  shellExec(
326
271
  `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
327
272
  );
328
273
  shellExec(`kubectl delete statefulset mariadb-statefulset -n ${options.namespace} --ignore-not-found`);
329
274
 
330
- if (options.pullImage === true) {
331
- // shellExec(`sudo podman pull mariadb:latest`);
332
- if (!options.kubeadm && !options.k3s) {
333
- // Only load if not kubeadm/k3s (Kind needs it)
334
- shellExec(`docker pull mariadb:latest`);
335
- shellExec(`sudo kind load docker-image mariadb:latest`);
336
- } else if (options.kubeadm || options.k3s)
337
- // For kubeadm/k3s, ensure it's available for containerd
338
- shellExec(`sudo crictl pull mariadb:latest`);
339
- }
275
+ if (options.pullImage === true) Underpost.cluster.pullImage('mariadb:latest', options);
340
276
  shellExec(`kubectl apply -f ${underpostRoot}/manifests/mariadb/storage-class.yaml -n ${options.namespace}`);
341
277
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb -n ${options.namespace}`);
342
278
  }
@@ -350,30 +286,14 @@ EOF
350
286
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mysql -n ${options.namespace}`);
351
287
  }
352
288
  if (options.full === true || options.postgresql === true) {
353
- if (options.pullImage === true) {
354
- if (!options.kubeadm && !options.k3s) {
355
- // Only load if not kubeadm/k3s (Kind needs it)
356
- shellExec(`docker pull postgres:latest`);
357
- shellExec(`sudo kind load docker-image postgres:latest`);
358
- } else if (options.kubeadm || options.k3s)
359
- // For kubeadm/k3s, ensure it's available for containerd
360
- shellExec(`sudo crictl pull postgres:latest`);
361
- }
289
+ if (options.pullImage === true) Underpost.cluster.pullImage('postgres:latest', options);
362
290
  shellExec(
363
291
  `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
364
292
  );
365
293
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql -n ${options.namespace}`);
366
294
  }
367
295
  if (options.mongodb4 === true) {
368
- if (options.pullImage === true) {
369
- if (!options.kubeadm && !options.k3s) {
370
- // Only load if not kubeadm/k3s (Kind needs it)
371
- shellExec(`docker pull mongo:4.4`);
372
- shellExec(`sudo kind load docker-image mongo:4.4`);
373
- } else if (options.kubeadm || options.k3s)
374
- // For kubeadm/k3s, ensure it's available for containerd
375
- shellExec(`sudo crictl pull mongo:4.4`);
376
- }
296
+ if (options.pullImage === true) Underpost.cluster.pullImage('mongo:4.4', options);
377
297
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4 -n ${options.namespace}`);
378
298
 
379
299
  const deploymentName = 'mongodb-deployment';
@@ -395,15 +315,7 @@ EOF
395
315
  );
396
316
  }
397
317
  } else if (options.full === true || options.mongodb === true) {
398
- if (options.pullImage === true) {
399
- if (!options.kubeadm && !options.k3s) {
400
- // Only load if not kubeadm/k3s (Kind needs it)
401
- shellExec(`docker pull mongo:latest`);
402
- shellExec(`sudo kind load docker-image mongo:latest`);
403
- } else if (options.kubeadm || options.k3s)
404
- // For kubeadm/k3s, ensure it's available for containerd
405
- shellExec(`sudo crictl pull mongo:latest`);
406
- }
318
+ if (options.pullImage === true) Underpost.cluster.pullImage('mongo:latest', options);
407
319
  shellExec(
408
320
  `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
409
321
  );
@@ -464,6 +376,32 @@ EOF
464
376
  }
465
377
  },
466
378
 
379
+ /**
380
+ * @method pullImage
381
+ * @description Pulls a container image using the appropriate runtime based on the cluster type.
382
+ * - For Kind clusters: pulls via Docker and loads the image into the Kind cluster.
383
+ * - For Kubeadm/K3s clusters: pulls via crictl (containerd).
384
+ * @param {string} image - The fully-qualified container image reference (e.g. 'mongo:latest').
385
+ * @param {object} options - The cluster options object from `init`.
386
+ * @param {boolean} [options.kubeadm=false] - Whether the cluster is Kubeadm-based.
387
+ * @param {boolean} [options.k3s=false] - Whether the cluster is K3s-based.
388
+ * @memberof UnderpostCluster
389
+ */
390
+ pullImage(image, options = { kubeadm: false, k3s: false }) {
391
+ if (!options.kubeadm && !options.k3s) {
392
+ const tarPath = `/tmp/kind-image-${image.replace(/[\/:]/g, '-')}.tar`;
393
+ shellExec(`docker pull ${image}`);
394
+ shellExec(`docker save ${image} -o ${tarPath}`);
395
+ shellExec(
396
+ `for node in $(kind get nodes); do cat ${tarPath} | docker exec -i $node ctr --namespace=k8s.io images import -; done`,
397
+ );
398
+ shellExec(`rm -f ${tarPath}`);
399
+ } else if (options.kubeadm || options.k3s) {
400
+ // Kubeadm / K3s: use crictl to pull directly into containerd
401
+ shellExec(`sudo crictl pull ${image}`);
402
+ }
403
+ },
404
+
467
405
  /**
468
406
  * @method config
469
407
  * @description Configures host-level settings required for Kubernetes.
@@ -570,12 +508,15 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
570
508
  * @description Performs a complete reset of the Kubernetes cluster and its container environments.
571
509
  * This version focuses on correcting persistent permission errors (such as 'permission denied'
572
510
  * in coredns) by restoring SELinux security contexts and safely cleaning up cluster artifacts.
511
+ * Only the uninstall/delete commands specific to the given clusterType are executed; all other
512
+ * cleanup steps (log truncation, filesystem, network) are always run as generic k8s resets.
573
513
  * @param {object} [options] - Configuration options for the reset.
574
514
  * @param {string} [options.underpostRoot] - The root path of the underpost project.
575
515
  * @param {boolean} [options.removeVolumeHostPaths=false] - Whether to remove data from host paths used by Persistent Volumes.
516
+ * @param {string} [options.clusterType='kind'] - The type of cluster to reset: 'kind', 'kubeadm', or 'k3s'.
576
517
  * @memberof UnderpostCluster
577
518
  */
578
- async safeReset(options = { underpostRoot: '.', removeVolumeHostPaths: false }) {
519
+ async safeReset(options = { underpostRoot: '.', removeVolumeHostPaths: false, clusterType: 'kind' }) {
579
520
  logger.info('Starting a safe and comprehensive reset of Kubernetes and container environments...');
580
521
 
581
522
  try {
@@ -645,14 +586,22 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
645
586
  // Safely unmount pod filesystems to avoid errors.
646
587
  shellExec('sudo umount -f /var/lib/kubelet/pods/*/*');
647
588
 
648
- // Phase 3: Execute official uninstallation commands
649
- logger.info('Phase 3/7: Executing official reset and uninstallation commands...');
650
- logger.info(' -> Executing kubeadm reset...');
651
- shellExec('sudo kubeadm reset --force');
652
- logger.info(' -> Executing K3s uninstallation script if it exists...');
653
- shellExec('sudo /usr/local/bin/k3s-uninstall.sh');
654
- logger.info(' -> Deleting Kind clusters...');
655
- shellExec('kind get clusters | xargs -r -t -n1 kind delete cluster');
589
+ // Phase 3: Execute official uninstallation commands (type-specific)
590
+ const clusterType = options.clusterType || 'kind';
591
+ logger.info(
592
+ `Phase 3/7: Executing official reset/uninstallation commands for cluster type: '${clusterType}'...`,
593
+ );
594
+ if (clusterType === 'kubeadm') {
595
+ logger.info(' -> Executing kubeadm reset...');
596
+ shellExec('sudo kubeadm reset --force');
597
+ } else if (clusterType === 'k3s') {
598
+ logger.info(' -> Executing K3s uninstallation script if it exists...');
599
+ shellExec('sudo /usr/local/bin/k3s-uninstall.sh');
600
+ } else {
601
+ // Default: kind
602
+ logger.info(' -> Deleting Kind clusters...');
603
+ shellExec('kind get clusters | xargs -r -t -n1 kind delete cluster');
604
+ }
656
605
 
657
606
  // Phase 4: File system cleanup
658
607
  logger.info('Phase 4/7: Cleaning up remaining file system artifacts...');
@@ -672,9 +621,6 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
672
621
  // Remove iptables rules and CNI network interfaces.
673
622
  shellExec('sudo iptables -F');
674
623
  shellExec('sudo iptables -t nat -F');
675
- // Restore iptables rules
676
- shellExec(`chmod +x ${options.underpostRoot}/scripts/nat-iptables.sh`);
677
- shellExec(`${options.underpostRoot}/scripts/nat-iptables.sh`, { silent: true });
678
624
  shellExec('sudo ip link del cni0');
679
625
  shellExec('sudo ip link del flannel.1');
680
626
 
@@ -772,6 +718,11 @@ EOF`);
772
718
  shellExec(`chmod +x /usr/local/bin/helm`);
773
719
  shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
774
720
  shellExec(`sudo rm -rf get_helm.sh`);
721
+
722
+ // Install snap
723
+ shellExec(`sudo yum install -y snapd`);
724
+ shellExec(`sudo systemctl enable --now snapd.socket`);
725
+
775
726
  console.log('Host prerequisites installed successfully.');
776
727
  },
777
728
 
package/src/cli/deploy.js CHANGED
@@ -491,6 +491,7 @@ spec:
491
491
  retryPerTryTimeout: '',
492
492
  kindType: '',
493
493
  port: 0,
494
+ exposePort: 0,
494
495
  cmd: '',
495
496
  },
496
497
  ) {
@@ -573,11 +574,13 @@ EOF`);
573
574
  if (options.expose === true) {
574
575
  const kindType = options.kindType ? options.kindType : 'svc';
575
576
  const svc = Underpost.deploy.get(deployId, kindType)[0];
576
- const port = options.port
577
- ? options.port
578
- : kindType !== 'svc'
579
- ? 80
580
- : parseInt(svc[`PORT(S)`].split('/TCP')[0]);
577
+ const port = options.exposePort
578
+ ? parseInt(options.exposePort)
579
+ : options.port
580
+ ? parseInt(options.port)
581
+ : kindType !== 'svc'
582
+ ? 80
583
+ : parseInt(svc[`PORT(S)`].split('/TCP')[0]);
581
584
  logger.info(deployId, {
582
585
  svc,
583
586
  port,