underpost 2.8.791 → 2.8.793

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -68,7 +68,7 @@ Run dev client server
68
68
  npm run dev
69
69
  ```
70
70
  <!-- -->
71
- ## underpost ci/cd cli v2.8.791
71
+ ## underpost ci/cd cli v2.8.793
72
72
 
73
73
  ### Usage: `underpost [options] [command]`
74
74
  ```
package/bin/db.js CHANGED
@@ -161,6 +161,7 @@ try {
161
161
  }
162
162
  break;
163
163
  case 'show-all':
164
+ // show dbs
164
165
  break;
165
166
  case 'show':
166
167
  break;
package/cli.md CHANGED
@@ -1,4 +1,4 @@
1
- ## underpost ci/cd cli v2.8.791
1
+ ## underpost ci/cd cli v2.8.793
2
2
 
3
3
  ### Usage: `underpost [options] [command]`
4
4
  ```
@@ -486,6 +486,8 @@ Options:
486
486
  --create-vm <vm-id> Create default virtual machines
487
487
  --init-vm <vm-id> Get init vm underpost script
488
488
  --info-vm <vm-id> Get all info vm
489
+ --test <vm-id> Test health, status and network connectivity
490
+ for a VM
489
491
  --root-size <gb-size> Set root size vm
490
492
  --join-node <nodes> Comma separated worker and control node e.
491
493
  g. k8s-worker-1,k8s-control
@@ -494,6 +496,8 @@ Options:
494
496
  --delete-expose <vm-name-ports> Vm name and : separated with Comma separated
495
497
  vm port to remove expose e. g.
496
498
  k8s-control:80,443
499
+ --auto-expose-k8s-ports <vm-id> Automatically expose common Kubernetes ports
500
+ for the VM.
497
501
  -h, --help display help for command
498
502
 
499
503
  ```
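For context, a minimal usage sketch of the two new LXD flags documented above; the `lxd` subcommand name and the `k8s-control` VM name are assumptions for illustration, not taken from this diff:

```bash
# Poll the VM for an IPv4 address, verify outbound connectivity, and print a health report.
underpost lxd --test k8s-control

# Add LXD proxy devices forwarding common Kubernetes ports from the host to the VM.
underpost lxd --auto-expose-k8s-ports k8s-control
```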
@@ -58,7 +58,7 @@ services:
58
58
  cpus: '0.25'
59
59
  memory: 20M
60
60
  labels: # labels in Compose file instead of Dockerfile
61
- engine.version: '2.8.791'
61
+ engine.version: '2.8.793'
62
62
  networks:
63
63
  - load-balancer
64
64
 
@@ -1,20 +1,25 @@
1
1
  #!/bin/bash
2
2
 
3
+ # Exit immediately if a command exits with a non-zero status.
3
4
  set -e
4
5
 
5
- # Expand /dev/sda2 partition and resize filesystem automatically
6
+ echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm Use Case)..."
7
+
8
+ # --- Disk Partition Resizing ---
9
+ echo "Expanding /dev/sda2 partition and resizing filesystem..."
6
10
 
7
11
  # Check if parted is installed
8
12
  if ! command -v parted &>/dev/null; then
9
13
  echo "parted not found, installing..."
10
- dnf install -y parted
14
+ sudo dnf install -y parted
11
15
  fi
12
16
 
13
17
  # Get start sector of /dev/sda2
14
- START_SECTOR=$(parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
18
+ START_SECTOR=$(sudo parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
15
19
 
16
20
  # Resize the partition
17
- parted /dev/sda ---pretend-input-tty <<EOF
21
+ # Using 'sudo' for parted commands
22
+ sudo parted /dev/sda ---pretend-input-tty <<EOF
18
23
  unit s
19
24
  resizepart 2 100%
20
25
  Yes
@@ -22,45 +27,89 @@ quit
22
27
  EOF
23
28
 
24
29
  # Resize the filesystem
25
- resize2fs /dev/sda2
30
+ sudo resize2fs /dev/sda2
26
31
 
27
32
  echo "Disk and filesystem resized successfully."
28
- sudo dnf install -y tar
29
- sudo dnf install -y bzip2
30
- sudo dnf install -y git
33
+
34
+ # --- Essential System Package Installation ---
35
+ echo "Installing essential system packages..."
36
+ sudo dnf install -y tar bzip2 git epel-release
37
+
38
+ # Perform a system update to ensure all packages are up-to-date
31
39
  sudo dnf -y update
32
- sudo dnf -y install epel-release
33
- sudo dnf install -y ufw
34
- sudo systemctl enable --now ufw
40
+
41
+ # --- NVM and Node.js Installation ---
42
+ echo "Installing NVM and Node.js v23.8.0..."
35
43
  curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
36
- NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
44
+
45
+ # Load nvm for the current session
46
+ export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
37
47
  [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
48
+
38
49
  nvm install 23.8.0
39
50
  nvm use 23.8.0
51
+
40
52
  echo "
41
53
  ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
42
54
  ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
43
55
  ██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
44
- ██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔═══╝░██║░░██║░╚═══██╗░░░██║░░░
56
+ ██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔══╝░░██║░░██║░╚═══██╗░░░██║░░░
45
57
  ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
46
58
  ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
47
59
 
48
- Installing underpost k8s node ...
49
-
60
+ Installing underpost k8s node...
50
61
  "
62
+
63
+ # Install underpost globally
51
64
  npm install -g underpost
52
- chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost
65
+
66
+ # Ensure the underpost executable is in PATH and has execute permissions.
67
+ # Global npm installs usually handle this automatically.
68
+ # If the command is not found, ensure /root/.nvm/versions/node/v23.8.0/bin is in your PATH.
69
+ # The chmod below should not be needed for a global npm install.
70
+ # chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost
71
+
72
+ # --- Kernel Module for Bridge Filtering ---
73
+ # This is crucial for Kubernetes networking (CNI)
74
+ echo "Loading br_netfilter kernel module..."
53
75
  sudo modprobe br_netfilter
54
- mkdir -p /home/dd
55
- cd $(underpost root)/underpost
76
+
77
+ # --- Disable UFW (Crucial for Kubernetes) ---
78
+ # UFW conflicts with Kubernetes' iptables management. Disable it completely.
79
+ echo "Disabling UFW to prevent conflicts with Kubernetes..."
80
+ if sudo systemctl is-active --quiet ufw; then
81
+ sudo systemctl stop ufw
82
+ fi
83
+ if sudo systemctl is-enabled --quiet ufw; then
84
+ sudo systemctl disable ufw
85
+ fi
86
+ # Attempt to remove ufw package. dnf will handle if it's not installed.
87
+ echo "Attempting to remove ufw package..."
88
+ sudo dnf remove -y ufw
89
+
90
+ # --- Kubernetes Required Ports (Informational - not for UFW) ---
91
+ # These ports are opened by Kubernetes itself or are expected to be open
92
+ # by external firewalls. UFW is no longer managing them.
93
+ echo "Note: Kubernetes requires the following ports to be open (managed by K8s or external firewall):"
94
+ echo " - Control Plane: 6443/TCP (Kubernetes API), 2379-2380/TCP (etcd)"
95
+ echo " - Worker Nodes: 10250/TCP (Kubelet API), 30000-32767/TCP/UDP (NodePorts)"
96
+ echo " - CNI specific ports (e.g., Calico: 179/TCP, 4789/UDP; Flannel: 8472/UDP)"
97
+ echo " - SSH: 22/TCP"
98
+ echo " - HTTP/HTTPS: 80/TCP, 443/TCP (for Ingress/Load Balancers)"
99
+
100
+ # --- Initial Host Setup for Kubernetes Prerequisites ---
101
+ # This calls the initHost method in cluster.js to install Docker, Podman, Kind, Kubeadm, Helm.
102
+ echo "Running initial host setup for Kubernetes prerequisites..."
103
+ # Ensure the current directory is where 'underpost' expects its root, or use absolute paths.
104
+ # Assuming 'underpost root' correctly points to the base directory of your project.
105
+ cd "$(underpost root)/underpost"
56
106
  underpost cluster --init-host
57
107
 
58
- # Default flags
108
+ # --- Argument Parsing for Kubeadm/Kind/Worker ---
59
109
  USE_KUBEADM=false
60
- USE_KIND=false
110
+ USE_KIND=false # Kind support is kept, though kubeadm is the primary path here
61
111
  USE_WORKER=false
62
112
 
63
- # Loop through arguments
64
113
  for arg in "$@"; do
65
114
  case "$arg" in
66
115
  --kubeadm)
@@ -76,71 +125,45 @@ for arg in "$@"; do
76
125
  done
77
126
 
78
127
  echo "USE_KUBEADM = $USE_KUBEADM"
79
- echo "USE_KIND = $USE_KIND"
80
- echo "USE_WORKER = $USE_WORKER"
81
-
82
- underpost cluster --kubeadm
83
- underpost cluster --reset
84
-
85
- PORTS=(
86
- 22 # SSH
87
- 80 # HTTP
88
- 443 # HTTPS
89
- 53 # DNS (TCP/UDP)
90
- 66 # TFTP
91
- 67 # DHCP
92
- 69 # TFTP
93
- 111 # rpcbind
94
- 179 # Calico BGP
95
- 2049 # NFS
96
- 20048 # NFS mountd
97
- 4011 # PXE boot
98
- 5240 # snapd API
99
- 5248 # Juju controller
100
- 6443 # Kubernetes API
101
- 9153 # CoreDNS metrics
102
- 10250 # Kubelet API
103
- 10251 # kube-scheduler
104
- 10252 # kube-controller-manager
105
- 10255 # Kubelet read-only (deprecated)
106
- 10257 # controller-manager (v1.23+)
107
- 10259 # scheduler (v1.23+)
108
- )
109
-
110
- PORT_RANGES=(
111
- 2379:2380 # etcd
112
- # 30000:32767 # NodePort range
113
- # 3000:3100 # App node ports
114
- 32765:32766 # Ephemeral ports
115
- 6783:6784 # Weave Net
116
- )
117
-
118
- # Open individual ports
119
- for PORT in "${PORTS[@]}"; do
120
- ufw allow ${PORT}/tcp
121
- ufw allow ${PORT}/udp
122
- done
128
+ echo "USE_KIND = $USE_KIND"
129
+ echo "USE_WORKER = $USE_WORKER"
123
130
 
124
- # Open port ranges
125
- for RANGE in "${PORT_RANGES[@]}"; do
126
- ufw allow ${RANGE}/tcp
127
- ufw allow ${RANGE}/udp
128
- done
131
+ # --- Kubernetes Cluster Initialization Logic ---
129
132
 
130
- # Behavior based on flags
131
- if $USE_KUBEADM; then
132
- echo "Running control node with kubeadm..."
133
- underpost cluster --kubeadm
134
- # kubectl get pods --all-namespaces -o wide -w
135
- fi
133
+ # Call config first to apply SELinux, Docker, Containerd, and sysctl settings.
134
+ # This config function in cluster.js will be modified to remove iptables flushing.
135
+ echo "Applying Kubernetes host configuration (SELinux, Containerd, Sysctl)..."
136
+ underpost cluster --config
136
137
 
137
- if $USE_KIND; then
138
+ if $USE_KUBEADM; then
139
+ if $USE_WORKER; then
140
+ echo "Running worker node setup for kubeadm..."
141
+ # For worker nodes, 'underpost cluster --worker' only applies host configuration;
142
+ # the join command itself must be obtained from the control plane and run separately.
143
+ # This script assumes the join command will be executed separately or passed in.
144
+ # For a full automated setup, you'd typically pass the join token/command here.
145
+ # Example: underpost cluster --worker --join-command "kubeadm join ..."
146
+ # For now, this just runs the worker-specific config.
147
+ underpost cluster --worker --config
148
+ echo "Worker node setup initiated. You will need to manually join this worker to your control plane."
149
+ echo "On your control plane, run 'kubeadm token create --print-join-command' and execute the output here."
150
+ else
151
+ echo "Running control plane setup with kubeadm..."
152
+ # This will initialize the kubeadm control plane and install Calico
153
+ underpost cluster --kubeadm
154
+ # Ensure kubectl config is set up for the current user
155
+ underpost cluster --chown
156
+ echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
157
+ fi
158
+ elif $USE_KIND; then
138
159
  echo "Running control node with kind..."
139
160
  underpost cluster
140
- # kubectl get pods --all-namespaces -o wide -w
161
+ underpost cluster --chown
162
+ echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
163
+ else
164
+ echo "No specific cluster role (--kubeadm, --kind, --worker) specified. Please provide one."
165
+ exit 1
141
166
  fi
142
167
 
143
- if $USE_WORKER; then
144
- echo "Running worker..."
145
- underpost cluster --worker --config
146
- fi
168
+ echo "Underpost Kubernetes Node Setup completed."
169
+ echo "Remember to verify cluster health with 'kubectl get nodes' and 'kubectl get pods --all-namespaces'."
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "type": "module",
3
3
  "main": "src/index.js",
4
4
  "name": "underpost",
5
- "version": "2.8.791",
5
+ "version": "2.8.793",
6
6
  "description": "pwa api rest template",
7
7
  "scripts": {
8
8
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -37,39 +37,39 @@ class UnderpostCluster {
37
37
  chown: false,
38
38
  },
39
39
  ) {
40
- // sudo dnf update
41
- // 1) Install kind, kubeadm, docker, podman, helm
42
- // 2) Check kubectl, kubelet, containerd.io
43
- // 3) Install Nvidia drivers from Rocky Linux docs
44
- // 4) Install LXD with MAAS from Rocky Linux docs
45
- // 5) Install MAAS src from snap
40
+ // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
46
41
  if (options.initHost === true) return UnderpostCluster.API.initHost();
42
+
43
+ // Applies general host configuration (SELinux, containerd, sysctl)
47
44
  if (options.config === true) UnderpostCluster.API.config();
45
+
46
+ // Sets up kubectl configuration for the current user
48
47
  if (options.chown === true) UnderpostCluster.API.chown();
48
+
49
49
  const npmRoot = getNpmRootPath();
50
50
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
51
+
52
+ // Information gathering options
51
53
  if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
52
54
  if (options.infoCapacity === true)
53
55
  return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
54
- if (options.reset === true) return await UnderpostCluster.API.reset();
55
56
  if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
56
-
57
57
  if (options.nsUse && typeof options.nsUse === 'string') {
58
58
  shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
59
59
  return;
60
60
  }
61
61
  if (options.info === true) {
62
- shellExec(`kubectl config get-contexts`); // config env persisente for manage multiple clusters
62
+ shellExec(`kubectl config get-contexts`);
63
63
  shellExec(`kubectl config get-clusters`);
64
- shellExec(`kubectl get nodes -o wide`); // set of nodes of a cluster
64
+ shellExec(`kubectl get nodes -o wide`);
65
65
  shellExec(`kubectl config view | grep namespace`);
66
- shellExec(`kubectl get ns -o wide`); // A namespace can have pods of different nodes
67
- shellExec(`kubectl get pvc --all-namespaces -o wide`); // PersistentVolumeClaim -> request storage service
68
- shellExec(`kubectl get pv --all-namespaces -o wide`); // PersistentVolume -> real storage
66
+ shellExec(`kubectl get ns -o wide`);
67
+ shellExec(`kubectl get pvc --all-namespaces -o wide`);
68
+ shellExec(`kubectl get pv --all-namespaces -o wide`);
69
69
  shellExec(`kubectl get cronjob --all-namespaces -o wide`);
70
- shellExec(`kubectl get svc --all-namespaces -o wide`); // proxy dns gate way -> deployments, statefulsets, pods
71
- shellExec(`kubectl get statefulsets --all-namespaces -o wide`); // set pods with data/volume persistence
72
- shellExec(`kubectl get deployments --all-namespaces -o wide`); // set pods
70
+ shellExec(`kubectl get svc --all-namespaces -o wide`);
71
+ shellExec(`kubectl get statefulsets --all-namespaces -o wide`);
72
+ shellExec(`kubectl get deployments --all-namespaces -o wide`);
73
73
  shellExec(`kubectl get configmap --all-namespaces -o wide`);
74
74
  shellExec(`kubectl get pods --all-namespaces -o wide`);
75
75
  shellExec(
@@ -91,38 +91,46 @@ class UnderpostCluster {
91
91
  shellExec(`sudo kubectl api-resources`);
92
92
  return;
93
93
  }
94
- const alrreadyCluster =
94
+
95
+ // Reset Kubernetes cluster components (Kind/Kubeadm) and container runtimes
96
+ if (options.reset === true) return await UnderpostCluster.API.reset();
97
+
98
+ // Check if a cluster (Kind or Kubeadm with Calico) is already initialized
99
+ const alreadyCluster =
95
100
  UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0] ||
96
101
  UnderpostDeploy.API.get('calico-kube-controllers')[0];
97
102
 
98
- if (
99
- !options.worker &&
100
- !alrreadyCluster &&
101
- ((!options.kubeadm && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
102
- (options.kubeadm === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0]))
103
- ) {
104
- UnderpostCluster.API.config();
103
+ // --- Kubeadm/Kind Cluster Initialization ---
104
+ // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
105
+ // It prevents re-initialization if a cluster is already detected.
106
+ if (!options.worker && !alreadyCluster) {
107
+ // Kubeadm path: initialize a control plane on this host
105
108
  if (options.kubeadm === true) {
109
+ logger.info('Initializing Kubeadm control plane...');
110
+ // Initialize kubeadm control plane
106
111
  shellExec(
107
112
  `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
108
113
  );
114
+ // Configure kubectl for the current user
109
115
  UnderpostCluster.API.chown();
110
- // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
116
+ // Install Calico CNI
117
+ logger.info('Installing Calico CNI...');
111
118
  shellExec(
112
119
  `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
113
120
  );
114
- // shellExec(
115
- // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
116
- // );
117
121
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
122
+ // Untaint control plane node to allow scheduling pods
118
123
  const nodeName = os.hostname();
119
124
  shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
125
+ // Install local-path-provisioner for dynamic PVCs (optional but recommended)
126
+ logger.info('Installing local-path-provisioner...');
120
127
  shellExec(
121
128
  `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
122
129
  );
123
130
  } else {
131
+ // Kind cluster initialization (if not using kubeadm)
132
+ logger.info('Initializing Kind cluster...');
124
133
  if (options.full === true || options.dedicatedGpu === true) {
125
- // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
126
134
  shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
127
135
  UnderpostCluster.API.chown();
128
136
  } else {
@@ -133,9 +141,18 @@ class UnderpostCluster {
133
141
  );
134
142
  }
135
143
  }
136
- } else logger.warn('Cluster already initialized');
144
+ } else if (options.worker === true) {
145
+ // Worker node specific configuration (kubeadm join command needs to be executed separately)
146
+ logger.info('Worker node configuration applied. Awaiting kubeadm join command...');
147
+ // No direct cluster initialization here for workers. The `kubeadm join` command
148
+ // needs to be run on the worker after the control plane is up and a token is created.
149
+ // This part of the script is for general worker setup, not the join itself.
150
+ } else {
151
+ logger.warn('Cluster already initialized; skipping initialization.');
152
+ }
137
153
 
138
- // shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubelet-config.yaml`);
154
+ // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
155
+ // These deployments happen after the base cluster is up.
139
156
 
140
157
  if (options.full === true || options.dedicatedGpu === true) {
141
158
  shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
@@ -218,8 +235,6 @@ class UnderpostCluster {
218
235
  --eval 'rs.initiate(${JSON.stringify(mongoConfig)})'`,
219
236
  );
220
237
  }
221
-
222
- // await UnderpostTest.API.statusMonitor('mongodb-1');
223
238
  } else if (options.full === true || options.mongodb === true) {
224
239
  if (options.pullImage === true) {
225
240
  shellExec(`docker pull mongo:latest`);
@@ -280,181 +295,144 @@ class UnderpostCluster {
280
295
  }
281
296
  },
282
297
 
298
+ /**
299
+ * @method config
300
+ * @description Configures host-level settings required for Kubernetes.
301
+ * IMPORTANT: This method has been updated to REMOVE all iptables flushing commands
302
+ * to prevent conflicts with Kubernetes' own network management.
303
+ */
283
304
  config() {
305
+ console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
306
+ // Disable SELinux (permissive mode)
284
307
  shellExec(`sudo setenforce 0`);
285
308
  shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
309
+
310
+ // Enable and start Docker and Kubelet services
286
311
  shellExec(`sudo systemctl enable --now docker`);
287
312
  shellExec(`sudo systemctl enable --now kubelet`);
288
- shellExec(`containerd config default > /etc/containerd/config.toml`);
289
- shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
290
- shellExec(`sudo service docker restart`);
313
+
314
+ // Configure containerd for SystemdCgroup
315
+ // This is crucial for kubelet to interact correctly with containerd
316
+ shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
317
+ shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
318
+ shellExec(`sudo service docker restart`); // Restart docker after containerd config changes
291
319
  shellExec(`sudo systemctl enable --now containerd.service`);
320
+ shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
321
+
322
+ // Disable swap (required by Kubernetes)
292
323
  shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
324
+
325
+ // Reload systemd daemon to pick up new unit files/changes
293
326
  shellExec(`sudo systemctl daemon-reload`);
294
- shellExec(`sudo systemctl restart containerd`);
295
- shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
327
+
328
+ // Enable bridge-nf-call-iptables for Kubernetes networking
329
+ // This ensures traffic through Linux bridges is processed by iptables (crucial for CNI)
330
+ shellExec(`sudo sysctl net.bridge.bridge-nf-call-iptables=1`);
331
+ // Also ensure these are set for persistence across reboots
332
+ shellExec(`echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee /etc/sysctl.d/k8s.conf`);
333
+ shellExec(`echo "net.ipv4.ip_forward=1" | sudo tee -a /etc/sysctl.d/k8s.conf`); // Enable IP forwarding
334
+ shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
335
+
336
+ // Removed iptables flushing commands.
337
+ // Kubernetes (kube-proxy and CNI) manages its own iptables rules.
338
+ // Flushing them here would break cluster networking.
296
339
  },
340
+
341
+ /**
342
+ * @method chown
343
+ * @description Sets up kubectl configuration for the current user.
344
+ * This is typically run after kubeadm init on the control plane.
345
+ */
297
346
  chown() {
347
+ console.log('Setting up kubectl configuration...');
298
348
  shellExec(`mkdir -p ~/.kube`);
299
349
  shellExec(`sudo -E cp -i /etc/kubernetes/admin.conf ~/.kube/config`);
300
350
  shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
351
+ console.log('kubectl config set up successfully.');
301
352
  },
302
- // This function performs a comprehensive reset of Kubernetes and container environments
303
- // on the host machine. Its primary goal is to clean up cluster components, temporary files,
304
- // and container data, ensuring a clean state for re-initialization or fresh deployments,
305
- // while also preventing the loss of the host machine's internet connectivity.
306
353
 
354
+ /**
355
+ * @method reset
356
+ * @description Performs a comprehensive reset of Kubernetes and container environments.
357
+ * This function is for cleaning up a node, not for initial setup.
358
+ * It avoids aggressive iptables flushing that would break host connectivity.
359
+ */
307
360
  reset() {
308
- // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
309
- // 'kind get clusters' lists all Kind clusters.
310
- // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
311
- // and executes 'kind delete cluster --name <cluster_name>' to remove them.
312
- shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
313
-
314
- // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
315
- // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
316
- // configuration files, and associated network rules (like iptables entries created by kubeadm).
317
- // The '-f' flag bypasses confirmation prompts.
361
+ console.log('Starting comprehensive reset of Kubernetes and container environments...');
362
+
363
+ // Delete all existing Kind (Kubernetes in Docker) clusters.
364
+ shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster --name`); // -r for no-op if no clusters
365
+
366
+ // Reset the Kubernetes control-plane components installed by kubeadm.
318
367
  shellExec(`sudo kubeadm reset -f`);
319
368
 
320
- // Step 3: Remove specific CNI (Container Network Interface) configuration files.
321
- // This command targets and removes the configuration file for Flannel,
322
- // a common CNI plugin, which might be left behind after a reset.
369
+ // Remove specific CNI configuration files (e.g., Flannel)
323
370
  shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
324
371
 
325
- // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
326
- // This command would flush all iptables rules, including those crucial for the host's general
327
- // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
328
- // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
329
- // default network configuration.
330
-
331
- // Step 4: Remove the kubectl configuration file from the current user's home directory.
332
- // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
333
- // providing a clean slate for connecting to a new or re-initialized cluster.
372
+ // Remove the kubectl configuration file
334
373
  shellExec('sudo rm -f $HOME/.kube/config');
335
374
 
336
- // Step 5: Clear trash files from the root user's trash directory.
337
- // This is a general cleanup step to remove temporary or deleted files.
375
+ // Clear trash files from the root user's trash directory.
338
376
  shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
339
377
 
340
- // Step 6: Prune all unused Docker data.
341
- // 'docker system prune -a -f' removes:
342
- // - All stopped containers
343
- // - All unused networks
344
- // - All dangling images
345
- // - All build cache
346
- // - All unused volumes
347
- // This aggressively frees up disk space and removes temporary Docker artifacts.
378
+ // Prune all unused Docker data.
348
379
  shellExec('sudo docker system prune -a -f');
349
380
 
350
- // Step 7: Stop the Docker daemon service.
351
- // This step is often necessary to ensure that Docker's files and directories
352
- // can be safely manipulated or moved in subsequent steps without conflicts.
381
+ // Stop the Docker daemon service.
353
382
  shellExec('sudo service docker stop');
354
383
 
355
- // Step 8: Aggressively remove container storage data for containerd and Docker.
356
- // These commands target the default storage locations for containerd and Docker,
357
- // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
358
- // This ensures a complete wipe of all container images, layers, and volumes.
384
+ // Aggressively remove container storage data for containerd and Docker.
359
385
  shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
360
386
  shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
361
- shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
362
- shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
363
- shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
364
-
365
- // Step 9: Re-configure Docker's default storage location (if desired).
366
- // These commands effectively move Docker's data directory from its default `/var/lib/docker`
367
- // to a new location (`/home/docker`) and create a symbolic link.
368
- // This is a specific customization to relocate Docker's storage.
369
- shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
370
- shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
371
- shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
372
- shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
373
-
374
- // Step 10: Prune all unused Podman data.
375
- // Similar to Docker pruning, these commands remove:
376
- // - All stopped containers
377
- // - All unused networks
378
- // - All unused images
379
- // - All unused volumes ('--volumes')
380
- // - The '--force' flag bypasses confirmation.
381
- // '--external' prunes external content not managed by Podman's default storage backend.
387
+ shellExec(`sudo rm -rf /var/lib/docker~/*`);
388
+ shellExec(`sudo rm -rf /home/containers/storage/*`);
389
+ shellExec(`sudo rm -rf /home/docker/*`);
390
+
391
+ // Re-configure Docker's default storage location (if desired).
392
+ shellExec('sudo mv /var/lib/docker /var/lib/docker~ || true'); // Use || true to prevent error if dir doesn't exist
393
+ shellExec('sudo mkdir -p /home/docker');
394
+ shellExec('sudo chmod 0711 /home/docker');
395
+ shellExec('sudo ln -s /home/docker /var/lib/docker');
396
+
397
+ // Prune all unused Podman data.
382
398
  shellExec(`sudo podman system prune -a -f`);
383
399
  shellExec(`sudo podman system prune --all --volumes --force`);
384
400
  shellExec(`sudo podman system prune --external --force`);
385
- shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
386
401
 
387
- // Step 11: Create and set permissions for Podman's custom storage directory.
388
- // This ensures the custom path `/home/containers/storage` exists and has correct permissions
389
- // before Podman attempts to use it.
402
+ // Create and set permissions for Podman's custom storage directory.
390
403
  shellExec(`sudo mkdir -p /home/containers/storage`);
391
404
  shellExec('sudo chmod 0711 /home/containers/storage');
392
405
 
393
- // Step 12: Update Podman's storage configuration file.
394
- // This command uses 'sed' to modify `/etc/containers/storage.conf`,
395
- // changing the default storage path from `/var/lib/containers/storage`
396
- // to the customized `/home/containers/storage`.
406
+ // Update Podman's storage configuration file.
397
407
  shellExec(
398
408
  `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
399
409
  );
400
410
 
401
- // Step 13: Reset Podman system settings.
402
- // This command resets Podman's system-wide configuration to its default state.
411
+ // Reset Podman system settings.
403
412
  shellExec(`sudo podman system reset -f`);
404
413
 
405
- // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
406
- // were previously removed. These sysctl settings (bridge-nf-call-iptables,
407
- // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
408
- // network traffic through Linux bridges to be processed by iptables.
409
- // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
410
- // Re-initializing Kubernetes will typically set these as needed, and leaving them
411
- // at their system default (or '1' if already configured) is safer for host
412
- // connectivity during a reset operation.
413
-
414
- // https://github.com/kubernetes-sigs/kind/issues/2886
415
- // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
416
- // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
417
- // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
418
-
419
- // Step 14: Remove the 'kind' Docker network.
420
- // This cleans up any network bridges or configurations specifically created by Kind.
421
- // shellExec(`docker network rm kind`);
422
-
423
- // Reset kubelet
414
+ // Reset kubelet components
424
415
  shellExec(`sudo systemctl stop kubelet`);
425
416
  shellExec(`sudo rm -rf /etc/kubernetes/*`);
426
417
  shellExec(`sudo rm -rf /var/lib/kubelet/*`);
427
418
  shellExec(`sudo rm -rf /etc/cni/net.d/*`);
428
419
  shellExec(`sudo systemctl daemon-reload`);
429
420
  shellExec(`sudo systemctl start kubelet`);
421
+
422
+ console.log('Comprehensive reset completed.');
430
423
  },
431
424
 
432
425
  getResourcesCapacity(kubeadm = false) {
433
426
  const resources = {};
434
- const info = false
435
- ? `Capacity:
436
- cpu: 8
437
- ephemeral-storage: 153131976Ki
438
- hugepages-1Gi: 0
439
- hugepages-2Mi: 0
440
- memory: 11914720Ki
441
- pods: 110
442
- Allocatable:
443
- cpu: 8
444
- ephemeral-storage: 153131976Ki
445
- hugepages-1Gi: 0
446
- hugepages-2Mi: 0
447
- memory: 11914720Ki
448
- pods: `
449
- : shellExec(
450
- `kubectl describe node ${
451
- kubeadm === true ? os.hostname() : 'kind-worker'
452
- } | grep -E '(Allocatable:|Capacity:)' -A 6`,
453
- {
454
- stdout: true,
455
- silent: true,
456
- },
457
- );
427
+ const info = shellExec(
428
+ `kubectl describe node ${
429
+ kubeadm === true ? os.hostname() : 'kind-worker'
430
+ } | grep -E '(Allocatable:|Capacity:)' -A 6`,
431
+ {
432
+ stdout: true,
433
+ silent: true,
434
+ },
435
+ );
458
436
  info
459
437
  .split('Allocatable:')[1]
460
438
  .split('\n')
@@ -475,17 +453,20 @@ Allocatable:
475
453
  return resources;
476
454
  },
477
455
  initHost() {
456
+ console.log('Installing Docker, Podman, Kind, Kubeadm, and Helm...');
478
457
  // Install docker
479
- shellExec(`sudo dnf -y install dnf-plugins-core
480
- sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
458
+ shellExec(`sudo dnf -y install dnf-plugins-core`);
459
+ shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
481
460
  shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
461
+
482
462
  // Install podman
483
463
  shellExec(`sudo dnf -y install podman`);
464
+
484
465
  // Install kind
485
466
  shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
486
467
  chmod +x ./kind
487
468
  sudo mv ./kind /bin/kind`);
488
- // Install kubeadm
469
+ // Install kubeadm, kubelet, kubectl
489
470
  shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
490
471
  [kubernetes]
491
472
  name=Kubernetes
@@ -496,12 +477,14 @@ gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
496
477
  exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
497
478
  EOF`);
498
479
  shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
480
+
499
481
  // Install helm
500
- shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
501
- chmod 700 get_helm.sh
502
- ./get_helm.sh
503
- chmod +x /usr/local/bin/helm
504
- sudo mv /usr/local/bin/helm /bin/helm`);
482
+ shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`);
483
+ shellExec(`chmod 700 get_helm.sh`);
484
+ shellExec(`./get_helm.sh`);
485
+ shellExec(`chmod +x /usr/local/bin/helm`);
486
+ shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
487
+ console.log('Host prerequisites installed successfully.');
505
488
  },
506
489
  };
507
490
  }
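Since `config()` now persists its sysctl settings and no longer flushes iptables, a quick host-side check can confirm the state it is expected to leave behind; this is a verification sketch, not part of the package:

```bash
# Bridge traffic must be visible to iptables and IP forwarding must be enabled.
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward

# Settings persisted by config() across reboots.
cat /etc/sysctl.d/k8s.conf

# Swap must be off (expect no output).
swapon --show

# containerd must use the systemd cgroup driver.
grep SystemdCgroup /etc/containerd/config.toml
```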
package/src/cli/index.js CHANGED
@@ -283,6 +283,7 @@ program
283
283
  .option('--create-vm <vm-id>', 'Create default virtual machines')
284
284
  .option('--init-vm <vm-id>', 'Get init vm underpost script')
285
285
  .option('--info-vm <vm-id>', 'Get all info vm')
286
+ .option('--test <vm-id>', 'Test health, status and network connectivity for a VM')
286
287
  .option('--root-size <gb-size>', 'Set root size vm')
287
288
  .option('--join-node <nodes>', 'Comma separated worker and control node e. g. k8s-worker-1,k8s-control')
288
289
  .option(
@@ -293,6 +294,7 @@ program
293
294
  '--delete-expose <vm-name-ports>',
294
295
  'Vm name and : separated with Comma separated vm port to remove expose e. g. k8s-control:80,443',
295
296
  )
297
+ .option('--auto-expose-k8s-ports <vm-id>', 'Automatically expose common Kubernetes ports for the VM.')
296
298
  .description('Lxd management')
297
299
  .action(UnderpostLxd.API.callback);
298
300
 
package/src/cli/lxd.js CHANGED
@@ -3,8 +3,35 @@ import { getLocalIPv4Address } from '../server/dns.js';
3
3
  import { pbcopy, shellExec } from '../server/process.js';
4
4
  import fs from 'fs-extra';
5
5
 
6
+ /**
7
+ * @class UnderpostLxd
8
+ * @description Provides a set of static methods to interact with LXD,
9
+ * encapsulating common LXD operations for VM management and network testing.
10
+ */
6
11
  class UnderpostLxd {
7
12
  static API = {
13
+ /**
14
+ * @method callback
15
+ * @description Main entry point for LXD operations based on provided options.
16
+ * @param {object} options - Configuration options for LXD operations.
17
+ * @param {boolean} [options.init=false] - Initialize LXD.
18
+ * @param {boolean} [options.reset=false] - Reset LXD installation.
19
+ * @param {boolean} [options.dev=false] - Run in development mode (adjusts paths).
20
+ * @param {boolean} [options.install=false] - Install LXD snap.
21
+ * @param {boolean} [options.createVirtualNetwork=false] - Create default LXD bridge network (lxdbr0).
22
+ * @param {boolean} [options.createAdminProfile=false] - Create admin-profile for VMs.
23
+ * @param {boolean} [options.control=false] - Flag for control plane VM initialization.
24
+ * @param {boolean} [options.worker=false] - Flag for worker node VM initialization.
25
+ * @param {string} [options.initVm=''] - Initialize a specific VM.
26
+ * @param {string} [options.createVm=''] - Create a new VM with the given name.
27
+ * @param {string} [options.infoVm=''] - Display information about a specific VM.
28
+ * @param {string} [options.rootSize=''] - Root disk size for new VMs (e.g., '32GiB').
29
+ * @param {string} [options.joinNode=''] - Join a worker node to a control plane (format: 'workerName,controlName').
30
+ * @param {string} [options.expose=''] - Expose ports from a VM to the host (format: 'vmName:port1,port2').
31
+ * @param {string} [options.deleteExpose=''] - Delete exposed ports from a VM (format: 'vmName:port1,port2').
32
+ * @param {string} [options.test=''] - Test health, status and network connectivity for a VM.
33
+ * @param {string} [options.autoExposeK8sPorts=''] - Automatically expose common Kubernetes ports for the VM.
34
+ */
8
35
  async callback(
9
36
  options = {
10
37
  init: false,
@@ -12,6 +39,7 @@ class UnderpostLxd {
12
39
  dev: false,
13
40
  install: false,
14
41
  createVirtualNetwork: false,
42
+ createAdminProfile: false,
15
43
  control: false,
16
44
  worker: false,
17
45
  initVm: '',
@@ -21,6 +49,8 @@ class UnderpostLxd {
21
49
  joinNode: '',
22
50
  expose: '',
23
51
  deleteExpose: '',
52
+ test: '',
53
+ autoExposeK8sPorts: '',
24
54
  },
25
55
  ) {
26
56
  const npmRoot = getNpmRootPath();
@@ -36,7 +66,6 @@ class UnderpostLxd {
36
66
  const lxdPressedContent = fs
37
67
  .readFileSync(`${underpostRoot}/manifests/lxd/lxd-preseed.yaml`, 'utf8')
38
68
  .replaceAll(`127.0.0.1`, getLocalIPv4Address());
39
- // shellExec(`lxd init --preseed < ${underpostRoot}/manifests/lxd/lxd-preseed.yaml`);
40
69
  shellExec(`echo "${lxdPressedContent}" | lxd init --preseed`);
41
70
  shellExec(`lxc cluster list`);
42
71
  }
@@ -67,10 +96,90 @@ ipv6.address=none`);
67
96
  flag = ' -s -- --kubeadm';
68
97
  shellExec(`lxc exec ${options.initVm} -- bash -c 'mkdir -p /home/dd/engine'`);
69
98
  shellExec(`lxc file push /home/dd/engine/engine-private ${options.initVm}/home/dd/engine --recursive`);
99
+ shellExec(`lxc file push /home/dd/engine/manifests ${options.initVm}/home/dd/engine --recursive`);
70
100
  } else if (options.worker == true) {
71
101
  flag = ' -s -- --worker';
72
102
  }
73
- pbcopy(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
103
+ console.log(`Executing underpost-setup.sh on VM: ${options.initVm}`);
104
+ shellExec(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
105
+ console.log(`underpost-setup.sh execution completed on VM: ${options.initVm}`);
106
+ }
107
+ // --- Automatic Kubernetes Port Exposure ---
108
+ if (options.autoExposeK8sPorts && typeof options.autoExposeK8sPorts === 'string') {
109
+ console.log(`Automatically exposing Kubernetes ports for VM: ${options.autoExposeK8sPorts}`);
110
+ const vmName = options.autoExposeK8sPorts;
111
+ const hostIp = getLocalIPv4Address();
112
+ let vmIp = '';
113
+ let retries = 0;
114
+ const maxRetries = 10;
115
+ const delayMs = 5000; // 5 seconds
116
+
117
+ // Wait for VM to get an IP address
118
+ while (!vmIp && retries < maxRetries) {
119
+ try {
120
+ console.log(`Attempting to get IPv4 address for ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
121
+ vmIp = shellExec(
122
+ `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
123
+ { stdout: true },
124
+ ).trim();
125
+ if (vmIp) {
126
+ console.log(`IPv4 address found for ${vmName}: ${vmIp}`);
127
+ } else {
128
+ console.log(`IPv4 address not yet available for ${vmName}. Retrying in ${delayMs / 1000} seconds...`);
129
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
130
+ }
131
+ } catch (error) {
132
+ console.error(`Error getting IPv4 address for exposure: ${error.message}`);
133
+ console.log(`Retrying in ${delayMs / 1000} seconds...`);
134
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
135
+ }
136
+ retries++;
137
+ }
138
+
139
+ if (!vmIp) {
140
+ console.error(`Failed to get VM IP for ${vmName} after ${maxRetries} attempts. Cannot expose ports.`);
141
+ return;
142
+ }
143
+
144
+ let portsToExpose = [];
145
+ if (options.control === true) {
146
+ // Kubernetes API Server
147
+ portsToExpose.push('6443');
148
+ // Standard HTTP/HTTPS for Ingress if deployed
149
+ portsToExpose.push('80');
150
+ portsToExpose.push('443');
151
+ }
152
+ // NodePort range for all nodes (control plane can also run pods with NodePorts)
153
+ // It's safer to expose the entire range for flexibility, or specific NodePorts if known.
154
+ // For production, you might only expose specific NodePorts or use a LoadBalancer.
155
+ // For a general setup, exposing the range is common.
156
+ // Note: LXD proxy device can only expose individual ports, not ranges directly.
157
+ // We will expose a few common ones, or rely on specific 'expose' calls for others.
158
+ // Let's add some common NodePorts that might be used by applications.
159
+ // The full range 30000-32767 would require individual proxy rules for each port.
160
+ // For this automatic setup, we'll focus on critical K8s ports and common app ports.
161
+ // If a user needs the full NodePort range, they should use the `expose` option explicitly.
162
+ portsToExpose.push('30000'); // Example NodePort
163
+ portsToExpose.push('30001'); // Example NodePort
164
+ portsToExpose.push('30002'); // Example NodePort
165
+
166
+ const protocols = ['tcp']; // Most K8s services are TCP, UDP for some like DNS
167
+
168
+ for (const port of portsToExpose) {
169
+ for (const protocol of protocols) {
170
+ const deviceName = `${vmName}-${protocol}-port-${port}`;
171
+ try {
172
+ // Remove existing device first to avoid conflicts if re-running
173
+ shellExec(`lxc config device remove ${vmName} ${deviceName} || true`);
174
+ shellExec(
175
+ `lxc config device add ${vmName} ${deviceName} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
176
+ );
177
+ console.log(`Exposed ${protocol}:${hostIp}:${port} -> ${vmIp}:${port} for ${vmName}`);
178
+ } catch (error) {
179
+ console.error(`Failed to expose port ${port} for ${vmName}: ${error.message}`);
180
+ }
181
+ }
182
+ }
74
183
  }
75
184
  if (options.joinNode && typeof options.joinNode === 'string') {
76
185
  const [workerNode, controlNode] = options.joinNode.split(',');
@@ -87,21 +196,26 @@ ipv6.address=none`);
87
196
  shellExec(`lxc list ${options.infoVm}`);
88
197
  }
89
198
  if (options.expose && typeof options.expose === 'string') {
90
- const [controlNode, ports] = options.expose.split(':');
91
- console.log({ controlNode, ports });
199
+ const [vmName, ports] = options.expose.split(':');
200
+ console.log({ vmName, ports });
92
201
  const protocols = ['tcp']; // udp
93
202
  const hostIp = getLocalIPv4Address();
94
- // The vmIp will now be the static IP assigned in the admin-profile
95
203
  const vmIp = shellExec(
96
- `lxc list ${controlNode} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
204
+ `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
97
205
  { stdout: true },
98
206
  ).trim();
207
+ if (!vmIp) {
208
+ console.error(`Could not get VM IP for ${vmName}. Cannot expose ports.`);
209
+ return;
210
+ }
99
211
  for (const port of ports.split(',')) {
100
212
  for (const protocol of protocols) {
101
- shellExec(`lxc config device remove ${controlNode} ${controlNode}-${protocol}-port-${port}`);
213
+ const deviceName = `${vmName}-${protocol}-port-${port}`;
214
+ shellExec(`lxc config device remove ${vmName} ${deviceName} || true`); // Use || true to prevent error if device doesn't exist
102
215
  shellExec(
103
- `lxc config device add ${controlNode} ${controlNode}-${protocol}-port-${port} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
216
+ `lxc config device add ${vmName} ${deviceName} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
104
217
  );
218
+ console.log(`Manually exposed ${protocol}:${hostIp}:${port} -> ${vmIp}:${port} for ${vmName}`);
105
219
  }
106
220
  }
107
221
  }
@@ -111,11 +225,139 @@ ipv6.address=none`);
111
225
  const protocols = ['tcp']; // udp
112
226
  for (const port of ports.split(',')) {
113
227
  for (const protocol of protocols) {
114
- // The device name is consistent: {controlNode}-port-{port}
115
228
  shellExec(`lxc config device remove ${controlNode} ${controlNode}-${protocol}-port-${port}`);
116
229
  }
117
230
  }
118
231
  }
232
+
233
+ // New 'test' option implementation
234
+ if (options.test && typeof options.test === 'string') {
235
+ const vmName = options.test;
236
+ console.log(`Starting comprehensive test for VM: ${vmName}`);
237
+
238
+ // 1. Monitor for IPv4 address
239
+ let vmIp = '';
240
+ let retries = 0;
241
+ const maxRetries = 10;
242
+ const delayMs = 5000; // 5 seconds
243
+
244
+ while (!vmIp && retries < maxRetries) {
245
+ try {
246
+ console.log(`Attempting to get IPv4 address for ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
247
+ vmIp = shellExec(
248
+ `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
249
+ { stdout: true },
250
+ ).trim();
251
+ if (vmIp) {
252
+ console.log(`IPv4 address found for ${vmName}: ${vmIp}`);
253
+ } else {
254
+ console.log(`IPv4 address not yet available for ${vmName}. Retrying in ${delayMs / 1000} seconds...`);
255
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
256
+ }
257
+ } catch (error) {
258
+ console.error(`Error getting IPv4 address: ${error.message}`);
259
+ console.log(`Retrying in ${delayMs / 1000} seconds...`);
260
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
261
+ }
262
+ retries++;
263
+ }
264
+
265
+ if (!vmIp) {
266
+ console.error(`Failed to get IPv4 address for ${vmName} after ${maxRetries} attempts. Aborting tests.`);
267
+ return;
268
+ }
269
+
270
+ // 2. Iteratively check connection to google.com
271
+ let connectedToGoogle = false;
272
+ retries = 0;
273
+ while (!connectedToGoogle && retries < maxRetries) {
274
+ try {
275
+ console.log(`Checking connectivity to google.com from ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
276
+ const curlOutput = shellExec(
277
+ `lxc exec ${vmName} -- bash -c 'curl -s -o /dev/null -w "%{http_code}" http://google.com'`,
278
+ { stdout: true },
279
+ );
280
+ if (curlOutput.startsWith('2') || curlOutput.startsWith('3')) {
281
+ console.log(`Successfully connected to google.com from ${vmName}.`);
282
+ connectedToGoogle = true;
283
+ } else {
284
+ console.log(`Connectivity to google.com not yet verified. Retrying in ${delayMs / 1000} seconds...`);
285
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
286
+ }
287
+ } catch (error) {
288
+ console.error(`Error checking connectivity to google.com: ${error.message}`);
289
+ console.log(`Retrying in ${delayMs / 1000} seconds...`);
290
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
291
+ }
292
+ retries++;
293
+ }
294
+
295
+ if (!connectedToGoogle) {
296
+ console.error(
297
+ `Failed to connect to google.com from ${vmName} after ${maxRetries} attempts. Aborting further tests.`,
298
+ );
299
+ return;
300
+ }
301
+
302
+ // 3. Check other connectivity, network, and VM health parameters
303
+ console.log(`\n--- Comprehensive Health Report for ${vmName} ---`);
304
+
305
+ // VM Status
306
+ console.log('\n--- VM Status ---');
307
+ try {
308
+ const vmStatus = shellExec(`lxc list ${vmName} --format json`, { stdout: true, silent: true });
309
+ console.log(JSON.stringify(JSON.parse(vmStatus), null, 2));
310
+ } catch (error) {
311
+ console.error(`Error getting VM status: ${error.message}`);
312
+ }
313
+
314
+ // CPU Usage
315
+ console.log('\n--- CPU Usage ---');
316
+ try {
317
+ const cpuUsage = shellExec(`lxc exec ${vmName} -- bash -c 'top -bn1 | grep "Cpu(s)"'`, { stdout: true });
318
+ console.log(cpuUsage.trim());
319
+ } catch (error) {
320
+ console.error(`Error getting CPU usage: ${error.message}`);
321
+ }
322
+
323
+ // Memory Usage
324
+ console.log('\n--- Memory Usage ---');
325
+ try {
326
+ const memoryUsage = shellExec(`lxc exec ${vmName} -- bash -c 'free -m'`, { stdout: true });
327
+ console.log(memoryUsage.trim());
328
+ } catch (error) {
329
+ console.error(`Error getting memory usage: ${error.message}`);
330
+ }
331
+
332
+ // Disk Usage
333
+ console.log('\n--- Disk Usage (Root Partition) ---');
334
+ try {
335
+ const diskUsage = shellExec(`lxc exec ${vmName} -- bash -c 'df -h /'`, { stdout: true });
336
+ console.log(diskUsage.trim());
337
+ } catch (error) {
338
+ console.error(`Error getting disk usage: ${error.message}`);
339
+ }
340
+
341
+ // Network Interface Status
342
+ console.log('\n--- Network Interface Status (ip a) ---');
343
+ try {
344
+ const ipA = shellExec(`lxc exec ${vmName} -- bash -c 'ip a'`, { stdout: true });
345
+ console.log(ipA.trim());
346
+ } catch (error) {
347
+ console.error(`Error getting network interface status: ${error.message}`);
348
+ }
349
+
350
+ // DNS Resolution (resolv.conf)
351
+ console.log('\n--- DNS Configuration (/etc/resolv.conf) ---');
352
+ try {
353
+ const resolvConf = shellExec(`lxc exec ${vmName} -- bash -c 'cat /etc/resolv.conf'`, { stdout: true });
354
+ console.log(resolvConf.trim());
355
+ } catch (error) {
356
+ console.error(`Error getting DNS configuration: ${error.message}`);
357
+ }
358
+
359
+ console.log(`\nComprehensive test for VM: ${vmName} completed.`);
360
+ }
119
361
  },
120
362
  };
121
363
  }
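The auto-expose logic above wraps `lxc config device add ... proxy ...` with device names of the form `<vm>-<protocol>-port-<port>`, so the standard LXD device commands can inspect or undo what it created; the VM name below is illustrative:

```bash
# Show the devices (including proxy forwards) configured on the VM.
lxc config device show k8s-control

# Remove a single forwarded port that is no longer needed.
lxc config device remove k8s-control k8s-control-tcp-port-30000
```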
package/src/index.js CHANGED
@@ -31,7 +31,7 @@ class Underpost {
31
31
  * @type {String}
32
32
  * @memberof Underpost
33
33
  */
34
- static version = 'v2.8.791';
34
+ static version = 'v2.8.793';
35
35
  /**
36
36
  * Repository cli API
37
37
  * @static