underpost 2.8.792 → 2.8.794

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -68,7 +68,7 @@ Run dev client server
  npm run dev
  ```
  <!-- -->
- ## underpost ci/cd cli v2.8.792
+ ## underpost ci/cd cli v2.8.794

  ### Usage: `underpost [options] [command]`
  ```
package/cli.md CHANGED
@@ -1,4 +1,4 @@
- ## underpost ci/cd cli v2.8.792
+ ## underpost ci/cd cli v2.8.794

  ### Usage: `underpost [options] [command]`
  ```
@@ -496,6 +496,8 @@ Options:
  --delete-expose <vm-name-ports> Vm name and : separated with Comma separated
  vm port to remove expose e. g.
  k8s-control:80,443
+ --auto-expose-k8s-ports <vm-id> Automatically expose common Kubernetes ports
+ for the VM.
  -h, --help display help for command

  ```
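For reference, a hedged usage sketch of the new flag. The option is registered on the CLI's LXD management command; the subcommand spelling (`underpost lxd`) and the VM name `k8s-control` below are assumptions for illustration only:

```
# Assumed invocation: forward common Kubernetes ports of the LXD VM
# "k8s-control" to the host (subcommand and VM name are illustrative).
underpost lxd --auto-expose-k8s-ports k8s-control
```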
@@ -58,7 +58,7 @@ services:
  cpus: '0.25'
  memory: 20M
  labels: # labels in Compose file instead of Dockerfile
- engine.version: '2.8.792'
+ engine.version: '2.8.794'
  networks:
  - load-balancer

@@ -1,20 +1,25 @@
  #!/bin/bash

+ # Exit immediately if a command exits with a non-zero status.
  set -e

- # Expand /dev/sda2 partition and resize filesystem automatically
+ echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm Use Case)..."
+
+ # --- Disk Partition Resizing (Keep as is, seems functional) ---
+ echo "Expanding /dev/sda2 partition and resizing filesystem..."

  # Check if parted is installed
  if ! command -v parted &>/dev/null; then
  echo "parted not found, installing..."
- dnf install -y parted
+ sudo dnf install -y parted
  fi

  # Get start sector of /dev/sda2
- START_SECTOR=$(parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
+ START_SECTOR=$(sudo parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')

  # Resize the partition
- parted /dev/sda ---pretend-input-tty <<EOF
+ # Using 'sudo' for parted commands
+ sudo parted /dev/sda ---pretend-input-tty <<EOF
  unit s
  resizepart 2 100%
  Yes
@@ -22,21 +27,28 @@ quit
  EOF

  # Resize the filesystem
- resize2fs /dev/sda2
+ sudo resize2fs /dev/sda2

  echo "Disk and filesystem resized successfully."
- sudo dnf install -y tar
- sudo dnf install -y bzip2
- sudo dnf install -y git
+
+ # --- Essential System Package Installation ---
+ echo "Installing essential system packages..."
+ sudo dnf install -y tar bzip2 git epel-release
+
+ # Perform a system update to ensure all packages are up-to-date
  sudo dnf -y update
- sudo dnf -y install epel-release
- sudo dnf install -y ufw
- sudo systemctl enable --now ufw
+
+ # --- NVM and Node.js Installation ---
+ echo "Installing NVM and Node.js v23.8.0..."
  curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
- NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
+
+ # Load nvm for the current session
+ export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
  [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
+
  nvm install 23.8.0
  nvm use 23.8.0
+
  echo "
  ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
  ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
@@ -45,22 +57,36 @@ echo "
  ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
  ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░

- Installing underpost k8s node ...
-
+ Installing underpost k8s node...
  "
+
+ # Install underpost globally
  npm install -g underpost
- chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost
+
+ # Ensure underpost executable is in PATH and has execute permissions
+ # Adjusting this for global npm install which usually handles permissions
+ # If you still face issues, ensure /root/.nvm/versions/node/v23.8.0/bin is in your PATH
+ # For global installs, it's usually handled automatically.
+ # chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost # This might not be necessary for global npm installs
+
+ # --- Kernel Module for Bridge Filtering ---
+ # This is crucial for Kubernetes networking (CNI)
+ echo "Loading br_netfilter kernel module..."
  sudo modprobe br_netfilter
- mkdir -p /home/dd
- cd $(underpost root)/underpost
+
+ # --- Initial Host Setup for Kubernetes Prerequisites ---
+ # This calls the initHost method in cluster.js to install Docker, Podman, Kind, Kubeadm, Helm.
+ echo "Running initial host setup for Kubernetes prerequisites..."
+ # Ensure the current directory is where 'underpost' expects its root, or use absolute paths.
+ # Assuming 'underpost root' correctly points to the base directory of your project.
+ cd "$(underpost root)/underpost"
  underpost cluster --init-host

- # Default flags
+ # --- Argument Parsing for Kubeadm/Kind/Worker ---
  USE_KUBEADM=false
- USE_KIND=false
+ USE_KIND=false # Not the primary focus for this request, but keeping the logic
  USE_WORKER=false

- # Loop through arguments
  for arg in "$@"; do
  case "$arg" in
  --kubeadm)
@@ -76,71 +102,42 @@ for arg in "$@"; do
  done

  echo "USE_KUBEADM = $USE_KUBEADM"
- echo "USE_KIND = $USE_KIND"
- echo "USE_WORKER = $USE_WORKER"
-
- underpost cluster --kubeadm
- underpost cluster --reset
-
- PORTS=(
- 22 # SSH
- 80 # HTTP
- 443 # HTTPS
- 53 # DNS (TCP/UDP)
- 66 # TFTP
- 67 # DHCP
- 69 # TFTP
- 111 # rpcbind
- 179 # Calico BGP
- 2049 # NFS
- 20048 # NFS mountd
- 4011 # PXE boot
- 5240 # snapd API
- 5248 # Juju controller
- 6443 # Kubernetes API
- 9153 # CoreDNS metrics
- 10250 # Kubelet API
- 10251 # kube-scheduler
- 10252 # kube-controller-manager
- 10255 # Kubelet read-only (deprecated)
- 10257 # controller-manager (v1.23+)
- 10259 # scheduler (v1.23+)
- )
-
- PORT_RANGES=(
- 2379:2380 # etcd
- # 30000:32767 # NodePort range
- # 3000:3100 # App node ports
- 32765:32766 # Ephemeral ports
- 6783:6784 # Weave Net
- )
-
- # Open individual ports
- for PORT in "${PORTS[@]}"; do
- ufw allow ${PORT}/tcp
- ufw allow ${PORT}/udp
- done
+ echo "USE_KIND = $USE_KIND"
+ echo "USE_WORKER = $USE_WORKER"

- # Open port ranges
- for RANGE in "${PORT_RANGES[@]}"; do
- ufw allow ${RANGE}/tcp
- ufw allow ${RANGE}/udp
- done
+ # --- Kubernetes Cluster Initialization Logic ---

- # Behavior based on flags
- if $USE_KUBEADM; then
- echo "Running control node with kubeadm..."
- underpost cluster --kubeadm
- # kubectl get pods --all-namespaces -o wide -w
- fi
+ # Apply host configuration (SELinux, Containerd, Sysctl, and now firewalld disabling)
+ echo "Applying Kubernetes host configuration (SELinux, Containerd, Sysctl, Firewalld)..."
+ underpost cluster --config

- if $USE_KIND; then
+ if $USE_KUBEADM; then
+ if $USE_WORKER; then
+ echo "Running worker node setup for kubeadm..."
+ # For worker nodes, the 'underpost cluster --worker' command will handle joining
+ # the cluster. The join command itself needs to be provided from the control plane.
+ # This script assumes the join command will be executed separately or passed in.
+ # For a full automated setup, you'd typically pass the join token/command here.
+ # Example: underpost cluster --worker --join-command "kubeadm join ..."
+ # For now, this just runs the worker-specific config.
+ underpost cluster --worker
+ underpost cluster --chown
+ echo "Worker node setup initiated. You will need to manually join this worker to your control plane."
+ echo "On your control plane, run 'kubeadm token create --print-join-command' and execute the output here."
+ else
+ echo "Running control plane setup with kubeadm..."
+ # This will initialize the kubeadm control plane and install Calico
+ underpost cluster --kubeadm
+ echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
+ fi
+ elif $USE_KIND; then
  echo "Running control node with kind..."
  underpost cluster
- # kubectl get pods --all-namespaces -o wide -w
+ echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
+ else
+ echo "No specific cluster role (--kubeadm, --kind, --worker) specified. Please provide one."
+ exit 1
  fi

- if $USE_WORKER; then
- echo "Running worker..."
- underpost cluster --worker --config
- fi
+ echo "Underpost Kubernetes Node Setup completed."
+ echo "Remember to verify cluster health with 'kubectl get nodes' and 'kubectl get pods --all-namespaces'."
@@ -1,17 +1,11 @@
- ---
  apiVersion: v1
  kind: Service
  metadata:
  name: service-valkey
  namespace: default
  spec:
+ selector:
+ app: service-valkey
  ports:
  - port: 6379
  targetPort: 6379
- selector:
- app: service-valkey
- ipFamilyPolicy: PreferDualStack
- ipFamilies:
- - IPv4
- # - IPv6
- type: ClusterIP
@@ -14,27 +14,25 @@ spec:
  labels:
  app: service-valkey
  spec:
- # Prevent automatic token mounting if you're not using the default ServiceAccount
  automountServiceAccountToken: false
-
  containers:
  - name: service-valkey
  image: docker.io/valkey/valkey:latest
  imagePullPolicy: IfNotPresent
- env:
- - name: TZ
- value: Europe/Zurich
+ command: ["valkey-server"]
+ args: ["--port", "6379"]
  ports:
  - containerPort: 6379
  startupProbe:
  tcpSocket:
  port: 6379
- failureThreshold: 30
  periodSeconds: 5
  timeoutSeconds: 5
+ failureThreshold: 30
  livenessProbe:
  tcpSocket:
  port: 6379
- failureThreshold: 2
+ initialDelaySeconds: 10
  periodSeconds: 30
  timeoutSeconds: 5
+ failureThreshold: 2
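A quick, hedged way to sanity-check the reworked Service selector and the explicit valkey-server port (the throwaway pod name and image tag are illustrative, not part of the manifests):

```
# Run a disposable client pod and ping the ClusterIP service defined above.
kubectl run --rm -it valkey-test --image=docker.io/valkey/valkey:latest --restart=Never -- \
  valkey-cli -h service-valkey.default.svc.cluster.local -p 6379 ping
# Expected reply: PONG
```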
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "type": "module",
  "main": "src/index.js",
  "name": "underpost",
- "version": "2.8.792",
+ "version": "2.8.794",
  "description": "pwa api rest template",
  "scripts": {
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -37,39 +37,39 @@ class UnderpostCluster {
  chown: false,
  },
  ) {
- // sudo dnf update
- // 1) Install kind, kubeadm, docker, podman, helm
- // 2) Check kubectl, kubelet, containerd.io
- // 3) Install Nvidia drivers from Rocky Linux docs
- // 4) Install LXD with MAAS from Rocky Linux docs
- // 5) Install MAAS src from snap
+ // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
  if (options.initHost === true) return UnderpostCluster.API.initHost();
- if (options.config === true) UnderpostCluster.API.config();
- if (options.chown === true) UnderpostCluster.API.chown();
+
+ // Applies general host configuration (SELinux, containerd, sysctl)
+ if (options.config === true) return UnderpostCluster.API.config();
+
+ // Sets up kubectl configuration for the current user
+ if (options.chown === true) return UnderpostCluster.API.chown();
+
  const npmRoot = getNpmRootPath();
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+
+ // Information gathering options
  if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
  if (options.infoCapacity === true)
  return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
- if (options.reset === true) return await UnderpostCluster.API.reset();
  if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
-
  if (options.nsUse && typeof options.nsUse === 'string') {
  shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
  return;
  }
  if (options.info === true) {
- shellExec(`kubectl config get-contexts`); // config env persisente for manage multiple clusters
+ shellExec(`kubectl config get-contexts`);
  shellExec(`kubectl config get-clusters`);
- shellExec(`kubectl get nodes -o wide`); // set of nodes of a cluster
+ shellExec(`kubectl get nodes -o wide`);
  shellExec(`kubectl config view | grep namespace`);
- shellExec(`kubectl get ns -o wide`); // A namespace can have pods of different nodes
- shellExec(`kubectl get pvc --all-namespaces -o wide`); // PersistentVolumeClaim -> request storage service
- shellExec(`kubectl get pv --all-namespaces -o wide`); // PersistentVolume -> real storage
+ shellExec(`kubectl get ns -o wide`);
+ shellExec(`kubectl get pvc --all-namespaces -o wide`);
+ shellExec(`kubectl get pv --all-namespaces -o wide`);
  shellExec(`kubectl get cronjob --all-namespaces -o wide`);
- shellExec(`kubectl get svc --all-namespaces -o wide`); // proxy dns gate way -> deployments, statefulsets, pods
- shellExec(`kubectl get statefulsets --all-namespaces -o wide`); // set pods with data/volume persistence
- shellExec(`kubectl get deployments --all-namespaces -o wide`); // set pods
+ shellExec(`kubectl get svc --all-namespaces -o wide`);
+ shellExec(`kubectl get statefulsets --all-namespaces -o wide`);
+ shellExec(`kubectl get deployments --all-namespaces -o wide`);
  shellExec(`kubectl get configmap --all-namespaces -o wide`);
  shellExec(`kubectl get pods --all-namespaces -o wide`);
  shellExec(
@@ -91,40 +91,47 @@ class UnderpostCluster {
  shellExec(`sudo kubectl api-resources`);
  return;
  }
- const alrreadyCluster =
+
+ // Reset Kubernetes cluster components (Kind/Kubeadm) and container runtimes
+ if (options.reset === true) return await UnderpostCluster.API.reset();
+
+ // Check if a cluster (Kind or Kubeadm with Calico) is already initialized
+ const alreadyCluster =
  UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0] ||
  UnderpostDeploy.API.get('calico-kube-controllers')[0];

- if (
- !options.worker &&
- !alrreadyCluster &&
- ((!options.kubeadm && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
- (options.kubeadm === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0]))
- ) {
- UnderpostCluster.API.config();
+ // --- Kubeadm/Kind Cluster Initialization ---
+ // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
+ // It prevents re-initialization if a cluster is already detected.
+ if (!options.worker && !alreadyCluster) {
+ // If it's a kubeadm setup and no Calico controller is found (indicating no kubeadm cluster)
  if (options.kubeadm === true) {
+ logger.info('Initializing Kubeadm control plane...');
+ // Initialize kubeadm control plane
  shellExec(
  `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
  );
+ // Configure kubectl for the current user
  UnderpostCluster.API.chown();
- // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+ // Install Calico CNI
+ logger.info('Installing Calico CNI...');
  shellExec(
  `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
  );
- // shellExec(
- // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
- // );
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
+ // Untaint control plane node to allow scheduling pods
  const nodeName = os.hostname();
  shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
+ // Install local-path-provisioner for dynamic PVCs (optional but recommended)
+ logger.info('Installing local-path-provisioner...');
  shellExec(
  `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
  );
  } else {
+ // Kind cluster initialization (if not using kubeadm)
+ logger.info('Initializing Kind cluster...');
  if (options.full === true || options.dedicatedGpu === true) {
- // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
  shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
- UnderpostCluster.API.chown();
  } else {
  shellExec(
  `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
@@ -132,10 +139,20 @@ class UnderpostCluster {
  }.yaml`,
  );
  }
+ UnderpostCluster.API.chown();
  }
- } else logger.warn('Cluster already initialized');
+ } else if (options.worker === true) {
+ // Worker node specific configuration (kubeadm join command needs to be executed separately)
+ logger.info('Worker node configuration applied. Awaiting kubeadm join command...');
+ // No direct cluster initialization here for workers. The `kubeadm join` command
+ // needs to be run on the worker after the control plane is up and a token is created.
+ // This part of the script is for general worker setup, not the join itself.
+ } else {
+ logger.warn('Cluster already initialized or worker flag not set for worker node.');
+ }

- // shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubelet-config.yaml`);
+ // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
+ // These deployments happen after the base cluster is up.

  if (options.full === true || options.dedicatedGpu === true) {
  shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
@@ -218,8 +235,6 @@ class UnderpostCluster {
  --eval 'rs.initiate(${JSON.stringify(mongoConfig)})'`,
  );
  }
-
- // await UnderpostTest.API.statusMonitor('mongodb-1');
  } else if (options.full === true || options.mongodb === true) {
  if (options.pullImage === true) {
  shellExec(`docker pull mongo:latest`);
@@ -280,193 +295,149 @@ class UnderpostCluster {
  }
  },

+ /**
+ * @method config
+ * @description Configures host-level settings required for Kubernetes.
+ * IMPORTANT: This method has been updated to REMOVE all iptables flushing commands
+ * to prevent conflicts with Kubernetes' own network management.
+ */
  config() {
+ console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
+ // Disable SELinux (permissive mode)
  shellExec(`sudo setenforce 0`);
  shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+
+ // Enable and start Docker and Kubelet services
  shellExec(`sudo systemctl enable --now docker`);
  shellExec(`sudo systemctl enable --now kubelet`);
- shellExec(`containerd config default > /etc/containerd/config.toml`);
- shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
- shellExec(`sudo service docker restart`);
+
+ // Configure containerd for SystemdCgroup
+ // This is crucial for kubelet to interact correctly with containerd
+ shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
+ shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
+ shellExec(`sudo service docker restart`); // Restart docker after containerd config changes
  shellExec(`sudo systemctl enable --now containerd.service`);
+ shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
+
+ // Disable swap (required by Kubernetes)
  shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+
+ // Reload systemd daemon to pick up new unit files/changes
  shellExec(`sudo systemctl daemon-reload`);
- shellExec(`sudo systemctl restart containerd`);
- shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
- // Clean ip tables
- shellExec(`sudo iptables -F`);
- shellExec(`sudo iptables -X`);
- shellExec(`sudo iptables -t nat -F`);
- shellExec(`sudo iptables -t nat -X`);
- shellExec(`sudo iptables -t raw -F`);
- shellExec(`sudo iptables -t raw -X`);
- shellExec(`sudo iptables -t mangle -F`);
- shellExec(`sudo iptables -t mangle -X`);
- shellExec(`sudo iptables -P INPUT ACCEPT`);
- shellExec(`sudo iptables -P FORWARD ACCEPT`);
- shellExec(`sudo iptables -P OUTPUT ACCEPT`);
+
+ // Enable bridge-nf-call-iptables for Kubernetes networking
+ // This ensures traffic through Linux bridges is processed by iptables (crucial for CNI)
+ for (const iptableConfPath of [
+ `/etc/sysctl.d/k8s.conf`,
+ `/etc/sysctl.d/99-k8s-ipforward.conf`,
+ `/etc/sysctl.d/99-k8s.conf`,
+ ])
+ shellExec(`echo 'net.bridge.bridge-nf-call-iptables = 1
+ net.bridge.bridge-nf-call-ip6tables = 1
+ net.bridge.bridge-nf-call-arptables = 1
+ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
+ shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
+
+ // Disable firewalld (common cause of network issues in Kubernetes)
+ shellExec(`sudo systemctl stop firewalld || true`); // Stop if running
+ shellExec(`sudo systemctl disable firewalld || true`); // Disable from starting on boot
  },
+
+ /**
+ * @method chown
+ * @description Sets up kubectl configuration for the current user.
+ * This is typically run after kubeadm init on the control plane.
+ */
  chown() {
+ console.log('Setting up kubectl configuration...');
  shellExec(`mkdir -p ~/.kube`);
  shellExec(`sudo -E cp -i /etc/kubernetes/admin.conf ~/.kube/config`);
  shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+ console.log('kubectl config set up successfully.');
  },
- // This function performs a comprehensive reset of Kubernetes and container environments
- // on the host machine. Its primary goal is to clean up cluster components, temporary files,
- // and container data, ensuring a clean state for re-initialization or fresh deployments,
- // while also preventing the loss of the host machine's internet connectivity.

+ /**
+ * @method reset
+ * @description Performs a comprehensive reset of Kubernetes and container environments.
+ * This function is for cleaning up a node, not for initial setup.
+ * It avoids aggressive iptables flushing that would break host connectivity.
+ */
  reset() {
- // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
- // 'kind get clusters' lists all Kind clusters.
- // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
- // and executes 'kind delete cluster --name <cluster_name>' to remove them.
- shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
-
- // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
- // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
- // configuration files, and associated network rules (like iptables entries created by kubeadm).
- // The '-f' flag bypasses confirmation prompts.
+ console.log('Starting comprehensive reset of Kubernetes and container environments...');
+
+ // Delete all existing Kind (Kubernetes in Docker) clusters.
+ shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster --name`); // -r for no-op if no clusters
+
+ // Reset the Kubernetes control-plane components installed by kubeadm.
  shellExec(`sudo kubeadm reset -f`);

- // Step 3: Remove specific CNI (Container Network Interface) configuration files.
- // This command targets and removes the configuration file for Flannel,
- // a common CNI plugin, which might be left behind after a reset.
+ // Remove specific CNI configuration files (e.g., Flannel)
  shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');

- // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
- // This command would flush all iptables rules, including those crucial for the host's general
- // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
- // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
- // default network configuration.
-
- // Step 4: Remove the kubectl configuration file from the current user's home directory.
- // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
- // providing a clean slate for connecting to a new or re-initialized cluster.
+ // Remove the kubectl configuration file
  shellExec('sudo rm -f $HOME/.kube/config');

- // Step 5: Clear trash files from the root user's trash directory.
- // This is a general cleanup step to remove temporary or deleted files.
+ // Clear trash files from the root user's trash directory.
  shellExec('sudo rm -rf /root/.local/share/Trash/files/*');

- // Step 6: Prune all unused Docker data.
- // 'docker system prune -a -f' removes:
- // - All stopped containers
- // - All unused networks
- // - All dangling images
- // - All build cache
- // - All unused volumes
- // This aggressively frees up disk space and removes temporary Docker artifacts.
+ // Prune all unused Docker data.
  shellExec('sudo docker system prune -a -f');

- // Step 7: Stop the Docker daemon service.
- // This step is often necessary to ensure that Docker's files and directories
- // can be safely manipulated or moved in subsequent steps without conflicts.
+ // Stop the Docker daemon service.
  shellExec('sudo service docker stop');

- // Step 8: Aggressively remove container storage data for containerd and Docker.
- // These commands target the default storage locations for containerd and Docker,
- // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
- // This ensures a complete wipe of all container images, layers, and volumes.
+ // Aggressively remove container storage data for containerd and Docker.
  shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
  shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
- shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
- shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
- shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
-
- // Step 9: Re-configure Docker's default storage location (if desired).
- // These commands effectively move Docker's data directory from its default `/var/lib/docker`
- // to a new location (`/home/docker`) and create a symbolic link.
- // This is a specific customization to relocate Docker's storage.
- shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
- shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
- shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
- shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
-
- // Step 10: Prune all unused Podman data.
- // Similar to Docker pruning, these commands remove:
- // - All stopped containers
- // - All unused networks
- // - All unused images
- // - All unused volumes ('--volumes')
- // - The '--force' flag bypasses confirmation.
- // '--external' prunes external content not managed by Podman's default storage backend.
+ shellExec(`sudo rm -rf /var/lib/docker~/*`);
+ shellExec(`sudo rm -rf /home/containers/storage/*`);
+ shellExec(`sudo rm -rf /home/docker/*`);
+
+ // Re-configure Docker's default storage location (if desired).
+ shellExec('sudo mv /var/lib/docker /var/lib/docker~ || true'); // Use || true to prevent error if dir doesn't exist
+ shellExec('sudo mkdir -p /home/docker');
+ shellExec('sudo chmod 777 /home/docker');
+ shellExec('sudo ln -s /home/docker /var/lib/docker');
+
+ // Prune all unused Podman data.
  shellExec(`sudo podman system prune -a -f`);
  shellExec(`sudo podman system prune --all --volumes --force`);
  shellExec(`sudo podman system prune --external --force`);
- shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition

- // Step 11: Create and set permissions for Podman's custom storage directory.
- // This ensures the custom path `/home/containers/storage` exists and has correct permissions
- // before Podman attempts to use it.
+ // Create and set permissions for Podman's custom storage directory.
  shellExec(`sudo mkdir -p /home/containers/storage`);
  shellExec('sudo chmod 0711 /home/containers/storage');

- // Step 12: Update Podman's storage configuration file.
- // This command uses 'sed' to modify `/etc/containers/storage.conf`,
- // changing the default storage path from `/var/lib/containers/storage`
- // to the customized `/home/containers/storage`.
+ // Update Podman's storage configuration file.
  shellExec(
  `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
  );

- // Step 13: Reset Podman system settings.
- // This command resets Podman's system-wide configuration to its default state.
+ // Reset Podman system settings.
  shellExec(`sudo podman system reset -f`);

- // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
- // were previously removed. These sysctl settings (bridge-nf-call-iptables,
- // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
- // network traffic through Linux bridges to be processed by iptables.
- // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
- // Re-initializing Kubernetes will typically set these as needed, and leaving them
- // at their system default (or '1' if already configured) is safer for host
- // connectivity during a reset operation.
-
- // https://github.com/kubernetes-sigs/kind/issues/2886
- // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
- // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
- // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
-
- // Step 14: Remove the 'kind' Docker network.
- // This cleans up any network bridges or configurations specifically created by Kind.
- // shellExec(`docker network rm kind`);
-
- // Reset kubelet
+ // Reset kubelet components
  shellExec(`sudo systemctl stop kubelet`);
  shellExec(`sudo rm -rf /etc/kubernetes/*`);
  shellExec(`sudo rm -rf /var/lib/kubelet/*`);
  shellExec(`sudo rm -rf /etc/cni/net.d/*`);
  shellExec(`sudo systemctl daemon-reload`);
  shellExec(`sudo systemctl start kubelet`);
+
+ console.log('Comprehensive reset completed.');
  },

  getResourcesCapacity(kubeadm = false) {
  const resources = {};
- const info = false
- ? `Capacity:
- cpu: 8
- ephemeral-storage: 153131976Ki
- hugepages-1Gi: 0
- hugepages-2Mi: 0
- memory: 11914720Ki
- pods: 110
- Allocatable:
- cpu: 8
- ephemeral-storage: 153131976Ki
- hugepages-1Gi: 0
- hugepages-2Mi: 0
- memory: 11914720Ki
- pods: `
- : shellExec(
- `kubectl describe node ${
- kubeadm === true ? os.hostname() : 'kind-worker'
- } | grep -E '(Allocatable:|Capacity:)' -A 6`,
- {
- stdout: true,
- silent: true,
- },
- );
+ const info = shellExec(
+ `kubectl describe node ${
+ kubeadm === true ? os.hostname() : 'kind-worker'
+ } | grep -E '(Allocatable:|Capacity:)' -A 6`,
+ {
+ stdout: true,
+ silent: true,
+ },
+ );
  info
  .split('Allocatable:')[1]
  .split('\n')
@@ -487,17 +458,20 @@ Allocatable:
  return resources;
  },
  initHost() {
+ console.log('Installing Docker, Podman, Kind, Kubeadm, and Helm...');
  // Install docker
- shellExec(`sudo dnf -y install dnf-plugins-core
- sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
+ shellExec(`sudo dnf -y install dnf-plugins-core`);
+ shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
  shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
+
  // Install podman
  shellExec(`sudo dnf -y install podman`);
+
  // Install kind
  shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
  chmod +x ./kind
  sudo mv ./kind /bin/kind`);
- // Install kubeadm
+ // Install kubeadm, kubelet, kubectl
  shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
  [kubernetes]
  name=Kubernetes
@@ -508,12 +482,14 @@ gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
  exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
  EOF`);
  shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
+
  // Install helm
- shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
- chmod 700 get_helm.sh
- ./get_helm.sh
- chmod +x /usr/local/bin/helm
- sudo mv /usr/local/bin/helm /bin/helm`);
+ shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`);
+ shellExec(`chmod 700 get_helm.sh`);
+ shellExec(`./get_helm.sh`);
+ shellExec(`chmod +x /usr/local/bin/helm`);
+ shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
+ console.log('Host prerequisites installed successfully.');
  },
  };
  }
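To spot-check the host state that the reworked `config()` is expected to leave behind (sysctl drop-ins applied, swap off, firewalld stopped), a hedged verification sketch using standard system tools:

```
# Both values should read 1 after `sudo sysctl --system` has run.
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# No output means swap is disabled, as kubelet requires.
swapon --show
# Should report "inactive" (or fail harmlessly if firewalld is not installed).
systemctl is-active firewalld || true
```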
package/src/cli/deploy.js CHANGED
@@ -273,6 +273,25 @@ kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "p
  kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "192.168.0.0/24"}]'
  sudo podman run --rm localhost/<image-name>:<image-version> <command>
  kubectl get configmap kubelet-config -n kube-system -o yaml > kubelet-config.yaml
+ kubectl -n kube-system rollout restart daemonset kube-proxy
+
+ kubectl get EndpointSlice -o wide --all-namespaces -w
+
+ kubectl run --rm -it test-dns --image=busybox:latest --restart=Never -- /bin/sh -c "
+ nslookup kubernetes.default.svc.cluster.local;
+ nslookup mongodb-service.default.svc.cluster.local;
+ nslookup service-valkey.default.svc.cluster.local;
+ nc -vz mongodb-service 27017;
+ nc -vz service-valkey 6379;
+ echo exit code: \\\$?
+ "
+
+ kubectl apply -f - <<EOF
+ apiVersion: apps/v1
+ kind: StatefulSet
+ metadata:
+ name: ...
+ EOF
  `);
  if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd.router`))
  deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
package/src/cli/index.js CHANGED
@@ -294,6 +294,7 @@ program
  '--delete-expose <vm-name-ports>',
  'Vm name and : separated with Comma separated vm port to remove expose e. g. k8s-control:80,443',
  )
+ .option('--auto-expose-k8s-ports <vm-id>', 'Automatically expose common Kubernetes ports for the VM.')
  .description('Lxd management')
  .action(UnderpostLxd.API.callback);

package/src/cli/lxd.js CHANGED
@@ -30,6 +30,7 @@ class UnderpostLxd {
  * @param {string} [options.expose=''] - Expose ports from a VM to the host (format: 'vmName:port1,port2').
  * @param {string} [options.deleteExpose=''] - Delete exposed ports from a VM (format: 'vmName:port1,port2').
  * @param {string} [options.test=''] - Test health, status and network connectivity for a VM.
+ * @param {string} [options.autoExposeK8sPorts=''] - Automatically expose common Kubernetes ports for the VM.
  */
  async callback(
  options = {
@@ -49,6 +50,7 @@ class UnderpostLxd {
  expose: '',
  deleteExpose: '',
  test: '',
+ autoExposeK8sPorts: '',
  },
  ) {
  const npmRoot = getNpmRootPath();
@@ -64,7 +66,6 @@ class UnderpostLxd {
  const lxdPressedContent = fs
  .readFileSync(`${underpostRoot}/manifests/lxd/lxd-preseed.yaml`, 'utf8')
  .replaceAll(`127.0.0.1`, getLocalIPv4Address());
- // shellExec(`lxd init --preseed < ${underpostRoot}/manifests/lxd/lxd-preseed.yaml`);
  shellExec(`echo "${lxdPressedContent}" | lxd init --preseed`);
  shellExec(`lxc cluster list`);
  }
@@ -99,7 +100,86 @@ ipv6.address=none`);
  } else if (options.worker == true) {
  flag = ' -s -- --worker';
  }
- pbcopy(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
+ console.log(`Executing underpost-setup.sh on VM: ${options.initVm}`);
+ shellExec(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
+ console.log(`underpost-setup.sh execution completed on VM: ${options.initVm}`);
+ }
+ // --- Automatic Kubernetes Port Exposure ---
+ if (options.autoExposeK8sPorts && typeof options.autoExposeK8sPorts === 'string') {
+ console.log(`Automatically exposing Kubernetes ports for VM: ${options.autoExposeK8sPorts}`);
+ const vmName = options.autoExposeK8sPorts;
+ const hostIp = getLocalIPv4Address();
+ let vmIp = '';
+ let retries = 0;
+ const maxRetries = 10;
+ const delayMs = 5000; // 5 seconds
+
+ // Wait for VM to get an IP address
+ while (!vmIp && retries < maxRetries) {
+ try {
+ console.log(`Attempting to get IPv4 address for ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
+ vmIp = shellExec(
+ `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+ { stdout: true },
+ ).trim();
+ if (vmIp) {
+ console.log(`IPv4 address found for ${vmName}: ${vmIp}`);
+ } else {
+ console.log(`IPv4 address not yet available for ${vmName}. Retrying in ${delayMs / 1000} seconds...`);
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
+ }
+ } catch (error) {
+ console.error(`Error getting IPv4 address for exposure: ${error.message}`);
+ console.log(`Retrying in ${delayMs / 1000} seconds...`);
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
+ }
+ retries++;
+ }
+
+ if (!vmIp) {
+ console.error(`Failed to get VM IP for ${vmName} after ${maxRetries} attempts. Cannot expose ports.`);
+ return;
+ }
+
+ let portsToExpose = [];
+ if (options.control === true) {
+ // Kubernetes API Server
+ portsToExpose.push('6443');
+ // Standard HTTP/HTTPS for Ingress if deployed
+ portsToExpose.push('80');
+ portsToExpose.push('443');
+ }
+ // NodePort range for all nodes (control plane can also run pods with NodePorts)
+ // It's safer to expose the entire range for flexibility, or specific NodePorts if known.
+ // For production, you might only expose specific NodePorts or use a LoadBalancer.
+ // For a general setup, exposing the range is common.
+ // Note: LXD proxy device can only expose individual ports, not ranges directly.
+ // We will expose a few common ones, or rely on specific 'expose' calls for others.
+ // Let's add some common NodePorts that might be used by applications.
+ // The full range 30000-32767 would require individual proxy rules for each port.
+ // For this automatic setup, we'll focus on critical K8s ports and common app ports.
+ // If a user needs the full NodePort range, they should use the `expose` option explicitly.
+ portsToExpose.push('30000'); // Example NodePort
+ portsToExpose.push('30001'); // Example NodePort
+ portsToExpose.push('30002'); // Example NodePort
+
+ const protocols = ['tcp']; // Most K8s services are TCP, UDP for some like DNS
+
+ for (const port of portsToExpose) {
+ for (const protocol of protocols) {
+ const deviceName = `${vmName}-${protocol}-port-${port}`;
+ try {
+ // Remove existing device first to avoid conflicts if re-running
+ shellExec(`lxc config device remove ${vmName} ${deviceName} || true`);
+ shellExec(
+ `lxc config device add ${vmName} ${deviceName} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
+ );
+ console.log(`Exposed ${protocol}:${hostIp}:${port} -> ${vmIp}:${port} for ${vmName}`);
+ } catch (error) {
+ console.error(`Failed to expose port ${port} for ${vmName}: ${error.message}`);
+ }
+ }
+ }
  }
  if (options.joinNode && typeof options.joinNode === 'string') {
  const [workerNode, controlNode] = options.joinNode.split(',');
@@ -116,20 +196,26 @@ ipv6.address=none`);
  shellExec(`lxc list ${options.infoVm}`);
  }
  if (options.expose && typeof options.expose === 'string') {
- const [controlNode, ports] = options.expose.split(':');
- console.log({ controlNode, ports });
+ const [vmName, ports] = options.expose.split(':');
+ console.log({ vmName, ports });
  const protocols = ['tcp']; // udp
  const hostIp = getLocalIPv4Address();
  const vmIp = shellExec(
- `lxc list ${controlNode} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+ `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
  { stdout: true },
  ).trim();
+ if (!vmIp) {
+ console.error(`Could not get VM IP for ${vmName}. Cannot expose ports.`);
+ return;
+ }
  for (const port of ports.split(',')) {
  for (const protocol of protocols) {
- shellExec(`lxc config device remove ${controlNode} ${controlNode}-${protocol}-port-${port}`);
+ const deviceName = `${vmName}-${protocol}-port-${port}`;
+ shellExec(`lxc config device remove ${vmName} ${deviceName} || true`); // Use || true to prevent error if device doesn't exist
  shellExec(
- `lxc config device add ${controlNode} ${controlNode}-${protocol}-port-${port} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
+ `lxc config device add ${vmName} ${deviceName} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
  );
+ console.log(`Manually exposed ${protocol}:${hostIp}:${port} -> ${vmIp}:${port} for ${vmName}`);
  }
  }
  }
@@ -181,25 +267,25 @@ ipv6.address=none`);
  return;
  }

- // 2. Iteratively check connection to google.cl
+ // 2. Iteratively check connection to google.com
  let connectedToGoogle = false;
  retries = 0;
  while (!connectedToGoogle && retries < maxRetries) {
  try {
- console.log(`Checking connectivity to google.cl from ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
+ console.log(`Checking connectivity to google.com from ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
  const curlOutput = shellExec(
- `lxc exec ${vmName} -- curl -s -o /dev/null -w "%{http_code}" http://google.cl`,
+ `lxc exec ${vmName} -- bash -c 'curl -s -o /dev/null -w "%{http_code}" http://google.com'`,
  { stdout: true },
  );
  if (curlOutput.startsWith('2') || curlOutput.startsWith('3')) {
- console.log(`Successfully connected to google.cl from ${vmName}.`);
+ console.log(`Successfully connected to google.com from ${vmName}.`);
  connectedToGoogle = true;
  } else {
- console.log(`Connectivity to google.cl not yet verified. Retrying in ${delayMs / 1000} seconds...`);
+ console.log(`Connectivity to google.com not yet verified. Retrying in ${delayMs / 1000} seconds...`);
  await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
  } catch (error) {
- console.error(`Error checking connectivity to google.cl: ${error.message}`);
+ console.error(`Error checking connectivity to google.com: ${error.message}`);
  console.log(`Retrying in ${delayMs / 1000} seconds...`);
  await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
@@ -208,7 +294,7 @@ ipv6.address=none`);

  if (!connectedToGoogle) {
  console.error(
- `Failed to connect to google.cl from ${vmName} after ${maxRetries} attempts. Aborting further tests.`,
+ `Failed to connect to google.com from ${vmName} after ${maxRetries} attempts. Aborting further tests.`,
  );
  return;
  }
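To confirm which proxy devices the automatic (or manual) exposure above created on a VM, a hedged verification sketch; the VM name `k8s-control` and `<host-ip>` are placeholders:

```
# Proxy devices appear as <vm-name>-tcp-port-<port> in the instance config.
lxc config device show k8s-control
# From the host, the forwarded Kubernetes API port should answer.
curl -k https://<host-ip>:6443/version
```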
package/src/index.js CHANGED
@@ -31,7 +31,7 @@ class Underpost {
  * @type {String}
  * @memberof Underpost
  */
- static version = 'v2.8.792';
+ static version = 'v2.8.794';
  /**
  * Repository cli API
  * @static
@@ -176,7 +176,7 @@ const loggerMiddleware = (meta = { url: '' }) => {
  );
  };

- const underpostASCI = () => `
+ const underpostASCII = () => `

  ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
  ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
@@ -188,10 +188,10 @@ const underpostASCI = () => `

  const actionInitLog = () =>
  console.log(
- underpostASCI() +
+ underpostASCII() +
  `
  https://www.nexodev.org/docs
  `,
  );

- export { loggerFactory, loggerMiddleware, setUpInfo, underpostASCI, actionInitLog };
+ export { loggerFactory, loggerMiddleware, setUpInfo, underpostASCII, actionInitLog };