underpost 2.8.792 → 2.8.793
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/cli.md +3 -1
- package/docker-compose.yml +1 -1
- package/manifests/lxd/underpost-setup.sh +105 -82
- package/package.json +1 -1
- package/src/cli/cluster.js +146 -175
- package/src/cli/index.js +1 -0
- package/src/cli/lxd.js +100 -14
- package/src/index.js +1 -1
package/README.md
CHANGED
package/cli.md
CHANGED
@@ -1,4 +1,4 @@
-## underpost ci/cd cli v2.8.792
+## underpost ci/cd cli v2.8.793
 
 ### Usage: `underpost [options] [command]`
 ```
@@ -496,6 +496,8 @@ Options:
   --delete-expose <vm-name-ports>   Vm name and : separated with Comma separated
                                     vm port to remove expose e. g.
                                     k8s-control:80,443
+  --auto-expose-k8s-ports <vm-id>   Automatically expose common Kubernetes ports
+                                    for the VM.
   -h, --help                        display help for command
 
 ```
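A minimal usage sketch of the new option, assuming the invocation shape implied by the help text above and by `src/cli/lxd.js` below (the VM name `k8s-control` is illustrative, and pairing it with the existing `--control` flag is an assumption):

```bash
# Expose the common Kubernetes ports (6443, 80, 443, sample NodePorts) of an LXD VM.
underpost lxd --auto-expose-k8s-ports k8s-control --control

# Individual exposures can still be removed with the pre-existing flag:
underpost lxd --delete-expose k8s-control:80,443
```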
package/docker-compose.yml
CHANGED
package/manifests/lxd/underpost-setup.sh
CHANGED
@@ -1,20 +1,25 @@
 #!/bin/bash
 
+# Exit immediately if a command exits with a non-zero status.
 set -e
 
-
+echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm Use Case)..."
+
+# --- Disk Partition Resizing (Keep as is, seems functional) ---
+echo "Expanding /dev/sda2 partition and resizing filesystem..."
 
 # Check if parted is installed
 if ! command -v parted &>/dev/null; then
   echo "parted not found, installing..."
-  dnf install -y parted
+  sudo dnf install -y parted
 fi
 
 # Get start sector of /dev/sda2
-START_SECTOR=$(parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
+START_SECTOR=$(sudo parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
 
 # Resize the partition
-
+# Using 'sudo' for parted commands
+sudo parted /dev/sda ---pretend-input-tty <<EOF
 unit s
 resizepart 2 100%
 Yes
@@ -22,45 +27,89 @@ quit
 EOF
 
 # Resize the filesystem
-resize2fs /dev/sda2
+sudo resize2fs /dev/sda2
 
 echo "Disk and filesystem resized successfully."
-
-
-
+
+# --- Essential System Package Installation ---
+echo "Installing essential system packages..."
+sudo dnf install -y tar bzip2 git epel-release
+
+# Perform a system update to ensure all packages are up-to-date
 sudo dnf -y update
-
-
-
+
+# --- NVM and Node.js Installation ---
+echo "Installing NVM and Node.js v23.8.0..."
 curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
-
+
+# Load nvm for the current session
+export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
 [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
+
 nvm install 23.8.0
 nvm use 23.8.0
+
 echo "
 ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
 ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
 ██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
-
+██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔══╝░░██║░░██║░╚═══██╗░░░██║░░░
 ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
 ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
 
-Installing underpost k8s node
-
+Installing underpost k8s node...
 "
+
+# Install underpost globally
 npm install -g underpost
-
+
+# Ensure underpost executable is in PATH and has execute permissions
+# Adjusting this for global npm install which usually handles permissions
+# If you still face issues, ensure /root/.nvm/versions/node/v23.8.0/bin is in your PATH
+# For global installs, it's usually handled automatically.
+# chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost # This might not be necessary for global npm installs
+
+# --- Kernel Module for Bridge Filtering ---
+# This is crucial for Kubernetes networking (CNI)
+echo "Loading br_netfilter kernel module..."
 sudo modprobe br_netfilter
-
-
+
+# --- Disable UFW (Crucial for Kubernetes) ---
+# UFW conflicts with Kubernetes' iptables management. Disable it completely.
+echo "Disabling UFW to prevent conflicts with Kubernetes..."
+if sudo systemctl is-active --quiet ufw; then
+  sudo systemctl stop ufw
+fi
+if sudo systemctl is-enabled --quiet ufw; then
+  sudo systemctl disable ufw
+fi
+# Attempt to remove ufw package. dnf will handle if it's not installed.
+echo "Attempting to remove ufw package..."
+sudo dnf remove -y ufw
+
+# --- Kubernetes Required Ports (Informational - not for UFW) ---
+# These ports are opened by Kubernetes itself or are expected to be open
+# by external firewalls. UFW is no longer managing them.
+echo "Note: Kubernetes requires the following ports to be open (managed by K8s or external firewall):"
+echo "  - Control Plane: 6443/TCP (Kubernetes API), 2379-2380/TCP (etcd)"
+echo "  - Worker Nodes: 10250/TCP (Kubelet API), 30000-32767/TCP/UDP (NodePorts)"
+echo "  - CNI specific ports (e.g., Calico: 179/TCP, 4789/UDP; Flannel: 8472/UDP)"
+echo "  - SSH: 22/TCP"
+echo "  - HTTP/HTTPS: 80/TCP, 443/TCP (for Ingress/Load Balancers)"
+
+# --- Initial Host Setup for Kubernetes Prerequisites ---
+# This calls the initHost method in cluster.js to install Docker, Podman, Kind, Kubeadm, Helm.
+echo "Running initial host setup for Kubernetes prerequisites..."
+# Ensure the current directory is where 'underpost' expects its root, or use absolute paths.
+# Assuming 'underpost root' correctly points to the base directory of your project.
+cd "$(underpost root)/underpost"
 underpost cluster --init-host
 
-#
+# --- Argument Parsing for Kubeadm/Kind/Worker ---
 USE_KUBEADM=false
-USE_KIND=false
+USE_KIND=false # Not the primary focus for this request, but keeping the logic
 USE_WORKER=false
 
-# Loop through arguments
 for arg in "$@"; do
   case "$arg" in
     --kubeadm)
@@ -76,71 +125,45 @@ for arg in "$@"; do
 done
 
 echo "USE_KUBEADM = $USE_KUBEADM"
-echo "USE_KIND
-echo "USE_WORKER
-
-underpost cluster --kubeadm
-underpost cluster --reset
-
-PORTS=(
-  22    # SSH
-  80    # HTTP
-  443   # HTTPS
-  53    # DNS (TCP/UDP)
-  66    # TFTP
-  67    # DHCP
-  69    # TFTP
-  111   # rpcbind
-  179   # Calico BGP
-  2049  # NFS
-  20048 # NFS mountd
-  4011  # PXE boot
-  5240  # snapd API
-  5248  # Juju controller
-  6443  # Kubernetes API
-  9153  # CoreDNS metrics
-  10250 # Kubelet API
-  10251 # kube-scheduler
-  10252 # kube-controller-manager
-  10255 # Kubelet read-only (deprecated)
-  10257 # controller-manager (v1.23+)
-  10259 # scheduler (v1.23+)
-)
-
-PORT_RANGES=(
-  2379:2380   # etcd
-  # 30000:32767 # NodePort range
-  # 3000:3100   # App node ports
-  32765:32766 # Ephemeral ports
-  6783:6784   # Weave Net
-)
-
-# Open individual ports
-for PORT in "${PORTS[@]}"; do
-  ufw allow ${PORT}/tcp
-  ufw allow ${PORT}/udp
-done
+echo "USE_KIND = $USE_KIND"
+echo "USE_WORKER = $USE_WORKER"
 
-#
-for RANGE in "${PORT_RANGES[@]}"; do
-  ufw allow ${RANGE}/tcp
-  ufw allow ${RANGE}/udp
-done
+# --- Kubernetes Cluster Initialization Logic ---
 
-#
-
-
-
-# kubectl get pods --all-namespaces -o wide -w
-fi
+# Call config first to apply SELinux, Docker, Containerd, and sysctl settings.
+# This config function in cluster.js will be modified to remove iptables flushing.
+echo "Applying Kubernetes host configuration (SELinux, Containerd, Sysctl)..."
+underpost cluster --config
 
-if $
+if $USE_KUBEADM; then
+  if $USE_WORKER; then
+    echo "Running worker node setup for kubeadm..."
+    # For worker nodes, the 'underpost cluster --worker' command will handle joining
+    # the cluster. The join command itself needs to be provided from the control plane.
+    # This script assumes the join command will be executed separately or passed in.
+    # For a full automated setup, you'd typically pass the join token/command here.
+    # Example: underpost cluster --worker --join-command "kubeadm join ..."
+    # For now, this just runs the worker-specific config.
+    underpost cluster --worker --config
+    echo "Worker node setup initiated. You will need to manually join this worker to your control plane."
+    echo "On your control plane, run 'kubeadm token create --print-join-command' and execute the output here."
+  else
+    echo "Running control plane setup with kubeadm..."
+    # This will initialize the kubeadm control plane and install Calico
+    underpost cluster --kubeadm
+    # Ensure kubectl config is set up for the current user
+    underpost cluster --chown
+    echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
+  fi
+elif $USE_KIND; then
   echo "Running control node with kind..."
   underpost cluster
-
+  underpost cluster --chown
+  echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
+else
+  echo "No specific cluster role (--kubeadm, --kind, --worker) specified. Please provide one."
+  exit 1
 fi
 
-
-
-underpost cluster --worker --config
-fi
+echo "Underpost Kubernetes Node Setup completed."
+echo "Remember to verify cluster health with 'kubectl get nodes' and 'kubectl get pods --all-namespaces'."
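The worker branch above intentionally stops before joining the cluster; the script's final echo lines describe a manual handoff. A minimal sketch of that handoff (hostname, token, and hash are placeholders):

```bash
# On the control plane: print a fresh join command.
kubeadm token create --print-join-command

# On the worker (after the setup script has run with --worker), execute the
# printed output; it has this general form:
sudo kubeadm join k8s-control:6443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash>
```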
package/package.json
CHANGED
package/src/cli/cluster.js
CHANGED
@@ -37,39 +37,39 @@ class UnderpostCluster {
       chown: false,
     },
   ) {
-    //
-    // 1) Install kind, kubeadm, docker, podman, helm
-    // 2) Check kubectl, kubelet, containerd.io
-    // 3) Install Nvidia drivers from Rocky Linux docs
-    // 4) Install LXD with MAAS from Rocky Linux docs
-    // 5) Install MAAS src from snap
+    // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
     if (options.initHost === true) return UnderpostCluster.API.initHost();
+
+    // Applies general host configuration (SELinux, containerd, sysctl)
     if (options.config === true) UnderpostCluster.API.config();
+
+    // Sets up kubectl configuration for the current user
     if (options.chown === true) UnderpostCluster.API.chown();
+
     const npmRoot = getNpmRootPath();
     const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+
+    // Information gathering options
     if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
     if (options.infoCapacity === true)
       return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
-    if (options.reset === true) return await UnderpostCluster.API.reset();
     if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
-
     if (options.nsUse && typeof options.nsUse === 'string') {
       shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
       return;
     }
     if (options.info === true) {
-      shellExec(`kubectl config get-contexts`);
+      shellExec(`kubectl config get-contexts`);
       shellExec(`kubectl config get-clusters`);
-      shellExec(`kubectl get nodes -o wide`);
+      shellExec(`kubectl get nodes -o wide`);
       shellExec(`kubectl config view | grep namespace`);
-      shellExec(`kubectl get ns -o wide`);
-      shellExec(`kubectl get pvc --all-namespaces -o wide`);
-      shellExec(`kubectl get pv --all-namespaces -o wide`);
+      shellExec(`kubectl get ns -o wide`);
+      shellExec(`kubectl get pvc --all-namespaces -o wide`);
+      shellExec(`kubectl get pv --all-namespaces -o wide`);
       shellExec(`kubectl get cronjob --all-namespaces -o wide`);
-      shellExec(`kubectl get svc --all-namespaces -o wide`);
-      shellExec(`kubectl get statefulsets --all-namespaces -o wide`);
-      shellExec(`kubectl get deployments --all-namespaces -o wide`);
+      shellExec(`kubectl get svc --all-namespaces -o wide`);
+      shellExec(`kubectl get statefulsets --all-namespaces -o wide`);
+      shellExec(`kubectl get deployments --all-namespaces -o wide`);
       shellExec(`kubectl get configmap --all-namespaces -o wide`);
       shellExec(`kubectl get pods --all-namespaces -o wide`);
       shellExec(
@@ -91,38 +91,46 @@ class UnderpostCluster {
       shellExec(`sudo kubectl api-resources`);
       return;
     }
-
+
+    // Reset Kubernetes cluster components (Kind/Kubeadm) and container runtimes
+    if (options.reset === true) return await UnderpostCluster.API.reset();
+
+    // Check if a cluster (Kind or Kubeadm with Calico) is already initialized
+    const alreadyCluster =
       UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0] ||
       UnderpostDeploy.API.get('calico-kube-controllers')[0];
 
-
-
-
-
-
-    ) {
-      UnderpostCluster.API.config();
+    // --- Kubeadm/Kind Cluster Initialization ---
+    // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
+    // It prevents re-initialization if a cluster is already detected.
+    if (!options.worker && !alreadyCluster) {
+      // If it's a kubeadm setup and no Calico controller is found (indicating no kubeadm cluster)
       if (options.kubeadm === true) {
+        logger.info('Initializing Kubeadm control plane...');
+        // Initialize kubeadm control plane
         shellExec(
           `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
         );
+        // Configure kubectl for the current user
         UnderpostCluster.API.chown();
-        //
+        // Install Calico CNI
+        logger.info('Installing Calico CNI...');
         shellExec(
           `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
         );
-        // shellExec(
-        //   `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
-        // );
         shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
+        // Untaint control plane node to allow scheduling pods
         const nodeName = os.hostname();
         shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
+        // Install local-path-provisioner for dynamic PVCs (optional but recommended)
+        logger.info('Installing local-path-provisioner...');
         shellExec(
          `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
        );
       } else {
+        // Kind cluster initialization (if not using kubeadm)
+        logger.info('Initializing Kind cluster...');
         if (options.full === true || options.dedicatedGpu === true) {
-          // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
           shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
           UnderpostCluster.API.chown();
         } else {
@@ -133,9 +141,18 @@ class UnderpostCluster {
           );
         }
       }
-    } else
+    } else if (options.worker === true) {
+      // Worker node specific configuration (kubeadm join command needs to be executed separately)
+      logger.info('Worker node configuration applied. Awaiting kubeadm join command...');
+      // No direct cluster initialization here for workers. The `kubeadm join` command
+      // needs to be run on the worker after the control plane is up and a token is created.
+      // This part of the script is for general worker setup, not the join itself.
+    } else {
+      logger.warn('Cluster already initialized or worker flag not set for worker node.');
+    }
 
-    //
+    // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
+    // These deployments happen after the base cluster is up.
 
     if (options.full === true || options.dedicatedGpu === true) {
       shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
@@ -218,8 +235,6 @@ class UnderpostCluster {
           --eval 'rs.initiate(${JSON.stringify(mongoConfig)})'`,
         );
       }
-
-      // await UnderpostTest.API.statusMonitor('mongodb-1');
     } else if (options.full === true || options.mongodb === true) {
       if (options.pullImage === true) {
         shellExec(`docker pull mongo:latest`);
@@ -280,193 +295,144 @@ class UnderpostCluster {
       }
     },
 
+    /**
+     * @method config
+     * @description Configures host-level settings required for Kubernetes.
+     * IMPORTANT: This method has been updated to REMOVE all iptables flushing commands
+     * to prevent conflicts with Kubernetes' own network management.
+     */
     config() {
+      console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
+      // Disable SELinux (permissive mode)
       shellExec(`sudo setenforce 0`);
       shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+
+      // Enable and start Docker and Kubelet services
       shellExec(`sudo systemctl enable --now docker`);
       shellExec(`sudo systemctl enable --now kubelet`);
-
-
-
+
+      // Configure containerd for SystemdCgroup
+      // This is crucial for kubelet to interact correctly with containerd
+      shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
+      shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
+      shellExec(`sudo service docker restart`); // Restart docker after containerd config changes
       shellExec(`sudo systemctl enable --now containerd.service`);
+      shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
+
+      // Disable swap (required by Kubernetes)
       shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+
+      // Reload systemd daemon to pick up new unit files/changes
       shellExec(`sudo systemctl daemon-reload`);
-
-
-      //
-      shellExec(`sudo
-
-      shellExec(`
-      shellExec(`sudo
-      shellExec(`sudo
-
-
-
-
-      shellExec(`sudo iptables -P FORWARD ACCEPT`);
-      shellExec(`sudo iptables -P OUTPUT ACCEPT`);
+
+      // Enable bridge-nf-call-iptables for Kubernetes networking
+      // This ensures traffic through Linux bridges is processed by iptables (crucial for CNI)
+      shellExec(`sudo sysctl net.bridge.bridge-nf-call-iptables=1`);
+      // Also ensure these are set for persistence across reboots
+      shellExec(`echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee /etc/sysctl.d/k8s.conf`);
+      shellExec(`echo "net.ipv4.ip_forward=1" | sudo tee -a /etc/sysctl.d/k8s.conf`); // Enable IP forwarding
+      shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
+
+      // Removed iptables flushing commands.
+      // Kubernetes (kube-proxy and CNI) manages its own iptables rules.
+      // Flushing them here would break cluster networking.
     },
+
+    /**
+     * @method chown
+     * @description Sets up kubectl configuration for the current user.
+     * This is typically run after kubeadm init on the control plane.
+     */
     chown() {
+      console.log('Setting up kubectl configuration...');
       shellExec(`mkdir -p ~/.kube`);
       shellExec(`sudo -E cp -i /etc/kubernetes/admin.conf ~/.kube/config`);
       shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+      console.log('kubectl config set up successfully.');
     },
-    // This function performs a comprehensive reset of Kubernetes and container environments
-    // on the host machine. Its primary goal is to clean up cluster components, temporary files,
-    // and container data, ensuring a clean state for re-initialization or fresh deployments,
-    // while also preventing the loss of the host machine's internet connectivity.
 
+    /**
+     * @method reset
+     * @description Performs a comprehensive reset of Kubernetes and container environments.
+     * This function is for cleaning up a node, not for initial setup.
+     * It avoids aggressive iptables flushing that would break host connectivity.
+     */
     reset() {
-
-
-      //
-
-
-
-      // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
-      // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
-      // configuration files, and associated network rules (like iptables entries created by kubeadm).
-      // The '-f' flag bypasses confirmation prompts.
+      console.log('Starting comprehensive reset of Kubernetes and container environments...');
+
+      // Delete all existing Kind (Kubernetes in Docker) clusters.
+      shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster --name`); // -r for no-op if no clusters
+
+      // Reset the Kubernetes control-plane components installed by kubeadm.
       shellExec(`sudo kubeadm reset -f`);
 
-      //
-      // This command targets and removes the configuration file for Flannel,
-      // a common CNI plugin, which might be left behind after a reset.
+      // Remove specific CNI configuration files (e.g., Flannel)
       shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
 
-      //
-      // This command would flush all iptables rules, including those crucial for the host's general
-      // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
-      // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
-      // default network configuration.
-
-      // Step 4: Remove the kubectl configuration file from the current user's home directory.
-      // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
-      // providing a clean slate for connecting to a new or re-initialized cluster.
+      // Remove the kubectl configuration file
       shellExec('sudo rm -f $HOME/.kube/config');
 
-      //
-      // This is a general cleanup step to remove temporary or deleted files.
+      // Clear trash files from the root user's trash directory.
      shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
 
-      //
-      // 'docker system prune -a -f' removes:
-      // - All stopped containers
-      // - All unused networks
-      // - All dangling images
-      // - All build cache
-      // - All unused volumes
-      // This aggressively frees up disk space and removes temporary Docker artifacts.
+      // Prune all unused Docker data.
       shellExec('sudo docker system prune -a -f');
 
-      //
-      // This step is often necessary to ensure that Docker's files and directories
-      // can be safely manipulated or moved in subsequent steps without conflicts.
+      // Stop the Docker daemon service.
       shellExec('sudo service docker stop');
 
-      //
-      // These commands target the default storage locations for containerd and Docker,
-      // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
-      // This ensures a complete wipe of all container images, layers, and volumes.
+      // Aggressively remove container storage data for containerd and Docker.
       shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
       shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
-      shellExec(`sudo rm -rf /var/lib/docker~/*`);
-      shellExec(`sudo rm -rf /home/containers/storage/*`);
-      shellExec(`sudo rm -rf /home/docker/*`);
-
-      //
-
-
-
-      shellExec('sudo
-
-
-      shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
-
-      // Step 10: Prune all unused Podman data.
-      // Similar to Docker pruning, these commands remove:
-      // - All stopped containers
-      // - All unused networks
-      // - All unused images
-      // - All unused volumes ('--volumes')
-      // - The '--force' flag bypasses confirmation.
-      // '--external' prunes external content not managed by Podman's default storage backend.
+      shellExec(`sudo rm -rf /var/lib/docker~/*`);
+      shellExec(`sudo rm -rf /home/containers/storage/*`);
+      shellExec(`sudo rm -rf /home/docker/*`);
+
+      // Re-configure Docker's default storage location (if desired).
+      shellExec('sudo mv /var/lib/docker /var/lib/docker~ || true'); // Use || true to prevent error if dir doesn't exist
+      shellExec('sudo mkdir -p /home/docker');
+      shellExec('sudo chmod 0711 /home/docker');
+      shellExec('sudo ln -s /home/docker /var/lib/docker');
+
+      // Prune all unused Podman data.
       shellExec(`sudo podman system prune -a -f`);
       shellExec(`sudo podman system prune --all --volumes --force`);
       shellExec(`sudo podman system prune --external --force`);
-      shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
 
-      //
-      // This ensures the custom path `/home/containers/storage` exists and has correct permissions
-      // before Podman attempts to use it.
+      // Create and set permissions for Podman's custom storage directory.
       shellExec(`sudo mkdir -p /home/containers/storage`);
       shellExec('sudo chmod 0711 /home/containers/storage');
 
-      //
-      // This command uses 'sed' to modify `/etc/containers/storage.conf`,
-      // changing the default storage path from `/var/lib/containers/storage`
-      // to the customized `/home/containers/storage`.
+      // Update Podman's storage configuration file.
       shellExec(
         `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
       );
 
-      //
-      // This command resets Podman's system-wide configuration to its default state.
+      // Reset Podman system settings.
       shellExec(`sudo podman system reset -f`);
 
-      //
-      // were previously removed. These sysctl settings (bridge-nf-call-iptables,
-      // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
-      // network traffic through Linux bridges to be processed by iptables.
-      // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
-      // Re-initializing Kubernetes will typically set these as needed, and leaving them
-      // at their system default (or '1' if already configured) is safer for host
-      // connectivity during a reset operation.
-
-      // https://github.com/kubernetes-sigs/kind/issues/2886
-      // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
-      // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
-      // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
-
-      // Step 14: Remove the 'kind' Docker network.
-      // This cleans up any network bridges or configurations specifically created by Kind.
-      // shellExec(`docker network rm kind`);
-
-      // Reset kubelet
+      // Reset kubelet components
       shellExec(`sudo systemctl stop kubelet`);
      shellExec(`sudo rm -rf /etc/kubernetes/*`);
       shellExec(`sudo rm -rf /var/lib/kubelet/*`);
       shellExec(`sudo rm -rf /etc/cni/net.d/*`);
       shellExec(`sudo systemctl daemon-reload`);
       shellExec(`sudo systemctl start kubelet`);
+
+      console.log('Comprehensive reset completed.');
     },
 
     getResourcesCapacity(kubeadm = false) {
       const resources = {};
-      const info =
-
-
-
-
-
-
-
-        cpu: 8
-        ephemeral-storage: 153131976Ki
-        hugepages-1Gi: 0
-        hugepages-2Mi: 0
-        memory: 11914720Ki
-        pods: `
-        : shellExec(
-            `kubectl describe node ${
-              kubeadm === true ? os.hostname() : 'kind-worker'
-            } | grep -E '(Allocatable:|Capacity:)' -A 6`,
-            {
-              stdout: true,
-              silent: true,
-            },
-          );
+      const info = shellExec(
+        `kubectl describe node ${
+          kubeadm === true ? os.hostname() : 'kind-worker'
+        } | grep -E '(Allocatable:|Capacity:)' -A 6`,
+        {
+          stdout: true,
+          silent: true,
+        },
+      );
       info
         .split('Allocatable:')[1]
         .split('\n')
@@ -487,17 +453,20 @@ Allocatable:
       return resources;
     },
     initHost() {
+      console.log('Installing Docker, Podman, Kind, Kubeadm, and Helm...');
       // Install docker
-      shellExec(`sudo dnf -y install dnf-plugins-core
-sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
+      shellExec(`sudo dnf -y install dnf-plugins-core`);
+      shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
       shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
+
       // Install podman
       shellExec(`sudo dnf -y install podman`);
+
       // Install kind
       shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
 chmod +x ./kind
 sudo mv ./kind /bin/kind`);
-      // Install kubeadm
+      // Install kubeadm, kubelet, kubectl
       shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
 [kubernetes]
 name=Kubernetes
@@ -508,12 +477,14 @@ gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
 exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
 EOF`);
       shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
+
       // Install helm
-      shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
-chmod 700 get_helm.sh
-
-chmod +x /usr/local/bin/helm
-sudo mv /usr/local/bin/helm /bin/helm`);
+      shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`);
+      shellExec(`chmod 700 get_helm.sh`);
+      shellExec(`./get_helm.sh`);
+      shellExec(`chmod +x /usr/local/bin/helm`);
+      shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
+      console.log('Host prerequisites installed successfully.');
     },
   };
 }
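The reworked `config()` now persists its networking settings in `/etc/sysctl.d/k8s.conf` instead of flushing iptables. A quick sketch of verifying the result on the host, using the values the `tee` calls above write:

```bash
# br_netfilter must be loaded for the bridge sysctl key to exist.
sudo modprobe br_netfilter
sudo sysctl --system

# Both keys should report 1 after config() has run:
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
```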
package/src/cli/index.js
CHANGED
@@ -294,6 +294,7 @@ program
     '--delete-expose <vm-name-ports>',
     'Vm name and : separated with Comma separated vm port to remove expose e. g. k8s-control:80,443',
   )
+  .option('--auto-expose-k8s-ports <vm-id>', 'Automatically expose common Kubernetes ports for the VM.')
   .description('Lxd management')
   .action(UnderpostLxd.API.callback);
 
package/src/cli/lxd.js
CHANGED
@@ -30,6 +30,7 @@ class UnderpostLxd {
    * @param {string} [options.expose=''] - Expose ports from a VM to the host (format: 'vmName:port1,port2').
    * @param {string} [options.deleteExpose=''] - Delete exposed ports from a VM (format: 'vmName:port1,port2').
    * @param {string} [options.test=''] - Test health, status and network connectivity for a VM.
+   * @param {string} [options.autoExposeK8sPorts=''] - Automatically expose common Kubernetes ports for the VM.
    */
   async callback(
     options = {
@@ -49,6 +50,7 @@ class UnderpostLxd {
       expose: '',
       deleteExpose: '',
       test: '',
+      autoExposeK8sPorts: '',
     },
   ) {
     const npmRoot = getNpmRootPath();
@@ -64,7 +66,6 @@ class UnderpostLxd {
     const lxdPressedContent = fs
       .readFileSync(`${underpostRoot}/manifests/lxd/lxd-preseed.yaml`, 'utf8')
       .replaceAll(`127.0.0.1`, getLocalIPv4Address());
-    // shellExec(`lxd init --preseed < ${underpostRoot}/manifests/lxd/lxd-preseed.yaml`);
     shellExec(`echo "${lxdPressedContent}" | lxd init --preseed`);
     shellExec(`lxc cluster list`);
   }
@@ -99,7 +100,86 @@ ipv6.address=none`);
       } else if (options.worker == true) {
         flag = ' -s -- --worker';
       }
-
+      console.log(`Executing underpost-setup.sh on VM: ${options.initVm}`);
+      shellExec(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
+      console.log(`underpost-setup.sh execution completed on VM: ${options.initVm}`);
+    }
+    // --- Automatic Kubernetes Port Exposure ---
+    if (options.autoExposeK8sPorts && typeof options.autoExposeK8sPorts === 'string') {
+      console.log(`Automatically exposing Kubernetes ports for VM: ${options.autoExposeK8sPorts}`);
+      const vmName = options.autoExposeK8sPorts;
+      const hostIp = getLocalIPv4Address();
+      let vmIp = '';
+      let retries = 0;
+      const maxRetries = 10;
+      const delayMs = 5000; // 5 seconds
+
+      // Wait for VM to get an IP address
+      while (!vmIp && retries < maxRetries) {
+        try {
+          console.log(`Attempting to get IPv4 address for ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
+          vmIp = shellExec(
+            `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+            { stdout: true },
+          ).trim();
+          if (vmIp) {
+            console.log(`IPv4 address found for ${vmName}: ${vmIp}`);
+          } else {
+            console.log(`IPv4 address not yet available for ${vmName}. Retrying in ${delayMs / 1000} seconds...`);
+            await new Promise((resolve) => setTimeout(resolve, delayMs));
+          }
+        } catch (error) {
+          console.error(`Error getting IPv4 address for exposure: ${error.message}`);
+          console.log(`Retrying in ${delayMs / 1000} seconds...`);
+          await new Promise((resolve) => setTimeout(resolve, delayMs));
+        }
+        retries++;
+      }
+
+      if (!vmIp) {
+        console.error(`Failed to get VM IP for ${vmName} after ${maxRetries} attempts. Cannot expose ports.`);
+        return;
+      }
+
+      let portsToExpose = [];
+      if (options.control === true) {
+        // Kubernetes API Server
+        portsToExpose.push('6443');
+        // Standard HTTP/HTTPS for Ingress if deployed
+        portsToExpose.push('80');
+        portsToExpose.push('443');
+      }
+      // NodePort range for all nodes (control plane can also run pods with NodePorts)
+      // It's safer to expose the entire range for flexibility, or specific NodePorts if known.
+      // For production, you might only expose specific NodePorts or use a LoadBalancer.
+      // For a general setup, exposing the range is common.
+      // Note: LXD proxy device can only expose individual ports, not ranges directly.
+      // We will expose a few common ones, or rely on specific 'expose' calls for others.
+      // Let's add some common NodePorts that might be used by applications.
+      // The full range 30000-32767 would require individual proxy rules for each port.
+      // For this automatic setup, we'll focus on critical K8s ports and common app ports.
+      // If a user needs the full NodePort range, they should use the `expose` option explicitly.
+      portsToExpose.push('30000'); // Example NodePort
+      portsToExpose.push('30001'); // Example NodePort
+      portsToExpose.push('30002'); // Example NodePort
+
+      const protocols = ['tcp']; // Most K8s services are TCP, UDP for some like DNS
+
+      for (const port of portsToExpose) {
+        for (const protocol of protocols) {
+          const deviceName = `${vmName}-${protocol}-port-${port}`;
+          try {
+            // Remove existing device first to avoid conflicts if re-running
+            shellExec(`lxc config device remove ${vmName} ${deviceName} || true`);
+            shellExec(
+              `lxc config device add ${vmName} ${deviceName} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
+            );
+            console.log(`Exposed ${protocol}:${hostIp}:${port} -> ${vmIp}:${port} for ${vmName}`);
+          } catch (error) {
+            console.error(`Failed to expose port ${port} for ${vmName}: ${error.message}`);
+          }
+        }
+      }
     }
     if (options.joinNode && typeof options.joinNode === 'string') {
       const [workerNode, controlNode] = options.joinNode.split(',');
@@ -116,20 +196,26 @@ ipv6.address=none`);
       shellExec(`lxc list ${options.infoVm}`);
     }
     if (options.expose && typeof options.expose === 'string') {
-      const [
-      console.log({
+      const [vmName, ports] = options.expose.split(':');
+      console.log({ vmName, ports });
       const protocols = ['tcp']; // udp
       const hostIp = getLocalIPv4Address();
       const vmIp = shellExec(
-        `lxc list ${
+        `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
         { stdout: true },
       ).trim();
+      if (!vmIp) {
+        console.error(`Could not get VM IP for ${vmName}. Cannot expose ports.`);
+        return;
+      }
       for (const port of ports.split(',')) {
         for (const protocol of protocols) {
-
+          const deviceName = `${vmName}-${protocol}-port-${port}`;
+          shellExec(`lxc config device remove ${vmName} ${deviceName} || true`); // Use || true to prevent error if device doesn't exist
           shellExec(
-            `lxc config device add ${
+            `lxc config device add ${vmName} ${deviceName} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
           );
+          console.log(`Manually exposed ${protocol}:${hostIp}:${port} -> ${vmIp}:${port} for ${vmName}`);
         }
       }
     }
@@ -181,25 +267,25 @@ ipv6.address=none`);
       return;
     }
 
-    // 2. Iteratively check connection to google.
+    // 2. Iteratively check connection to google.com
     let connectedToGoogle = false;
     retries = 0;
     while (!connectedToGoogle && retries < maxRetries) {
       try {
-        console.log(`Checking connectivity to google.
+        console.log(`Checking connectivity to google.com from ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
         const curlOutput = shellExec(
-          `lxc exec ${vmName} -- curl -s -o /dev/null -w "%{http_code}" http://google.
+          `lxc exec ${vmName} -- bash -c 'curl -s -o /dev/null -w "%{http_code}" http://google.com'`,
           { stdout: true },
         );
         if (curlOutput.startsWith('2') || curlOutput.startsWith('3')) {
-          console.log(`Successfully connected to google.
+          console.log(`Successfully connected to google.com from ${vmName}.`);
          connectedToGoogle = true;
        } else {
-          console.log(`Connectivity to google.
+          console.log(`Connectivity to google.com not yet verified. Retrying in ${delayMs / 1000} seconds...`);
           await new Promise((resolve) => setTimeout(resolve, delayMs));
         }
       } catch (error) {
-        console.error(`Error checking connectivity to google.
+        console.error(`Error checking connectivity to google.com: ${error.message}`);
         console.log(`Retrying in ${delayMs / 1000} seconds...`);
         await new Promise((resolve) => setTimeout(resolve, delayMs));
       }
@@ -208,7 +294,7 @@ ipv6.address=none`);
 
     if (!connectedToGoogle) {
       console.error(
-        `Failed to connect to google.
+        `Failed to connect to google.com from ${vmName} after ${maxRetries} attempts. Aborting further tests.`,
      );
       return;
     }
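Each exposed port becomes one LXD proxy device named `<vm>-<protocol>-port-<port>`. For reference, this is roughly what the auto-expose loop runs for port 6443 (the VM name and both IPs are placeholders):

```bash
# Drop any stale device, then add a NAT proxy from the host to the VM.
lxc config device remove k8s-control k8s-control-tcp-port-6443 || true
lxc config device add k8s-control k8s-control-tcp-port-6443 proxy \
  listen=tcp:192.168.1.10:6443 connect=tcp:10.78.161.42:6443 nat=true
```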