underpost 2.8.797 → 2.8.798
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/cli.md +4 -1
- package/docker-compose.yml +1 -1
- package/manifests/deployment/dd-template-development/deployment.yaml +2 -2
- package/manifests/lxd/underpost-setup.sh +26 -6
- package/package.json +1 -1
- package/src/cli/cluster.js +195 -129
- package/src/cli/image.js +51 -4
- package/src/cli/index.js +3 -0
- package/src/cli/lxd.js +53 -20
- package/src/index.js +1 -1
package/README.md
CHANGED
package/cli.md
CHANGED
@@ -1,4 +1,4 @@
-## underpost ci/cd cli v2.8.797
+## underpost ci/cd cli v2.8.798
 
 ### Usage: `underpost [options] [command]`
 ```
@@ -222,6 +222,7 @@ Options:
 --config Set k8s base node config
 --worker Set worker node context
 --chown Set k8s kube chown
+--k3s Initialize the cluster using K3s
 -h, --help display help for command
 
 ```
@@ -299,6 +300,7 @@ Options:
 --secrets Dockerfile env secrets
 --secrets-path [secrets-path] Dockerfile custom path env secrets
 --reset Build without using cache
+--k3s-load Load image into K3s cluster.
 -h, --help display help for command
 
 ```
@@ -315,6 +317,7 @@ Options:
 --kind-load Import tar image to Kind cluster
 --kubeadm-load Import tar image to Kubeadm cluster
 --version Set custom version
+--k3s-load Load image into K3s cluster.
 -h, --help display help for command
 
 ```
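Note: taken together, the new flags can be exercised end-to-end. A minimal usage sketch based on the help text above (the image name `my-app:latest` is hypothetical):

```
# Initialize a single-node K3s control plane
underpost cluster --k3s

# Build an image from a Dockerfile and import it into the running K3s cluster
underpost dockerfile-image-build --podman-save --k3s-load --image-name=my-app:latest --path .
```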
package/docker-compose.yml
CHANGED
package/manifests/deployment/dd-template-development/deployment.yaml
CHANGED
@@ -17,7 +17,7 @@ spec:
 spec:
 containers:
 - name: dd-template-development-blue
-image: localhost/debian-underpost:v2.8.797
+image: localhost/debian-underpost:v2.8.798
 # resources:
 #   requests:
 #     memory: "124Ki"
@@ -100,7 +100,7 @@ spec:
 spec:
 containers:
 - name: dd-template-development-green
-image: localhost/debian-underpost:v2.8.797
+image: localhost/debian-underpost:v2.8.798
 # resources:
 #   requests:
 #     memory: "124Ki"
package/manifests/lxd/underpost-setup.sh
CHANGED
@@ -3,7 +3,7 @@
 # Exit immediately if a command exits with a non-zero status.
 set -e
 
-echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm Use Case)..."
+echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm/K3s Use Case)..."
 
 # --- Disk Partition Resizing (Keep as is, seems functional) ---
 echo "Expanding /dev/sda2 partition and resizing filesystem..."
@@ -53,7 +53,7 @@ echo "
 ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
 ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
 ██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
-
+██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔═╝░░░██║░░██║░╚═══██╗░░░██║░░░
 ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
 ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
 
@@ -82,9 +82,10 @@ echo "Running initial host setup for Kubernetes prerequisites..."
 cd "$(underpost root)/underpost"
 underpost cluster --init-host
 
-# --- Argument Parsing for Kubeadm/Kind/Worker ---
+# --- Argument Parsing for Kubeadm/Kind/K3s/Worker ---
 USE_KUBEADM=false
 USE_KIND=false # Not the primary focus for this request, but keeping the logic
+USE_K3S=false # New K3s option
 USE_WORKER=false
 
 for arg in "$@"; do
@@ -95,6 +96,9 @@ for arg in "$@"; do
 --kind)
 USE_KIND=true
 ;;
+--k3s) # New K3s argument
+USE_K3S=true
+;;
 --worker)
 USE_WORKER=true
 ;;
@@ -102,7 +106,8 @@ for arg in "$@"; do
 done
 
 echo "USE_KUBEADM = $USE_KUBEADM"
-echo "USE_KIND
+echo "USE_KIND = $USE_KIND"
+echo "USE_K3S = $USE_K3S" # Display K3s flag status
 echo "USE_WORKER = $USE_WORKER"
 
 # --- Kubernetes Cluster Initialization Logic ---
@@ -117,7 +122,6 @@ if $USE_KUBEADM; then
 # For worker nodes, the 'underpost cluster --worker' command will handle joining
 # the cluster. The join command itself needs to be provided from the control plane.
 # This script assumes the join command will be executed separately or passed in.
-# For a full automated setup, you'd typically pass the join token/command here.
 # Example: underpost cluster --worker --join-command "kubeadm join ..."
 # For now, this just runs the worker-specific config.
 underpost cluster --worker
@@ -130,12 +134,28 @@ if $USE_KUBEADM; then
 underpost cluster --kubeadm
 echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
 fi
+elif $USE_K3S; then # New K3s initialization block
+if $USE_WORKER; then
+echo "Running worker node setup for K3s..."
+# For K3s worker nodes, the 'underpost cluster --worker' command will handle joining
+# the cluster. The K3s join command (k3s agent --server ...) needs to be provided.
+underpost cluster --worker --k3s
+underpost cluster --chown
+echo "K3s Worker node setup initiated. You will need to manually join this worker to your control plane."
+echo "On your K3s control plane, get the K3S_TOKEN from /var/lib/rancher/k3s/server/node-token"
+echo "and the K3S_URL (e.g., https://<control-plane-ip>:6443)."
+echo "Then execute: K3S_URL=${K3S_URL} K3S_TOKEN=${K3S_TOKEN} curl -sfL https://get.k3s.io | sh -"
+else
+echo "Running control plane setup with K3s..."
+underpost cluster --k3s
+echo "K3s control plane initialized. Check cluster status with 'kubectl get nodes'."
+fi
 elif $USE_KIND; then
 echo "Running control node with kind..."
 underpost cluster
 echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
 else
-echo "No specific cluster role (--kubeadm, --kind, --worker) specified. Please provide one."
+echo "No specific cluster role (--kubeadm, --kind, --k3s, --worker) specified. Please provide one."
 exit 1
 fi
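Note: the K3s worker branch only prints the join instructions; performing the join is manual. A sketch of the steps it describes (the control-plane address is a placeholder):

```
# On the K3s control plane:
sudo cat /var/lib/rancher/k3s/server/node-token   # -> K3S_TOKEN

# On the worker:
curl -sfL https://get.k3s.io | K3S_URL=https://<control-plane-ip>:6443 K3S_TOKEN=<token> sh -
```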
|
package/package.json
CHANGED
package/src/cli/cluster.js
CHANGED
@@ -12,7 +12,7 @@ class UnderpostCluster {
 /**
  * @method init
  * @description Initializes and configures the Kubernetes cluster based on provided options.
- * This method handles host prerequisites, cluster initialization (Kind or
+ * This method handles host prerequisites, cluster initialization (Kind, Kubeadm, or K3s),
  * and optional component deployments.
  * @param {string} [podName] - Optional name of a pod for specific operations (e.g., listing).
  * @param {object} [options] - Configuration options for cluster initialization.
@@ -35,9 +35,10 @@ class UnderpostCluster {
 * @param {boolean} [options.pullImage=false] - Pull necessary Docker images before deployment.
 * @param {boolean} [options.dedicatedGpu=false] - Configure for dedicated GPU usage (e.g., NVIDIA GPU Operator).
 * @param {boolean} [options.kubeadm=false] - Initialize the cluster using Kubeadm.
+* @param {boolean} [options.k3s=false] - Initialize the cluster using K3s.
 * @param {boolean} [options.initHost=false] - Perform initial host setup (install Docker, Podman, Kind, Kubeadm, Helm).
 * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
-* @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm join).
+* @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm or K3s join).
 * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
 */
 async init(
@@ -62,6 +63,7 @@ class UnderpostCluster {
 pullImage: false,
 dedicatedGpu: false,
 kubeadm: false,
+k3s: false, // New K3s option
 initHost: false,
 config: false,
 worker: false,
@@ -83,7 +85,7 @@ class UnderpostCluster {
 // Information gathering options
 if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
 if (options.infoCapacity === true)
-return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
+return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm || options.k3s)); // Adjust for k3s
 if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
 if (options.nsUse && typeof options.nsUse === 'string') {
 shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
@@ -123,32 +125,83 @@ class UnderpostCluster {
 return;
 }
 
-// Reset Kubernetes cluster components (Kind/Kubeadm) and container runtimes
+// Reset Kubernetes cluster components (Kind/Kubeadm/K3s) and container runtimes
 if (options.reset === true) return await UnderpostCluster.API.reset();
 
-// Check if a cluster (Kind
-const
-
-
+// Check if a cluster (Kind, Kubeadm, or K3s) is already initialized
+const alreadyKubeadmCluster = UnderpostDeploy.API.get('calico-kube-controllers')[0];
+const alreadyKindCluster = UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0];
+// K3s pods often contain 'svclb-traefik' in the kube-system namespace
+const alreadyK3sCluster = UnderpostDeploy.API.get('svclb-traefik')[0];
 
-// --- Kubeadm/Kind Cluster Initialization ---
+// --- Kubeadm/Kind/K3s Cluster Initialization ---
 // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
 // It prevents re-initialization if a cluster is already detected.
-if (!options.worker && !
-
-
+if (!options.worker && !alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
+if (options.k3s === true) {
+logger.info('Initializing K3s control plane...');
+// Install K3s
+console.log('Installing K3s...');
+shellExec(`curl -sfL https://get.k3s.io | sh -`);
+console.log('K3s installation completed.');
+
+// Move k3s binary to /bin/k3s and make it executable
+shellExec(`sudo mv /usr/local/bin/k3s /bin/k3s`);
+shellExec(`sudo chmod +x /bin/k3s`);
+console.log('K3s binary moved to /bin/k3s and made executable.');
+
+// Configure kubectl for the current user for K3s *before* checking readiness
+// This ensures kubectl can find the K3s kubeconfig immediately after K3s installation.
+UnderpostCluster.API.chown('k3s');
+
+// Wait for K3s to be ready
+logger.info('Waiting for K3s to be ready...');
+let k3sReady = false;
+let retries = 0;
+const maxRetries = 20; // Increased retries for K3s startup
+const delayMs = 5000; // 5 seconds
+
+while (!k3sReady && retries < maxRetries) {
+try {
+// Explicitly use KUBECONFIG for kubectl commands to ensure it points to K3s config
+const nodes = shellExec(`KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get nodes -o json`, {
+stdout: true,
+silent: true,
+});
+const parsedNodes = JSON.parse(nodes);
+if (
+parsedNodes.items.some((node) =>
+node.status.conditions.some((cond) => cond.type === 'Ready' && cond.status === 'True'),
+)
+) {
+k3sReady = true;
+logger.info('K3s cluster is ready.');
+} else {
+logger.info(`K3s not yet ready. Retrying in ${delayMs / 1000} seconds...`);
+await new Promise((resolve) => setTimeout(resolve, delayMs));
+}
+} catch (error) {
+logger.info(`Error checking K3s status: ${error.message}. Retrying in ${delayMs / 1000} seconds...`);
+await new Promise((resolve) => setTimeout(resolve, delayMs));
+}
+retries++;
+}
+
+if (!k3sReady) {
+logger.error('K3s cluster did not become ready in time. Please check the K3s logs.');
+return;
+}
+
+// K3s includes local-path-provisioner by default, so no need to install explicitly.
+logger.info('K3s comes with local-path-provisioner by default. Skipping explicit installation.');
+} else if (options.kubeadm === true) {
 logger.info('Initializing Kubeadm control plane...');
 // Initialize kubeadm control plane
 shellExec(
 `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
 );
 // Configure kubectl for the current user
-UnderpostCluster.API.chown();
-
-// Apply kubelet-config.yaml explicitly
-// Using 'kubectl replace --force' to ensure the ConfigMap is updated,
-// even if it was modified by kubeadm or other processes, resolving conflicts.
-// shellExec(`kubectl replace --force -f ${underpostRoot}/manifests/kubelet-config.yaml`);
+UnderpostCluster.API.chown('kubeadm'); // Pass 'kubeadm' to chown
 
 // Install Calico CNI
 logger.info('Installing Calico CNI...');
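Note: the readiness loop above polls `kubectl get nodes` as JSON and inspects the `Ready` condition. An equivalent one-shot check from a shell, assuming the default K3s kubeconfig path used in the diff:

```
# Wait up to 100s (matching 20 retries x 5s) for all nodes to report Ready
KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl wait --for=condition=Ready node --all --timeout=100s
```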
@@ -165,7 +218,7 @@ class UnderpostCluster {
 `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
 );
 } else {
-// Kind cluster initialization (if not using kubeadm)
+// Kind cluster initialization (if not using kubeadm or k3s)
 logger.info('Initializing Kind cluster...');
 if (options.full === true || options.dedicatedGpu === true) {
 shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
@@ -176,12 +229,12 @@ class UnderpostCluster {
 }.yaml`,
 );
 }
-UnderpostCluster.API.chown();
+UnderpostCluster.API.chown('kind'); // Pass 'kind' to chown
 }
 } else if (options.worker === true) {
 // Worker node specific configuration (kubeadm join command needs to be executed separately)
-logger.info('Worker node configuration applied. Awaiting
-// No direct cluster initialization here for workers. The `kubeadm join` command
+logger.info('Worker node configuration applied. Awaiting join command...');
+// No direct cluster initialization here for workers. The `kubeadm join` or `k3s agent` command
 // needs to be run on the worker after the control plane is up and a token is created.
 // This part of the script is for general worker setup, not the join itself.
 } else {
@@ -202,26 +255,26 @@ class UnderpostCluster {
 if (options.pullImage === true) {
 shellExec(`docker pull valkey/valkey:latest`);
 shellExec(`sudo podman pull valkey/valkey:latest`);
-if (!options.kubeadm)
-
-
-
-
-);
+if (!options.kubeadm && !options.k3s)
+// Only load if not kubeadm/k3s (Kind needs it)
+shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+else if (options.kubeadm || options.k3s)
+// For kubeadm/k3s, ensure it's available for containerd
+shellExec(`sudo crictl pull valkey/valkey:latest`);
 }
-shellExec(`kubectl delete statefulset valkey-service`);
+shellExec(`kubectl delete statefulset valkey-service --ignore-not-found`);
 shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
 }
 if (options.full === true || options.mariadb === true) {
 shellExec(
-`sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password
+`sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password --dry-run=client -o yaml | kubectl apply -f -`,
 );
-shellExec(`kubectl delete statefulset mariadb-statefulset`);
+shellExec(`kubectl delete statefulset mariadb-statefulset --ignore-not-found`);
 shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
 }
 if (options.full === true || options.mysql === true) {
 shellExec(
-`sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password
+`sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password --dry-run=client -o yaml | kubectl apply -f -`,
 );
 shellExec(`sudo mkdir -p /mnt/data`);
 shellExec(`sudo chmod 777 /mnt/data`);
@@ -231,27 +284,27 @@ class UnderpostCluster {
 if (options.full === true || options.postgresql === true) {
 if (options.pullImage === true) {
 shellExec(`docker pull postgres:latest`);
-if (!options.kubeadm)
-
-
-
-
-);
+if (!options.kubeadm && !options.k3s)
+// Only load if not kubeadm/k3s (Kind needs it)
+shellExec(`sudo kind load docker-image postgres:latest`);
+else if (options.kubeadm || options.k3s)
+// For kubeadm/k3s, ensure it's available for containerd
+shellExec(`sudo crictl pull postgres:latest`);
 }
 shellExec(
-`sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password
+`sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password --dry-run=client -o yaml | kubectl apply -f -`,
 );
 shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql`);
 }
 if (options.mongodb4 === true) {
 if (options.pullImage === true) {
 shellExec(`docker pull mongo:4.4`);
-if (!options.kubeadm)
-
-
-
-
-);
+if (!options.kubeadm && !options.k3s)
+// Only load if not kubeadm/k3s (Kind needs it)
+shellExec(`sudo kind load docker-image mongo:4.4`);
+else if (options.kubeadm || options.k3s)
+// For kubeadm/k3s, ensure it's available for containerd
+shellExec(`sudo crictl pull mongo:4.4`);
 }
 shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);
 
@@ -275,15 +328,22 @@ class UnderpostCluster {
 } else if (options.full === true || options.mongodb === true) {
 if (options.pullImage === true) {
 shellExec(`docker pull mongo:latest`);
+if (!options.kubeadm && !options.k3s)
+// Only load if not kubeadm/k3s (Kind needs it)
+shellExec(`sudo kind load docker-image mongo:latest`);
+else if (options.kubeadm || options.k3s)
+// For kubeadm/k3s, ensure it's available for containerd
+shellExec(`sudo crictl pull mongo:latest`);
 }
 shellExec(
-`sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile
+`sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile --dry-run=client -o yaml | kubectl apply -f -`,
 );
 shellExec(
-`sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password
+`sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password --dry-run=client -o yaml | kubectl apply -f -`,
 );
-shellExec(`kubectl delete statefulset mongodb`);
+shellExec(`kubectl delete statefulset mongodb --ignore-not-found`);
 if (options.kubeadm === true)
+// This storage class is specific to kubeadm setup
 shellExec(`kubectl apply -f ${underpostRoot}/manifests/mongodb/storage-class.yaml`);
 shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb`);
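Note: the recurring `--dry-run=client -o yaml | kubectl apply -f -` suffix makes secret creation idempotent: `kubectl create` alone fails when the object already exists, whereas rendering it client-side and piping it through `kubectl apply` creates or updates it in place. The pattern in isolation (names and file paths are illustrative):

```
kubectl create secret generic example-secret --from-file=password=./password \
  --dry-run=client -o yaml | kubectl apply -f -
```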
@@ -310,8 +370,11 @@ class UnderpostCluster {
 if (options.full === true || options.contour === true) {
 shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
 if (options.kubeadm === true) {
+// Envoy service might need NodePort for kubeadm
 shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/envoy-service-nodeport.yaml`);
 }
+// K3s has a built-in LoadBalancer (Klipper-lb) that can expose services,
+// so a specific NodePort service might not be needed or can be configured differently.
 }
 
 if (options.full === true || options.certManager === true) {
@@ -327,7 +390,7 @@ class UnderpostCluster {
 }
 
 const letsEncName = 'letsencrypt-prod';
-shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName}`);
+shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName} --ignore-not-found`);
 shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
 }
 },
@@ -346,14 +409,14 @@ class UnderpostCluster {
 shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
 
 // Enable and start Docker and Kubelet services
-shellExec(`sudo systemctl enable --now docker`);
-shellExec(`sudo systemctl enable --now kubelet`);
+shellExec(`sudo systemctl enable --now docker || true`); // Docker might not be needed for K3s
+shellExec(`sudo systemctl enable --now kubelet || true`); // Kubelet might not be needed for K3s (K3s uses its own agent)
 
 // Configure containerd for SystemdCgroup
-// This is crucial for kubelet to interact correctly with containerd
+// This is crucial for kubelet/k3s to interact correctly with containerd
 shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
 shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
-shellExec(`sudo service docker restart`); // Restart docker after containerd config changes
+shellExec(`sudo service docker restart || true`); // Restart docker after containerd config changes
 shellExec(`sudo systemctl enable --now containerd.service`);
 shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
@@ -383,22 +446,41 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 
 /**
  * @method chown
- * @description Sets up kubectl configuration for the current user.
- *
- * to allow non-root users to interact with the cluster.
+ * @description Sets up kubectl configuration for the current user based on the cluster type.
+ * @param {string} clusterType - The type of Kubernetes cluster ('kubeadm', 'k3s', or 'kind').
 */
-chown() {
-console.log(
+chown(clusterType) {
+console.log(`Setting up kubectl configuration for ${clusterType} cluster...`);
 shellExec(`mkdir -p ~/.kube`);
-
-
+
+let kubeconfigPath;
+if (clusterType === 'k3s') {
+kubeconfigPath = '/etc/rancher/k3s/k3s.yaml';
+} else if (clusterType === 'kubeadm') {
+kubeconfigPath = '/etc/kubernetes/admin.conf';
+} else {
+// Default to kind if not specified or unknown
+kubeconfigPath = ''; // Kind's kubeconfig is usually managed by kind itself, or merged
+}
+
+if (kubeconfigPath) {
+shellExec(`sudo -E cp -i ${kubeconfigPath} ~/.kube/config`);
+shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+} else if (clusterType === 'kind') {
+// For Kind, the kubeconfig is usually merged automatically or can be explicitly exported
+// This command ensures it's merged into the default kubeconfig
+shellExec(`kind get kubeconfig > ~/.kube/config || true`);
+shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+} else {
+logger.warn('No specific kubeconfig path defined for this cluster type, or it is managed automatically.');
+}
 console.log('kubectl config set up successfully.');
 },
 
 /**
  * @method reset
  * @description Performs a comprehensive reset of Kubernetes and container environments.
- * This function is for cleaning up a node, reverting changes made by 'kubeadm init'
+ * This function is for cleaning up a node, reverting changes made by 'kubeadm init', 'kubeadm join', or 'k3s install'.
 * It includes deleting Kind clusters, resetting kubeadm, removing CNI configs,
 * cleaning Docker and Podman data, persistent volumes, and resetting kubelet components.
 * It avoids aggressive iptables flushing that would break host connectivity, relying on kube-proxy's
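Note: the `chown(clusterType)` refactor boils down to copying the right kubeconfig for each cluster flavor. A shell sketch of the same logic (the `CLUSTER_TYPE` variable is hypothetical):

```
mkdir -p ~/.kube
case "$CLUSTER_TYPE" in
  k3s)     sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config ;;    # K3s default kubeconfig
  kubeadm) sudo cp /etc/kubernetes/admin.conf ~/.kube/config ;;   # kubeadm admin kubeconfig
  kind)    kind get kubeconfig > ~/.kube/config || true ;;        # Kind exports/merges its own
esac
sudo chown "$(id -u):$(id -g)" ~/.kube/config
```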
@@ -411,13 +493,7 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 // Phase 1: Pre-reset Kubernetes Cleanup (while API server is still up)
 logger.info('Phase 1/6: Cleaning up Kubernetes resources (PVCs, PVs) while API server is accessible...');
 
-// Delete all Persistent Volume Claims (PVCs) to release the PVs.
-// This must happen before deleting PVs or the host paths.
-// shellExec(`kubectl delete pvc --all-namespaces --all --ignore-not-found || true`);
-
 // Get all Persistent Volumes and identify their host paths for data deletion.
-// This needs to be done *before* deleting the PVs themselves.
-// The '|| echo '{"items":[]}'` handles cases where 'kubectl get pv' might return empty or error.
 const pvListJson = shellExec(`kubectl get pv -o json || echo '{"items":[]}'`, { stdout: true, silent: true });
 const pvList = JSON.parse(pvListJson);
 
@@ -434,90 +510,57 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 logger.info('No Persistent Volumes found with hostPath to clean up.');
 }
 
-//
-
-
-//
-logger.info('Phase 2/6: Stopping Kubelet and removing CNI configurations...');
-// Stop kubelet service to prevent further activity and release resources.
-shellExec(`sudo systemctl stop kubelet || true`);
+// Phase 2: Stop Kubelet/K3s agent and remove CNI configuration
+logger.info('Phase 2/6: Stopping Kubelet/K3s agent and removing CNI configurations...');
+shellExec(`sudo systemctl stop kubelet || true`); // Stop kubelet if it's running (kubeadm)
+shellExec(`sudo /usr/local/bin/k3s-uninstall.sh || true`); // Run K3s uninstall script if it exists
 
 // CNI plugins use /etc/cni/net.d to store their configuration.
-// Removing this prevents conflicts and potential issues during kubeadm reset.
 shellExec('sudo rm -rf /etc/cni/net.d/* || true');
 
 // Phase 3: Kind Cluster Cleanup
 logger.info('Phase 3/6: Cleaning up Kind clusters...');
-// Delete all existing Kind (Kubernetes in Docker) clusters.
 shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster || true`);
 
-// Phase 4: Kubeadm Reset
-logger.info('Phase 4/6: Performing kubeadm reset...');
-//
-// The --force flag skips confirmation prompts. This command will tear down the cluster.
-shellExec(`sudo kubeadm reset --force`);
+// Phase 4: Kubeadm Reset (if applicable)
+logger.info('Phase 4/6: Performing kubeadm reset (if applicable)...');
+shellExec(`sudo kubeadm reset --force || true`); // Use || true to prevent script from failing if kubeadm is not installed
 
 // Phase 5: Post-reset File System Cleanup (Local Storage, Kubeconfig)
 logger.info('Phase 5/6: Cleaning up local storage provisioner data and kubeconfig...');
-// Remove the kubectl configuration file for the current user.
-// This is important to prevent stale credentials after the cluster is reset.
 shellExec('rm -rf $HOME/.kube || true');
-
-// Remove local path provisioner data, which stores data for dynamically provisioned PVCs.
 shellExec(`sudo rm -rf /opt/local-path-provisioner/* || true`);
 
 // Phase 6: Container Runtime Cleanup (Docker and Podman)
 logger.info('Phase 6/6: Cleaning up Docker and Podman data...');
-
-shellExec('sudo docker system prune -a -f');
-
-// Stop the Docker daemon service to ensure all files can be removed.
+shellExec('sudo docker system prune -a -f || true');
 shellExec('sudo service docker stop || true');
-
-// Aggressively remove container storage data for containerd and Docker.
-// This targets the underlying storage directories.
 shellExec(`sudo rm -rf /var/lib/containers/storage/* || true`);
 shellExec(`sudo rm -rf /var/lib/docker/volumes/* || true`);
-shellExec(`sudo rm -rf /var/lib/docker~/* || true`);
+shellExec(`sudo rm -rf /var/lib/docker~/* || true`);
 shellExec(`sudo rm -rf /home/containers/storage/* || true`);
 shellExec(`sudo rm -rf /home/docker/* || true`);
-
-// Ensure Docker's default storage location is clean and re-linked if custom.
-shellExec(`sudo rm -rf /var/lib/docker/* || true`);
 shellExec('sudo mkdir -p /home/docker || true');
 shellExec('sudo chmod 777 /home/docker || true');
-shellExec('sudo ln -sf /home/docker /var/lib/docker || true');
-
-// Prune all unused Podman data.
-shellExec(`sudo podman system prune -a -f`);
-shellExec(`sudo podman system prune --all --volumes --force`);
-shellExec(`sudo podman system prune --external --force`);
+shellExec('sudo ln -sf /home/docker /var/lib/docker || true');
 
-
+shellExec(`sudo podman system prune -a -f || true`);
+shellExec(`sudo podman system prune --all --volumes --force || true`);
+shellExec(`sudo podman system prune --external --force || true`);
 shellExec(`sudo mkdir -p /home/containers/storage || true`);
 shellExec('sudo chmod 0711 /home/containers/storage || true');
-
-// Update Podman's storage configuration file.
 shellExec(
 `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf || true`,
 );
-
-// Reset Podman system settings.
-shellExec(`sudo podman system reset -f`);
+shellExec(`sudo podman system reset -f || true`);
 
 // Final Kubelet and System Cleanup (after all other operations)
 logger.info('Finalizing Kubelet and system file cleanup...');
-// Remove Kubernetes configuration and kubelet data directories.
 shellExec(`sudo rm -rf /etc/kubernetes/* || true`);
 shellExec(`sudo rm -rf /var/lib/kubelet/* || true`);
-
-// Clear trash files from the root user's trash directory.
-shellExec('sudo rm -rf /root/.local/share/Trash/files/* || true');
-
-// Reload systemd daemon to pick up any service file changes.
+shellExec(`sudo rm -rf /root/.local/share/Trash/files/* || true`);
 shellExec(`sudo systemctl daemon-reload`);
-// Attempt to start kubelet;
-shellExec(`sudo systemctl start kubelet || true`);
+shellExec(`sudo systemctl start kubelet || true`); // Attempt to start kubelet; might fail if fully reset
 
 logger.info('Comprehensive reset completed successfully.');
 } catch (error) {
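Note: K3s ships its own teardown helpers (installed by the get.k3s.io script, as listed in the quick-start text further below), which is why the reset path can simply attempt them and ignore failures:

```
# Present only if K3s was installed via the install script
sudo /usr/local/bin/k3s-uninstall.sh || true   # full server teardown
# k3s-killall.sh is also installed, for stopping workloads without uninstalling
```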
@@ -530,21 +573,17 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 * @method getResourcesCapacity
 * @description Retrieves and returns the allocatable CPU and memory resources
 * of the Kubernetes node.
-* @param {boolean} [
+* @param {boolean} [isKubeadmOrK3s=false] - If true, assumes a kubeadm or k3s-managed node;
 * otherwise, assumes a Kind worker node.
 * @returns {object} An object containing CPU and memory resources with values and units.
 */
-getResourcesCapacity(
+getResourcesCapacity(isKubeadmOrK3s = false) {
 const resources = {};
-const
-
-
-
-
-stdout: true,
-silent: true,
-},
-);
+const nodeName = isKubeadmOrK3s ? os.hostname() : 'kind-worker';
+const info = shellExec(`kubectl describe node ${nodeName} | grep -E '(Allocatable:|Capacity:)' -A 6`, {
+stdout: true,
+silent: true,
+});
 info
 .split('Allocatable:')[1]
 .split('\n')
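Note: the capacity probe reduces to a `kubectl describe` pipeline; the same check can be run by hand against the local node on a kubeadm/K3s host:

```
kubectl describe node "$(hostname)" | grep -E '(Allocatable:|Capacity:)' -A 6
```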
@@ -568,9 +607,36 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 * @method initHost
 * @description Installs essential host-level prerequisites for Kubernetes,
 * including Docker, Podman, Kind, Kubeadm, and Helm.
+*
+* Quick-Start Guide for K3s Installation:
+* This guide will help you quickly launch a cluster with default options. Make sure your nodes meet the requirements before proceeding.
+* Consult the Installation page for greater detail on installing and configuring K3s.
+* For information on how K3s components work together, refer to the Architecture page.
+* If you are new to Kubernetes, the official Kubernetes docs have great tutorials covering basics that all cluster administrators should be familiar with.
+*
+* Install Script:
+* K3s provides an installation script that is a convenient way to install it as a service on systemd or openrc based systems. This script is available at https://get.k3s.io. To install K3s using this method, just run:
+* curl -sfL https://get.k3s.io | sh -
+*
+* After running this installation:
+* - The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed
+* - Additional utilities will be installed, including kubectl, crictl, ctr, k3s-killall.sh, and k3s-uninstall.sh
+* - A kubeconfig file will be written to /etc/rancher/k3s/k3s.yaml and the kubectl installed by K3s will automatically use it
+*
+* A single-node server installation is a fully-functional Kubernetes cluster, including all the datastore, control-plane, kubelet, and container runtime components necessary to host workload pods. It is not necessary to add additional server or agent nodes, but you may want to do so to add additional capacity or redundancy to your cluster.
+*
+* To install additional agent nodes and add them to the cluster, run the installation script with the K3S_URL and K3S_TOKEN environment variables. Here is an example showing how to join an agent:
+* curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh -
+*
+* Setting the K3S_URL parameter causes the installer to configure K3s as an agent, instead of a server. The K3s agent will register with the K3s server listening at the supplied URL. The value to use for K3S_TOKEN is stored at /var/lib/rancher/k3s/server/node-token on your server node.
+*
+* Note: Each machine must have a unique hostname. If your machines do not have unique hostnames, pass the K3S_NODE_NAME environment variable and provide a value with a valid and unique hostname for each node.
+* If you are interested in having more server nodes, see the High Availability Embedded etcd and High Availability External DB pages for more information.
 */
 initHost() {
-console.log(
+console.log(
+'Installing essential host-level prerequisites for Kubernetes (Docker, Podman, Kind, Kubeadm, Helm) and providing K3s Quick-Start Guide information...',
+);
 // Install docker
 shellExec(`sudo dnf -y install dnf-plugins-core`);
 shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
@@ -583,7 +649,7 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
 chmod +x ./kind
 sudo mv ./kind /bin/kind`);
-// Install kubeadm, kubelet, kubectl
+// Install kubeadm, kubelet, kubectl (these are also useful for K3s for kubectl command)
 shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
 [kubernetes]
 name=Kubernetes
package/src/cli/image.js
CHANGED
@@ -12,10 +12,22 @@ const logger = loggerFactory(import.meta);
 class UnderpostImage {
 static API = {
 dockerfile: {
+/**
+ * @method pullBaseImages
+ * @description Pulls base images and builds a 'debian-underpost' image,
+ * then loads it into the specified Kubernetes cluster type (Kind, Kubeadm, or K3s).
+ * @param {object} options - Options for pulling and loading images.
+ * @param {boolean} [options.kindLoad=false] - If true, load image into Kind cluster.
+ * @param {boolean} [options.kubeadmLoad=false] - If true, load image into Kubeadm cluster.
+ * @param {boolean} [options.k3sLoad=false] - If true, load image into K3s cluster.
+ * @param {string} [options.path=false] - Path to the Dockerfile context.
+ * @param {string} [options.version=''] - Version tag for the image.
+ */
 pullBaseImages(
 options = {
 kindLoad: false,
 kubeadmLoad: false,
+k3sLoad: false,
 path: false,
 version: '',
 },
@@ -23,13 +35,39 @@ class UnderpostImage {
 shellExec(`sudo podman pull docker.io/library/debian:buster`);
 const IMAGE_NAME = `debian-underpost`;
 const IMAGE_NAME_FULL = `${IMAGE_NAME}:${options.version ?? Underpost.version}`;
-
+let LOAD_TYPE = '';
+if (options.kindLoad === true) {
+LOAD_TYPE = `--kind-load`;
+} else if (options.kubeadmLoad === true) {
+LOAD_TYPE = `--kubeadm-load`;
+} else if (options.k3sLoad === true) {
+// Handle K3s load type
+LOAD_TYPE = `--k3s-load`;
+}
+
 shellExec(
 `underpost dockerfile-image-build --podman-save --reset --image-path=. --path ${
 options.path ?? getUnderpostRootPath()
 } --image-name=${IMAGE_NAME_FULL} ${LOAD_TYPE}`,
 );
 },
+/**
+ * @method build
+ * @description Builds a Docker image using Podman, optionally saves it as a tar archive,
+ * and loads it into a specified Kubernetes cluster (Kind, Kubeadm, or K3s).
+ * @param {object} options - Options for building and loading images.
+ * @param {string} [options.path=''] - The path to the directory containing the Dockerfile.
+ * @param {string} [options.imageName=''] - The name and tag for the image (e.g., 'my-app:latest').
+ * @param {string} [options.imagePath=''] - Directory to save the image tar file.
+ * @param {string} [options.dockerfileName=''] - Name of the Dockerfile (defaults to 'Dockerfile').
+ * @param {boolean} [options.podmanSave=false] - If true, save the image as a tar archive using Podman.
+ * @param {boolean} [options.kindLoad=false] - If true, load the image archive into a Kind cluster.
+ * @param {boolean} [options.kubeadmLoad=false] - If true, load the image archive into a Kubeadm cluster (uses 'ctr').
+ * @param {boolean} [options.k3sLoad=false] - If true, load the image archive into a K3s cluster (uses 'k3s ctr').
+ * @param {boolean} [options.secrets=false] - If true, load secrets from the .env file for the build.
+ * @param {string} [options.secretsPath=''] - Custom path to the .env file for secrets.
+ * @param {boolean} [options.reset=false] - If true, perform a no-cache build.
+ */
 build(
 options = {
 path: '',
@@ -39,6 +77,7 @@ class UnderpostImage {
 podmanSave: false,
 kindLoad: false,
 kubeadmLoad: false,
+k3sLoad: false,
 secrets: false,
 secretsPath: '',
 reset: false,
@@ -53,8 +92,9 @@ class UnderpostImage {
 secrets,
 secretsPath,
 kindLoad,
-reset,
 kubeadmLoad,
+k3sLoad,
+reset,
 } = options;
 const podManImg = `localhost/${imageName}`;
 if (imagePath && typeof imagePath === 'string' && !fs.existsSync(imagePath))
@@ -71,7 +111,7 @@ class UnderpostImage {
 ),
 );
 for (const key of Object.keys(envObj)) {
-secretsInput += ` && export ${key}="${envObj[key]}" `; // $(cat gitlab-token.txt)
+secretsInput += ` && export ${key}="${envObj[key]}" `; // Example: $(cat gitlab-token.txt)
 secretDockerInput += ` --secret id=${key},env=${key} \ `;
 }
 }
@@ -85,7 +125,14 @@ class UnderpostImage {
 
 if (podmanSave === true) shellExec(`podman save -o ${tarFile} ${podManImg}`);
 if (kindLoad === true) shellExec(`sudo kind load image-archive ${tarFile}`);
-if (kubeadmLoad === true)
+if (kubeadmLoad === true) {
+// Use 'ctr' for Kubeadm
+shellExec(`sudo ctr -n k8s.io images import ${tarFile}`);
+}
+if (k3sLoad === true) {
+// Use 'k3s ctr' for K3s
+shellExec(`sudo k3s ctr images import ${tarFile}`);
+}
 },
 },
 };
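Note: the three load paths target different runtimes. The same imports done by hand, per the commands in the diff (tar and image names are hypothetical):

```
podman save -o app.tar localhost/my-app:latest
sudo kind load image-archive app.tar          # Kind
sudo ctr -n k8s.io images import app.tar      # Kubeadm (host containerd)
sudo k3s ctr images import app.tar            # K3s (embedded containerd)
```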
package/src/cli/index.js
CHANGED
@@ -115,6 +115,7 @@ program
 .option('--config', 'Set k8s base node config')
 .option('--worker', 'Set worker node context')
 .option('--chown', 'Set k8s kube chown')
+.option('--k3s', 'Initialize the cluster using K3s')
 .action(Underpost.cluster.init)
 .description('Manage cluster, for default initialization base kind cluster');
 
@@ -171,6 +172,7 @@ program
 .option('--secrets', 'Dockerfile env secrets')
 .option('--secrets-path [secrets-path]', 'Dockerfile custom path env secrets')
 .option('--reset', 'Build without using cache')
+.option('--k3s-load', 'Load image into K3s cluster.')
 .description('Build image from Dockerfile')
 .action(Underpost.image.dockerfile.build);
 
@@ -180,6 +182,7 @@ program
 .option('--kind-load', 'Import tar image to Kind cluster')
 .option('--kubeadm-load', 'Import tar image to Kubeadm cluster')
 .option('--version', 'Set custom version')
+.option('--k3s-load', 'Load image into K3s cluster.')
 .description('Pull underpost dockerfile images requirements')
 .action(Underpost.image.dockerfile.pullBaseImages);
package/src/cli/lxd.js
CHANGED
@@ -22,6 +22,7 @@ class UnderpostLxd {
 * @param {boolean} [options.createAdminProfile=false] - Create admin-profile for VMs.
 * @param {boolean} [options.control=false] - Flag for control plane VM initialization.
 * @param {boolean} [options.worker=false] - Flag for worker node VM initialization.
+* @param {boolean} [options.k3s=false] - Flag to indicate K3s cluster type for VM initialization.
 * @param {string} [options.initVm=''] - Initialize a specific VM.
 * @param {string} [options.createVm=''] - Create a new VM with the given name.
 * @param {string} [options.infoVm=''] - Display information about a specific VM.
@@ -42,6 +43,7 @@ class UnderpostLxd {
 createAdminProfile: false,
 control: false,
 worker: false,
+k3s: false, // New k3s option
 initVm: '',
 createVm: '',
 infoVm: '',
@@ -56,8 +58,8 @@ class UnderpostLxd {
 const npmRoot = getNpmRootPath();
 const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
 if (options.reset === true) {
-shellExec(`sudo systemctl stop snap.lxd.daemon`);
-shellExec(`sudo snap remove lxd --purge`);
+shellExec(`sudo systemctl stop snap.lxd.daemon || true`);
+shellExec(`sudo snap remove lxd --purge || true`);
 }
 if (options.install === true) shellExec(`sudo snap install lxd`);
 if (options.init === true) {
@@ -93,12 +95,24 @@ ipv6.address=none`);
 if (options.initVm && typeof options.initVm === 'string') {
 let flag = '';
 if (options.control === true) {
-
+if (options.k3s === true) {
+// New K3s flag for control plane
+flag = ' -s -- --k3s';
+} else {
+// Default to kubeadm if not K3s
+flag = ' -s -- --kubeadm';
+}
 shellExec(`lxc exec ${options.initVm} -- bash -c 'mkdir -p /home/dd/engine'`);
 shellExec(`lxc file push /home/dd/engine/engine-private ${options.initVm}/home/dd/engine --recursive`);
 shellExec(`lxc file push /home/dd/engine/manifests ${options.initVm}/home/dd/engine --recursive`);
 } else if (options.worker == true) {
-
+if (options.k3s === true) {
+// New K3s flag for worker
+flag = ' -s -- --worker --k3s';
+} else {
+// Default to kubeadm worker
+flag = ' -s -- --worker';
+}
 }
 console.log(`Executing underpost-setup.sh on VM: ${options.initVm}`);
 shellExec(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
@@ -143,22 +157,13 @@ ipv6.address=none`);
 
 let portsToExpose = [];
 if (options.control === true) {
-// Kubernetes API Server
+// Kubernetes API Server (Kubeadm and K3s both use 6443 by default)
 portsToExpose.push('6443');
 // Standard HTTP/HTTPS for Ingress if deployed
 portsToExpose.push('80');
 portsToExpose.push('443');
 }
-//
-// It's safer to expose the entire range for flexibility, or specific NodePorts if known.
-// For production, you might only expose specific NodePorts or use a LoadBalancer.
-// For a general setup, exposing the range is common.
-// Note: LXD proxy device can only expose individual ports, not ranges directly.
-// We will expose a few common ones, or rely on specific 'expose' calls for others.
-// Let's add some common NodePorts that might be used by applications.
-// The full range 30000-32767 would require individual proxy rules for each port.
-// For this automatic setup, we'll focus on critical K8s ports and common app ports.
-// If a user needs the full NodePort range, they should use the `expose` option explicitly.
+// Add common NodePorts if needed, or rely on explicit 'expose'
 portsToExpose.push('30000'); // Example NodePort
 portsToExpose.push('30001'); // Example NodePort
 portsToExpose.push('30002'); // Example NodePort
@@ -183,11 +188,39 @@ ipv6.address=none`);
 }
 if (options.joinNode && typeof options.joinNode === 'string') {
 const [workerNode, controlNode] = options.joinNode.split(',');
-
-
-
-)
-
+// Determine if it's a Kubeadm or K3s join
+const isK3sJoin = options.k3s === true;
+
+if (isK3sJoin) {
+console.log(`Attempting to join K3s worker node ${workerNode} to control plane ${controlNode}`);
+// Get K3s token from control plane
+const k3sToken = shellExec(
+`lxc exec ${controlNode} -- bash -c 'sudo cat /var/lib/rancher/k3s/server/node-token'`,
+{ stdout: true },
+).trim();
+// Get control plane IP
+const controlPlaneIp = shellExec(
+`lxc list ${controlNode} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+{ stdout: true },
+).trim();
+
+if (!k3sToken || !controlPlaneIp) {
+console.error(`Failed to get K3s token or control plane IP. Cannot join worker.`);
+return;
+}
+const k3sJoinCommand = `K3S_URL=https://${controlPlaneIp}:6443 K3S_TOKEN=${k3sToken} curl -sfL https://get.k3s.io | sh -`;
+shellExec(`lxc exec ${workerNode} -- bash -c '${k3sJoinCommand}'`);
+console.log(`K3s worker node ${workerNode} join command executed.`);
+} else {
+// Kubeadm join
+console.log(`Attempting to join Kubeadm worker node ${workerNode} to control plane ${controlNode}`);
+const token = shellExec(
+`echo "$(lxc exec ${controlNode} -- bash -c 'sudo kubeadm token create --print-join-command')"`,
+{ stdout: true },
+);
+shellExec(`lxc exec ${workerNode} -- bash -c '${token}'`);
+console.log(`Kubeadm worker node ${workerNode} join command executed.`);
+}
 }
 if (options.infoVm && typeof options.infoVm === 'string') {
 shellExec(`lxc config show ${options.infoVm}`);