underpost 2.8.652 → 2.8.781
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/.vscode/extensions.json +37 -2
- package/.vscode/settings.json +2 -0
- package/CHANGELOG.md +24 -4
- package/README.md +5 -4
- package/bin/deploy.js +1455 -144
- package/cli.md +57 -14
- package/docker-compose.yml +1 -1
- package/manifests/deployment/adminer/deployment.yaml +32 -0
- package/manifests/deployment/adminer/kustomization.yaml +7 -0
- package/manifests/deployment/adminer/service.yaml +13 -0
- package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
- package/manifests/deployment/fastapi/backend-service.yml +19 -0
- package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
- package/manifests/deployment/fastapi/frontend-service.yml +15 -0
- package/manifests/deployment/fastapi/initial_data.sh +56 -0
- package/manifests/deployment/kafka/deployment.yaml +69 -0
- package/manifests/deployment/spark/spark-pi-py.yaml +21 -0
- package/manifests/envoy-service-nodeport.yaml +23 -0
- package/manifests/kubeadm-calico-config.yaml +119 -0
- package/manifests/kubelet-config.yaml +65 -0
- package/manifests/mongodb/kustomization.yaml +1 -1
- package/manifests/mongodb/statefulset.yaml +12 -11
- package/manifests/mongodb/storage-class.yaml +9 -0
- package/manifests/mongodb-4.4/service-deployment.yaml +1 -1
- package/manifests/mysql/kustomization.yaml +7 -0
- package/manifests/mysql/pv-pvc.yaml +27 -0
- package/manifests/mysql/statefulset.yaml +55 -0
- package/manifests/postgresql/configmap.yaml +9 -0
- package/manifests/postgresql/kustomization.yaml +10 -0
- package/manifests/postgresql/pv.yaml +15 -0
- package/manifests/postgresql/pvc.yaml +13 -0
- package/manifests/postgresql/service.yaml +10 -0
- package/manifests/postgresql/statefulset.yaml +37 -0
- package/manifests/valkey/statefulset.yaml +4 -3
- package/package.json +2 -1
- package/src/cli/cluster.js +281 -27
- package/src/cli/deploy.js +81 -15
- package/src/cli/fs.js +14 -3
- package/src/cli/image.js +34 -7
- package/src/cli/index.js +36 -1
- package/src/cli/lxd.js +19 -0
- package/src/cli/monitor.js +75 -30
- package/src/cli/repository.js +9 -6
- package/src/client/components/core/JoyStick.js +2 -2
- package/src/client/components/core/Modal.js +1 -0
- package/src/index.js +1 -1
- package/src/runtime/lampp/Dockerfile +1 -1
- package/src/server/conf.js +5 -1
- package/src/server/dns.js +47 -17
- package/src/server/runtime.js +2 -0
- package/src/server/start.js +0 -1
package/src/cli/cluster.js
CHANGED
@@ -3,6 +3,7 @@ import { loggerFactory } from '../server/logger.js';
 import { shellExec } from '../server/process.js';
 import UnderpostDeploy from './deploy.js';
 import UnderpostTest from './test.js';
+import os from 'os';
 
 const logger = loggerFactory(import.meta);
 
@@ -14,6 +15,8 @@ class UnderpostCluster {
       mongodb: false,
       mongodb4: false,
       mariadb: false,
+      mysql: false,
+      postgresql: false,
       valkey: false,
       full: false,
       info: false,
@@ -24,12 +27,25 @@ class UnderpostCluster {
       nsUse: '',
       infoCapacity: false,
       infoCapacityPod: false,
+      istio: false,
+      pullImage: false,
+      dedicatedGpu: false,
+      kubeadm: false,
+      initHost: false,
     },
   ) {
+    // sudo dnf update
+    // 1) Install kind, kubeadm, docker, podman, helm
+    // 2) Check kubectl, kubelet, containerd.io
+    // 3) Install Nvidia drivers from Rocky Linux docs
+    // 4) Install LXD with MAAS from Rocky Linux docs
+    // 5) Install MAAS src from snap
+    if (options.initHost === true) return UnderpostCluster.API.initHost();
     const npmRoot = getNpmRootPath();
     const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
     if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
-    if (options.infoCapacity === true)
+    if (options.infoCapacity === true)
+      return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
     if (options.reset === true) return await UnderpostCluster.API.reset();
     if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
 
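The new `initHost` option above short-circuits the cluster bootstrap and delegates to the `initHost()` helper added at the end of this file (the numbered comments are its checklist). A minimal programmatic sketch, assuming the module's default export as shown in this diff and a repo-root working directory:

```js
// Sketch: run host provisioning directly; equivalent to passing the new
// `initHost` option through the cluster callback (wiring per this diff).
import UnderpostCluster from './src/cli/cluster.js';

UnderpostCluster.API.initHost(); // installs docker, podman, kind, kubeadm, helm
```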
@@ -57,6 +73,7 @@ class UnderpostCluster {
       shellExec(
         `kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\\n"}{.metadata.name}{":\\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}'`,
       );
+      shellExec(`sudo crictl images`);
       console.log();
       logger.info('contour -------------------------------------------------');
       for (const _k of ['Cluster', 'HTTPProxy', 'ClusterIssuer', 'Certificate']) {
@@ -66,26 +83,86 @@ class UnderpostCluster {
       shellExec(`kubectl get secrets --all-namespaces -o wide`);
       shellExec(`docker secret ls`);
       shellExec(`kubectl get crd --all-namespaces -o wide`);
+      shellExec(`sudo kubectl api-resources`);
       return;
     }
+    const alrreadyCluster =
+      UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0] ||
+      UnderpostDeploy.API.get('calico-kube-controllers')[0];
 
-    if (
+    if (
+      !alrreadyCluster &&
+      ((!options.istio && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
+        (options.istio === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0]))
+    ) {
+      shellExec(`sudo setenforce 0`);
+      shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+      shellExec(`sudo systemctl enable --now docker`);
+      shellExec(`sudo systemctl enable --now kubelet`);
       shellExec(`containerd config default > /etc/containerd/config.toml`);
       shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
       // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
-      shellExec(`sudo systemctl restart kubelet`);
+      // shellExec(`sudo systemctl restart kubelet`);
       shellExec(`sudo service docker restart`);
       shellExec(`sudo systemctl enable --now containerd.service`);
-      shellExec(`sudo
+      shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+      if (options.istio === true) {
+        shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+        shellExec(
+          `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
+        );
+        shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
+        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+        // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+        shellExec(
+          `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
+        );
+        // shellExec(
+        //   `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
+        // );
+        shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
+        shellExec(`sudo systemctl restart containerd`);
+        const nodeName = os.hostname();
+        shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
+        shellExec(
+          `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
+        );
+      } else {
+        shellExec(`sudo systemctl restart containerd`);
+        if (options.full === true || options.dedicatedGpu === true) {
+          // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
+          shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
+        } else {
+          shellExec(
+            `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
+              options?.dev === true ? '-dev' : ''
+            }.yaml`,
+          );
+        }
+        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+      }
+    } else logger.warn('Cluster already initialized');
+
+    // shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubelet-config.yaml`);
+
+    if (options.full === true || options.dedicatedGpu === true) {
+      shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
       shellExec(
-        `
-          options?.dev === true ? '-dev' : ''
-        }.yaml`,
+        `node ${underpostRoot}/bin/deploy kubeflow-spark-operator${options.kubeadm === true ? ' kubeadm' : ''}`,
       );
-
-    } else logger.warn('Cluster already initialized');
+    }
 
     if (options.full === true || options.valkey === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull valkey/valkey:latest`);
+        shellExec(`sudo podman pull valkey/valkey:latest`);
+        if (!options.kubeadm)
+          shellExec(
+            `sudo ${
+              options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
+            } valkey/valkey:latest`,
+          );
+      }
       shellExec(`kubectl delete statefulset service-valkey`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
     }
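One detail in the valkey preload above: the `ctr -n k8s.io images import` / `kind load docker-image` ternary sits inside `if (!options.kubeadm)`, so the kubeadm branch of the ternary is unreachable and only the kind branch ever runs. A simplified equivalent of what actually executes (a sketch, same command strings as the diff):

```js
// Inside the pullImage block, options.kubeadm is always falsy here,
// so the ternary collapses to the kind branch:
if (!options.kubeadm) shellExec(`sudo kind load docker-image valkey/valkey:latest`);
```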
@@ -93,13 +170,43 @@ class UnderpostCluster {
       shellExec(
         `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password`,
       );
-      shellExec(
-        `sudo kubectl create secret generic github-secret --from-literal=GITHUB_TOKEN=${process.env.GITHUB_TOKEN}`,
-      );
       shellExec(`kubectl delete statefulset mariadb-statefulset`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
     }
+    if (options.full === true || options.mysql === true) {
+      shellExec(
+        `sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password`,
+      );
+      shellExec(`sudo mkdir -p /mnt/data`);
+      shellExec(`sudo chmod 777 /mnt/data`);
+      shellExec(`sudo chown -R root:root /mnt/data`);
+      shellExec(`kubectl apply -k ${underpostRoot}/manifests/mysql`);
+    }
+    if (options.full === true || options.postgresql === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull postgres:latest`);
+        if (!options.kubeadm)
+          shellExec(
+            `sudo ${
+              options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
+            } docker-image postgres:latest`,
+          );
+      }
+      shellExec(
+        `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+      );
+      shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql`);
+    }
     if (options.mongodb4 === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull mongo:4.4`);
+        if (!options.kubeadm)
+          shellExec(
+            `sudo ${
+              options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
+            } docker-image mongo:4.4`,
+          );
+      }
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);
 
       const deploymentName = 'mongodb-deployment';
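Unlike the valkey block, the postgres and mongo:4.4 preload templates keep a literal `docker-image` token after the interpolation, so the command that reaches the shell carries it twice. Evaluating the template shows this (plain string interpolation, shown as a runnable sketch):

```js
// Evaluating the preload template from this hunk; the `if (!options.kubeadm)`
// guard forces kubeadm=false, so the kind branch is taken:
const kubeadm = false;
const load = (image) =>
  `sudo ${kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`} docker-image ${image}`;
console.log(load('postgres:latest'));
// -> "sudo kind load docker-image docker-image postgres:latest"
// (the valkey block above interpolates to "... kind load docker-image valkey/valkey:latest")
```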
@@ -122,6 +229,9 @@ class UnderpostCluster {
 
       // await UnderpostTest.API.statusMonitor('mongodb-1');
     } else if (options.full === true || options.mongodb === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull mongo:latest`);
+      }
       shellExec(
         `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile`,
       );
@@ -129,6 +239,8 @@ class UnderpostCluster {
         `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password`,
       );
       shellExec(`kubectl delete statefulset mongodb`);
+      if (options.kubeadm === true)
+        shellExec(`kubectl apply -f ${underpostRoot}/manifests/mongodb/storage-class.yaml`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb`);
 
       const successInstance = await UnderpostTest.API.statusMonitor('mongodb-1');
@@ -151,8 +263,12 @@ class UnderpostCluster {
       }
     }
 
-    if (options.full === true || options.contour === true)
+    if (options.full === true || options.contour === true) {
       shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
+      if (options.kubeadm === true) {
+        shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/envoy-service-nodeport.yaml`);
+      }
+    }
 
     if (options.full === true || options.certManager === true) {
       if (!UnderpostDeploy.API.get('cert-manager').find((p) => p.STATUS === 'Running')) {
@@ -171,36 +287,137 @@ class UnderpostCluster {
         shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
       }
     },
+    // This function performs a comprehensive reset of Kubernetes and container environments
+    // on the host machine. Its primary goal is to clean up cluster components, temporary files,
+    // and container data, ensuring a clean state for re-initialization or fresh deployments,
+    // while also preventing the loss of the host machine's internet connectivity.
+
     reset() {
+      // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
+      // 'kind get clusters' lists all Kind clusters.
+      // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
+      // and executes 'kind delete cluster --name <cluster_name>' to remove them.
       shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
+
+      // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
+      // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
+      // configuration files, and associated network rules (like iptables entries created by kubeadm).
+      // The '-f' flag bypasses confirmation prompts.
       shellExec(`sudo kubeadm reset -f`);
+
+      // Step 3: Remove specific CNI (Container Network Interface) configuration files.
+      // This command targets and removes the configuration file for Flannel,
+      // a common CNI plugin, which might be left behind after a reset.
       shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
-
+
+      // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
+      // This command would flush all iptables rules, including those crucial for the host's general
+      // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
+      // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
+      // default network configuration.
+
+      // Step 4: Remove the kubectl configuration file from the current user's home directory.
+      // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
+      // providing a clean slate for connecting to a new or re-initialized cluster.
       shellExec('sudo rm -f $HOME/.kube/config');
+
+      // Step 5: Clear trash files from the root user's trash directory.
+      // This is a general cleanup step to remove temporary or deleted files.
       shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
+
+      // Step 6: Prune all unused Docker data.
+      // 'docker system prune -a -f' removes:
+      // - All stopped containers
+      // - All unused networks
+      // - All dangling images
+      // - All build cache
+      // - All unused volumes
+      // This aggressively frees up disk space and removes temporary Docker artifacts.
       shellExec('sudo docker system prune -a -f');
+
+      // Step 7: Stop the Docker daemon service.
+      // This step is often necessary to ensure that Docker's files and directories
+      // can be safely manipulated or moved in subsequent steps without conflicts.
       shellExec('sudo service docker stop');
+
+      // Step 8: Aggressively remove container storage data for containerd and Docker.
+      // These commands target the default storage locations for containerd and Docker,
+      // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
+      // This ensures a complete wipe of all container images, layers, and volumes.
       shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
       shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
-      shellExec(`sudo rm -rf /var/lib/docker~/*`);
-      shellExec(`sudo rm -rf /home/containers/storage/*`);
-      shellExec(`sudo rm -rf /home/docker/*`);
-
-
-
-
+      shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
+      shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
+      shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
+
+      // Step 9: Re-configure Docker's default storage location (if desired).
+      // These commands effectively move Docker's data directory from its default `/var/lib/docker`
+      // to a new location (`/home/docker`) and create a symbolic link.
+      // This is a specific customization to relocate Docker's storage.
+      shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
+      shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
+      shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
+      shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
+
+      // Step 10: Prune all unused Podman data.
+      // Similar to Docker pruning, these commands remove:
+      // - All stopped containers
+      // - All unused networks
+      // - All unused images
+      // - All unused volumes ('--volumes')
+      // - The '--force' flag bypasses confirmation.
+      // '--external' prunes external content not managed by Podman's default storage backend.
       shellExec(`sudo podman system prune -a -f`);
       shellExec(`sudo podman system prune --all --volumes --force`);
       shellExec(`sudo podman system prune --external --force`);
-      shellExec(`sudo podman system prune --all --volumes --force`);
+      shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
+
+      // Step 11: Create and set permissions for Podman's custom storage directory.
+      // This ensures the custom path `/home/containers/storage` exists and has correct permissions
+      // before Podman attempts to use it.
       shellExec(`sudo mkdir -p /home/containers/storage`);
       shellExec('sudo chmod 0711 /home/containers/storage');
+
+      // Step 12: Update Podman's storage configuration file.
+      // This command uses 'sed' to modify `/etc/containers/storage.conf`,
+      // changing the default storage path from `/var/lib/containers/storage`
+      // to the customized `/home/containers/storage`.
       shellExec(
         `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
       );
+
+      // Step 13: Reset Podman system settings.
+      // This command resets Podman's system-wide configuration to its default state.
       shellExec(`sudo podman system reset -f`);
+
+      // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
+      // were previously removed. These sysctl settings (bridge-nf-call-iptables,
+      // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
+      // network traffic through Linux bridges to be processed by iptables.
+      // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
+      // Re-initializing Kubernetes will typically set these as needed, and leaving them
+      // at their system default (or '1' if already configured) is safer for host
+      // connectivity during a reset operation.
+
+      // https://github.com/kubernetes-sigs/kind/issues/2886
+      // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
+      // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
+      // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+
+      // Step 14: Remove the 'kind' Docker network.
+      // This cleans up any network bridges or configurations specifically created by Kind.
+      // shellExec(`docker network rm kind`);
+
+      // Reset kubelet
+      shellExec(`sudo systemctl stop kubelet`);
+      shellExec(`sudo rm -rf /etc/kubernetes/*`);
+      shellExec(`sudo rm -rf /var/lib/kubelet/*`);
+      shellExec(`sudo rm -rf /etc/cni/net.d/*`);
+      shellExec(`sudo systemctl daemon-reload`);
+      shellExec(`sudo systemctl start kubelet`);
     },
-    getResourcesCapacity() {
+
+    getResourcesCapacity(kubeadm = false) {
       const resources = {};
       const info = false
         ? `Capacity:
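Steps 7-9 relocate Docker's data root by moving `/var/lib/docker` aside and symlinking it to `/home/docker` (the more common route is setting `data-root` in `/etc/docker/daemon.json`; the symlink is this project's choice). A quick Node sanity check of the resulting layout, offered as a sketch:

```js
// Verify the Step 9 relocation: /var/lib/docker should now be a symlink to /home/docker.
import fs from 'fs';

console.log('is symlink:', fs.lstatSync('/var/lib/docker').isSymbolicLink()); // expected: true
console.log('target:', fs.readlinkSync('/var/lib/docker')); // expected: /home/docker
```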
@@ -217,10 +434,15 @@ Allocatable:
   hugepages-2Mi: 0
   memory: 11914720Ki
   pods: `
-        : shellExec(
-
-
-
+        : shellExec(
+            `kubectl describe node ${
+              kubeadm === true ? os.hostname() : 'kind-worker'
+            } | grep -E '(Allocatable:|Capacity:)' -A 6`,
+            {
+              stdout: true,
+              silent: true,
+            },
+          );
       info
         .split('Allocatable:')[1]
         .split('\n')
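`getResourcesCapacity` now shells out to `kubectl describe node` (the kubeadm control-plane host, or `kind-worker` otherwise) and slices the text after `Allocatable:`. A self-contained sketch of that parsing step; the real field handling continues past this hunk, so treat the exact object shape as an assumption:

```js
// Parse the Allocatable block the way the diffed code starts to:
const sample = `Allocatable:
  cpu:                4
  memory:             11914720Ki
  pods:               110`;
const resources = {};
for (const row of sample.split('Allocatable:')[1].split('\n').filter((r) => r.trim())) {
  const [key, value] = row.trim().split(':').map((s) => s.trim());
  resources[key] = { value };
}
console.log(resources.memory.value); // "11914720Ki"
```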
@@ -240,6 +462,38 @@ Allocatable:
 
       return resources;
     },
+    initHost() {
+      // Base
+      shellExec(`sudo dnf update -y`);
+      shellExec(`sudo dnf install epel-release -y`);
+      // Install docker
+      shellExec(`sudo dnf -y install dnf-plugins-core
+sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
+      shellExec(`sudo dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
+      // Install podman
+      shellExec(`sudo dnf install podman`);
+      // Install kind
+      shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
+chmod +x ./kind
+sudo mv ./kind /bin/kind`);
+      // Install kubeadm
+      shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/
+enabled=1
+gpgcheck=1
+gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
+exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
+EOF`);
+      shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
+      // Install helm
+      shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+chmod 700 get_helm.sh
+./get_helm.sh
+chmod +x /usr/local/bin/helm
+sudo mv /usr/local/bin/helm /bin/helm`);
+    },
   };
 }
 export default UnderpostCluster;
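Note that `initHost()` targets RHEL-family hosts (dnf/yum and the RHEL Docker repo), and its kind download is guarded by `[ $(uname -m) = aarch64 ]`, so as released only arm64 hosts fetch the binary while the following `chmod`/`mv` assume `./kind` exists. A hedged, architecture-aware variant, assuming kind v0.29.0 publishes both `kind-linux-amd64` and `kind-linux-arm64` artifacts as its install docs describe:

```js
// Sketch: select the kind binary by architecture before the chmod/mv steps.
shellExec(`ARCH=$(uname -m); case "$ARCH" in
  x86_64) KIND_ARCH=amd64 ;;
  aarch64) KIND_ARCH=arm64 ;;
esac
curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-$KIND_ARCH
chmod +x ./kind
sudo mv ./kind /bin/kind`);
```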
package/src/cli/deploy.js
CHANGED
@@ -15,19 +15,20 @@ import dotenv from 'dotenv';
 import { DataBaseProvider } from '../db/DataBaseProvider.js';
 import UnderpostRootEnv from './env.js';
 import UnderpostCluster from './cluster.js';
+import Underpost from '../index.js';
 
 const logger = loggerFactory(import.meta);
 
 class UnderpostDeploy {
   static NETWORK = {};
   static API = {
-    sync(deployList, { versions, replicas }) {
-      const deployGroupId = 'dd.
+    sync(deployList, { versions, replicas, kubeadm = false }) {
+      const deployGroupId = 'dd.router';
       fs.writeFileSync(`./engine-private/deploy/${deployGroupId}`, deployList, 'utf8');
       const totalPods = deployList.split(',').length * versions.split(',').length * parseInt(replicas);
-      const limitFactor = 0.
-      const reserveFactor = 0.
-      const resources = UnderpostCluster.API.getResourcesCapacity();
+      const limitFactor = 0.8;
+      const reserveFactor = 0.05;
+      const resources = UnderpostCluster.API.getResourcesCapacity(kubeadm);
       const memory = parseInt(resources.memory.value / totalPods);
       const cpu = parseInt(resources.cpu.value / totalPods);
       UnderpostRootEnv.API.set(
|
|
|
80
81
|
spec:
|
|
81
82
|
containers:
|
|
82
83
|
- name: ${deployId}-${env}-${suffix}
|
|
83
|
-
image: localhost/debian
|
|
84
|
+
image: localhost/debian-underpost:${Underpost.version}
|
|
84
85
|
resources:
|
|
85
86
|
requests:
|
|
86
87
|
memory: "${resources.requests.memory}"
|
|
@@ -220,6 +221,14 @@ spec:
|
|
|
220
221
|
}
|
|
221
222
|
}
|
|
222
223
|
},
|
|
224
|
+
getCurrentTraffic(deployId) {
|
|
225
|
+
// kubectl get deploy,sts,svc,configmap,secret -n default -o yaml --export > default.yaml
|
|
226
|
+
const hostTest = Object.keys(
|
|
227
|
+
JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8')),
|
|
228
|
+
)[0];
|
|
229
|
+
const info = shellExec(`sudo kubectl get HTTPProxy/${hostTest} -o yaml`, { silent: true, stdout: true });
|
|
230
|
+
return info.match('blue') ? 'blue' : info.match('green') ? 'green' : null;
|
|
231
|
+
},
|
|
223
232
|
async callback(
|
|
224
233
|
deployList = 'default',
|
|
225
234
|
env = 'development',
|
|
@@ -235,6 +244,10 @@ spec:
|
|
|
235
244
|
traffic: '',
|
|
236
245
|
dashboardUpdate: false,
|
|
237
246
|
replicas: '',
|
|
247
|
+
restoreHosts: false,
|
|
248
|
+
disableUpdateDeployment: false,
|
|
249
|
+
infoTraffic: false,
|
|
250
|
+
rebuildClientsBundle: false,
|
|
238
251
|
},
|
|
239
252
|
) {
|
|
240
253
|
if (options.infoUtil === true)
|
|
@@ -242,11 +255,41 @@ spec:
 kubectl rollout restart deployment/deployment-name
 kubectl rollout undo deployment/deployment-name
 kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
-
+kubectl get pods -w
+kubectl patch statefulset service-valkey --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
+kubectl patch statefulset service-valkey -p '{"spec":{"template":{"spec":{"containers":[{"name":"service-valkey","imagePullPolicy":"Never"}]}}}}'
+kubectl logs -f <pod-name>
+kubectl describe pod <pod-name>
+kubectl exec -it <pod-name> -- bash
+kubectl exec -it <pod-name> -- sh
+docker exec -it kind-control-plane bash
+curl -4 -v google.com
+kubectl taint nodes <node-name> node-role.kubernetes.io/control-plane:NoSchedule-
+kubectl run test-pod --image=busybox:latest --restart=Never -- /bin/sh -c "while true; do sleep 30; done;"
+kubectl run test-pod --image=alpine/curl:latest --restart=Never -- sh -c "sleep infinity"
+kubectl get ippools -o yaml
+kubectl get node <node-name> -o jsonpath='{.spec.podCIDR}'
+kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "10.244.0.0/16"}]'
+kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "192.168.0.0/24"}]'
+sudo podman run --rm localhost/<image-name>:<image-version> <command>
+kubectl get configmap kubelet-config -n kube-system -o yaml > kubelet-config.yaml
+`);
     if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd.router`))
       deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
+    if (options.infoTraffic === true) {
+      for (const _deployId of deployList.split(',')) {
+        const deployId = _deployId.trim();
+        logger.info('', {
+          deployId,
+          env,
+          traffic: UnderpostDeploy.API.getCurrentTraffic(deployId),
+        });
+      }
+      return;
+    }
+    if (options.rebuildClientsBundle === true) await UnderpostDeploy.API.rebuildClientsBundle(deployList);
     if (!(options.versions && typeof options.versions === 'string')) options.versions = 'blue,green';
-    if (!options.replicas) options.replicas =
+    if (!options.replicas) options.replicas = 1;
     if (options.sync) UnderpostDeploy.API.sync(deployList, options);
     if (options.buildManifest === true) await UnderpostDeploy.API.buildManifest(deployList, env, options);
     if (options.infoRouter === true) logger.info('router', await UnderpostDeploy.API.routerFactory(deployList, env));
@@ -256,11 +299,17 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
     shellExec(
       `kubectl create configmap underpost-config --from-file=/home/dd/engine/engine-private/conf/dd-cron/.env.${env}`,
     );
+    let renderHosts = '';
+    let concatHots = '';
     const etcHost = (
       concat,
     ) => `127.0.0.1 ${concat} localhost localhost.localdomain localhost4 localhost4.localdomain4
 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6`;
-
+    if (options.restoreHosts === true) {
+      renderHosts = etcHost(concatHots);
+      fs.writeFileSync(`/etc/hosts`, renderHosts, 'utf8');
+      return;
+    }
 
     for (const _deployId of deployList.split(',')) {
       const deployId = _deployId.trim();
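On the `restoreHosts` path, `concatHots` is still the empty string, so `/etc/hosts` is rewritten to plain localhost entries and the callback returns early, undoing any per-deployment host mappings added by previous runs. What gets written, as a runnable sketch of the same template:

```js
// Render the /etc/hosts content the restoreHosts branch writes:
const etcHost = (concat) => `127.0.0.1 ${concat} localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6`;
console.log(etcHost('')); // localhost-only entries; no deployment hosts
```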
@@ -276,10 +325,11 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
         continue;
       }
 
-
-
-
-
+      if (!options.disableUpdateDeployment)
+        for (const version of options.versions.split(',')) {
+          shellExec(`sudo kubectl delete svc ${deployId}-${env}-${version}-service`);
+          shellExec(`sudo kubectl delete deployment ${deployId}-${env}-${version}`);
+        }
 
       const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
       for (const host of Object.keys(confServer)) {
@@ -294,13 +344,12 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
           : `manifests/deployment/${deployId}-${env}`;
 
       if (!options.remove === true) {
-        shellExec(`sudo kubectl apply -f ./${manifestsPath}/deployment.yaml`);
+        if (!options.disableUpdateDeployment) shellExec(`sudo kubectl apply -f ./${manifestsPath}/deployment.yaml`);
         shellExec(`sudo kubectl apply -f ./${manifestsPath}/proxy.yaml`);
         if (env === 'production' && options.cert === true)
           shellExec(`sudo kubectl apply -f ./${manifestsPath}/secret.yaml`);
       }
     }
-    let renderHosts;
     switch (process.platform) {
       case 'linux':
         {
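Taken together, `disableUpdateDeployment` and `getCurrentTraffic` enable a blue/green flip that re-applies only `proxy.yaml` while leaving the running pods untouched. A sketch of that flow (option names are per this diff; how the `traffic` option feeds the rendered proxy manifest is an assumption, and the deploy id is hypothetical):

```js
// Flip traffic to the idle color without recreating deployments:
const current = UnderpostDeploy.API.getCurrentTraffic('dd-core');
await UnderpostDeploy.API.callback('dd-core', 'production', {
  disableUpdateDeployment: true, // skip svc/deployment delete and deployment.yaml apply
  traffic: current === 'blue' ? 'green' : 'blue',
});
```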
@@ -357,6 +406,23 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
 
       return result;
     },
+    rebuildClientsBundle(deployList) {
+      for (const _deployId of deployList.split(',')) {
+        const deployId = _deployId.trim();
+        const repoName = `engine-${deployId.split('-')[1]}`;
+
+        shellExec(`underpost script set ${deployId}-client-build '
+cd /home/dd/engine &&
+git checkout . &&
+underpost pull . underpostnet/${repoName} &&
+underpost pull ./engine-private underpostnet/${repoName}-private &&
+underpost env ${deployId} production &&
+node bin/deploy build-full-client ${deployId}
+'`);
+
+        shellExec(`node bin script run ${deployId}-client-build --itc --pod-name ${deployId}`);
+      }
+    },
     resourcesFactory() {
       return {
         requests: {
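`rebuildClientsBundle` registers a `<deployId>-client-build` script that re-pulls the public and private repos, re-applies the production env, and rebuilds the full client, then runs it with `--itc --pod-name <deployId>`, targeting the deployment's pod rather than the host. Programmatic sketch (the deploy list value is hypothetical):

```js
// Rebuild client bundles in-cluster without rolling the deployment:
await UnderpostDeploy.API.callback('dd-core', 'production', { rebuildClientsBundle: true });
```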
package/src/cli/fs.js
CHANGED
@@ -24,7 +24,7 @@ class UnderpostFileStorage {
     getStorageConf(options) {
       let storage, storageConf;
       if (options.deployId && typeof options.deployId === 'string') {
-        storageConf = `./engine-private/conf/${options.deployId}/storage.json`;
+        storageConf = options.storageFilePath ?? `./engine-private/conf/${options.deployId}/storage.json`;
         if (!fs.existsSync(storageConf)) fs.writeFileSync(storageConf, JSON.stringify({}), 'utf8');
         storage = JSON.parse(fs.readFileSync(storageConf, 'utf8'));
       }
@@ -35,7 +35,15 @@ class UnderpostFileStorage {
     },
     async recursiveCallback(
       path,
-      options = {
+      options = {
+        rm: false,
+        recursive: false,
+        deployId: '',
+        force: false,
+        pull: false,
+        git: false,
+        storageFilePath: '',
+      },
     ) {
       const { storage, storageConf } = UnderpostFileStorage.API.getStorageConf(options);
       const deleteFiles = options.pull === true ? [] : UnderpostRepository.API.getDeleteFiles(path);
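The expanded defaults document the options `recursiveCallback` actually consults, and the new `storageFilePath` lets a caller point `getStorageConf` at an alternative storage manifest instead of the per-deployment default. Usage sketch (paths and deploy id are hypothetical):

```js
// Sync a directory against a custom storage manifest:
await UnderpostFileStorage.API.recursiveCallback('./public/assets', {
  deployId: 'dd-core',
  recursive: true,
  storageFilePath: './engine-private/conf/dd-core/storage.custom.json',
});
```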
@@ -85,7 +93,10 @@ class UnderpostFileStorage {
|
|
|
85
93
|
if (options.rm === true) return await UnderpostFileStorage.API.delete(path, options);
|
|
86
94
|
return await UnderpostFileStorage.API.upload(path, options);
|
|
87
95
|
},
|
|
88
|
-
async upload(
|
|
96
|
+
async upload(
|
|
97
|
+
path,
|
|
98
|
+
options = { rm: false, recursive: false, deployId: '', force: false, pull: false, storageFilePath: '' },
|
|
99
|
+
) {
|
|
89
100
|
UnderpostFileStorage.API.cloudinaryConfig();
|
|
90
101
|
const { storage, storageConf } = UnderpostFileStorage.API.getStorageConf(options);
|
|
91
102
|
// path = UnderpostFileStorage.API.file2Zip(path);
|