@underpostnet/underpost 2.8.5 → 2.8.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ghpkg.yml +1 -1
- package/.github/workflows/npmpkg.yml +1 -1
- package/.github/workflows/pwa-microservices-template.page.yml +1 -1
- package/.vscode/extensions.json +3 -2
- package/.vscode/settings.json +6 -0
- package/CHANGELOG.md +44 -0
- package/Dockerfile +9 -10
- package/README.md +39 -2
- package/bin/build.js +31 -6
- package/bin/deploy.js +1404 -202
- package/bin/file.js +8 -0
- package/bin/hwt.js +0 -10
- package/bin/index.js +1 -187
- package/bin/util.js +0 -7
- package/bin/vs.js +1 -0
- package/cli.md +451 -0
- package/conf.js +0 -2
- package/docker-compose.yml +1 -1
- package/jsdoc.json +1 -1
- package/manifests/calico-custom-resources.yaml +25 -0
- package/manifests/deployment/adminer/deployment.yaml +32 -0
- package/manifests/deployment/adminer/kustomization.yaml +7 -0
- package/manifests/deployment/adminer/service.yaml +13 -0
- package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
- package/manifests/deployment/fastapi/backend-service.yml +19 -0
- package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
- package/manifests/deployment/fastapi/frontend-service.yml +15 -0
- package/manifests/deployment/kafka/deployment.yaml +69 -0
- package/manifests/kind-config-dev.yaml +12 -0
- package/manifests/kubeadm-calico-config.yaml +119 -0
- package/manifests/mongodb/kustomization.yaml +2 -2
- package/manifests/mongodb-4.4/kustomization.yaml +7 -0
- package/manifests/mongodb-4.4/service-deployment.yaml +63 -0
- package/manifests/postgresql/configmap.yaml +9 -0
- package/manifests/postgresql/kustomization.yaml +10 -0
- package/manifests/postgresql/pv.yaml +15 -0
- package/manifests/postgresql/pvc.yaml +13 -0
- package/manifests/{core/underpost-engine-headless-service.yaml → postgresql/service.yaml} +3 -3
- package/manifests/postgresql/statefulset.yaml +37 -0
- package/manifests/valkey/statefulset.yaml +6 -4
- package/package.json +10 -14
- package/src/api/default/default.service.js +1 -1
- package/src/api/user/user.service.js +14 -11
- package/src/cli/cluster.js +298 -63
- package/src/cli/cron.js +39 -8
- package/src/cli/db.js +118 -44
- package/src/cli/deploy.js +312 -102
- package/src/cli/env.js +9 -3
- package/src/cli/fs.js +161 -0
- package/src/cli/image.js +45 -104
- package/src/cli/index.js +312 -0
- package/src/cli/monitor.js +236 -0
- package/src/cli/repository.js +26 -2
- package/src/cli/script.js +25 -1
- package/src/cli/test.js +39 -4
- package/src/client/components/core/Account.js +28 -24
- package/src/client/components/core/Blockchain.js +1 -1
- package/src/client/components/core/CalendarCore.js +14 -73
- package/src/client/components/core/CommonJs.js +54 -2
- package/src/client/components/core/Css.js +0 -1
- package/src/client/components/core/CssCore.js +10 -4
- package/src/client/components/core/Docs.js +1 -2
- package/src/client/components/core/EventsUI.js +3 -3
- package/src/client/components/core/FileExplorer.js +86 -78
- package/src/client/components/core/Input.js +4 -2
- package/src/client/components/core/JoyStick.js +2 -2
- package/src/client/components/core/LoadingAnimation.js +3 -12
- package/src/client/components/core/LogIn.js +3 -3
- package/src/client/components/core/LogOut.js +1 -1
- package/src/client/components/core/Modal.js +44 -14
- package/src/client/components/core/Panel.js +26 -66
- package/src/client/components/core/PanelForm.js +22 -15
- package/src/client/components/core/Recover.js +3 -3
- package/src/client/components/core/RichText.js +1 -11
- package/src/client/components/core/Router.js +3 -1
- package/src/client/components/core/SignUp.js +2 -2
- package/src/client/components/default/RoutesDefault.js +3 -2
- package/src/client/services/core/core.service.js +15 -10
- package/src/client/services/default/default.management.js +45 -38
- package/src/client/ssr/Render.js +6 -1
- package/src/client/ssr/body/CacheControl.js +2 -3
- package/src/client/sw/default.sw.js +3 -3
- package/src/db/mongo/MongooseDB.js +17 -1
- package/src/index.js +25 -1
- package/src/mailer/MailerProvider.js +3 -0
- package/src/runtime/lampp/Dockerfile +65 -0
- package/src/server/backup.js +3 -3
- package/src/server/client-build.js +45 -23
- package/src/server/client-formatted.js +2 -1
- package/src/server/conf.js +110 -16
- package/src/server/dns.js +74 -43
- package/src/server/downloader.js +0 -8
- package/src/server/json-schema.js +77 -0
- package/src/server/network.js +7 -122
- package/src/server/peer.js +2 -2
- package/src/server/proxy.js +4 -4
- package/src/server/runtime.js +40 -12
- package/src/server/start.js +122 -0
- package/src/server/valkey.js +25 -11
- package/test/api.test.js +0 -8
- package/manifests/core/kustomization.yaml +0 -11
- package/manifests/core/underpost-engine-backup-access.yaml +0 -16
- package/manifests/core/underpost-engine-backup-pv-pvc.yaml +0 -22
- package/manifests/core/underpost-engine-mongodb-backup-cronjob.yaml +0 -40
- package/manifests/core/underpost-engine-mongodb-configmap.yaml +0 -26
- package/manifests/core/underpost-engine-statefulset.yaml +0 -91
- package/manifests/valkey/underpost-engine-valkey-service.yaml +0 -17
- package/manifests/valkey/underpost-engine-valkey-statefulset.yaml +0 -39
- package/manifests/{core/underpost-engine-pv-pvc.yaml → mongodb-4.4/pv-pvc.yaml} +0 -0
package/src/cli/cluster.js
CHANGED

@@ -1,18 +1,51 @@
-import {
-import { cliSpinner } from '../server/conf.js';
+import { getNpmRootPath } from '../server/conf.js';
 import { loggerFactory } from '../server/logger.js';
 import { shellExec } from '../server/process.js';
+import UnderpostDeploy from './deploy.js';
+import UnderpostTest from './test.js';

 const logger = loggerFactory(import.meta);

 class UnderpostCluster {
   static API = {
-    async init(
-
+    async init(
+      podName,
+      options = {
+        mongodb: false,
+        mongodb4: false,
+        mariadb: false,
+        postgresql: false,
+        valkey: false,
+        full: false,
+        info: false,
+        certManager: false,
+        listPods: false,
+        reset: false,
+        dev: false,
+        nsUse: '',
+        infoCapacity: false,
+        infoCapacityPod: false,
+        istio: false,
+        pullImage: false,
+      },
+    ) {
+      // 1) Install kind, kubeadm, docker, podman
+      // 2) Check kubectl, kubelet, containerd.io
+      // 3) Install Nvidia drivers from Rocky Linux docs
+      // 4) Install LXD with MAAS from Rocky Linux docs
+      // 5) Install MAAS src from snap
+      const npmRoot = getNpmRootPath();
+      const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+      if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
+      if (options.infoCapacity === true) return logger.info('', UnderpostCluster.API.getResourcesCapacity());
+      if (options.reset === true) return await UnderpostCluster.API.reset();
+      if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
+
+      if (options.nsUse && typeof options.nsUse === 'string') {
         shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
         return;
       }
-      if (options.info) {
+      if (options.info === true) {
         shellExec(`kubectl config get-contexts`); // config env persisente for manage multiple clusters
         shellExec(`kubectl config get-clusters`);
         shellExec(`kubectl get nodes -o wide`); // set of nodes of a cluster
@@ -40,28 +73,60 @@ class UnderpostCluster {
         logger.info('----------------------------------------------------------------');
         shellExec(`kubectl get secrets --all-namespaces -o wide`);
         shellExec(`docker secret ls`);
+        shellExec(`kubectl get crd --all-namespaces -o wide`);
+        shellExec(`sudo kubectl api-resources`);
         return;
       }
-
-
-
-
-
-
+
+      if (
+        (!options.istio && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
+        (options.istio === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0])
+      ) {
+        shellExec(`sudo setenforce 0`);
+        shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+        // sudo systemctl disable kubelet
+        // shellExec(`sudo systemctl enable --now kubelet`);
         shellExec(`containerd config default > /etc/containerd/config.toml`);
         shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
         // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
-        shellExec(`sudo systemctl restart kubelet`);
+        // shellExec(`sudo systemctl restart kubelet`);
         shellExec(`sudo service docker restart`);
-        shellExec(`
-        shellExec(`sudo
+        shellExec(`sudo systemctl enable --now containerd.service`);
+        shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+        if (options.istio === true) {
+          shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+          shellExec(`sudo kubeadm init --pod-network-cidr=192.168.0.0/16`);
+          shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
+          shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+          // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+          shellExec(
+            `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
+          );
+          // shellExec(
+          //   `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
+          // );
+          shellExec(`sudo kubectl apply -f ./manifests/kubeadm-calico-config.yaml`);
+          shellExec(`sudo systemctl restart containerd`);
+        } else {
+          shellExec(`sudo systemctl restart containerd`);
+          shellExec(
+            `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
+              options?.dev === true ? '-dev' : ''
+            }.yaml`,
+          );
+          shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+        }
       } else logger.warn('Cluster already initialized');

-      if (options.full || options.valkey) {
+      if (options.full === true || options.valkey === true) {
+        if (options.pullImage === true) {
+          shellExec(`docker pull valkey/valkey`);
+          shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+        }
         shellExec(`kubectl delete statefulset service-valkey`);
-        shellExec(`kubectl apply -k
+        shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
       }
-      if (options.full || options.mariadb) {
+      if (options.full === true || options.mariadb === true) {
         shellExec(
           `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password`,
         );
@@ -69,9 +134,45 @@ class UnderpostCluster {
           `sudo kubectl create secret generic github-secret --from-literal=GITHUB_TOKEN=${process.env.GITHUB_TOKEN}`,
         );
         shellExec(`kubectl delete statefulset mariadb-statefulset`);
-        shellExec(`kubectl apply -k
+        shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
       }
-      if (options.full || options.
+      if (options.full === true || options.postgresql === true) {
+        if (options.pullImage === true) {
+          shellExec(`docker pull postgres:latest`);
+          shellExec(`sudo kind load docker-image postgres:latest`);
+        }
+        shellExec(
+          `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+        );
+        shellExec(`kubectl apply -k ./manifests/postgresql`);
+      }
+      if (options.mongodb4 === true) {
+        if (options.pullImage === true) {
+          shellExec(`docker pull mongo:4.4`);
+          shellExec(`sudo kind load docker-image mongo:4.4`);
+        }
+        shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);
+
+        const deploymentName = 'mongodb-deployment';
+
+        const successInstance = await UnderpostTest.API.statusMonitor(deploymentName);
+
+        if (successInstance) {
+          const mongoConfig = {
+            _id: 'rs0',
+            members: [{ _id: 0, host: '127.0.0.1:27017' }],
+          };
+
+          const [pod] = UnderpostDeploy.API.get(deploymentName);
+
+          shellExec(
+            `sudo kubectl exec -i ${pod.NAME} -- mongo --quiet \
+            --eval 'rs.initiate(${JSON.stringify(mongoConfig)})'`,
+          );
+        }
+
+        // await UnderpostTest.API.statusMonitor('mongodb-1');
+      } else if (options.full === true || options.mongodb === true) {
         shellExec(
           `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile`,
         );
@@ -79,75 +180,209 @@ class UnderpostCluster {
           `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password`,
         );
         shellExec(`kubectl delete statefulset mongodb`);
-        shellExec(`kubectl apply -k
-
-        await new Promise(async (resolve) => {
-          cliSpinner(3000, `[cluster.js] `, ` Load mongodb instance`, 'yellow', 'material');
-          await timer(3000);
-
-          const monitor = async () => {
-            cliSpinner(1000, `[cluster.js] `, ` Load mongodb instance`, 'yellow', 'material');
-            await timer(1000);
-            if (
-              shellExec(`kubectl get pods --all-namespaces -o wide`, {
-                silent: true,
-                stdout: true,
-                disableLog: true,
-              }).match(`mongodb-1 1/1 Running`)
-            )
-              return resolve();
-            return monitor();
-          };
-          await monitor();
-        });
+        shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb`);

-        const
-          _id: 'rs0',
-          members: [
-            { _id: 0, host: 'mongodb-0.mongodb-service:27017', priority: 1 },
-            { _id: 1, host: 'mongodb-1.mongodb-service:27017', priority: 1 },
-          ],
-        };
+        const successInstance = await UnderpostTest.API.statusMonitor('mongodb-1');

-
-
-
-
-
-
+        if (successInstance) {
+          const mongoConfig = {
+            _id: 'rs0',
+            members: [
+              { _id: 0, host: 'mongodb-0.mongodb-service:27017', priority: 1 },
+              { _id: 1, host: 'mongodb-1.mongodb-service:27017', priority: 1 },
+            ],
+          };
+
+          shellExec(
+            `sudo kubectl exec -i mongodb-0 -- mongosh --quiet --json=relaxed \
+            --eval 'use admin' \
+            --eval 'rs.initiate(${JSON.stringify(mongoConfig)})' \
+            --eval 'rs.status()'`,
+          );
+        }
       }

-      if (options.full || options.contour)
+      if (options.full === true || options.contour === true)
         shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
+
+      if (options.full === true || options.certManager === true) {
+        if (!UnderpostDeploy.API.get('cert-manager').find((p) => p.STATUS === 'Running')) {
+          shellExec(`helm repo add jetstack https://charts.jetstack.io --force-update`);
+          shellExec(
+            `helm install cert-manager jetstack/cert-manager \
+            --namespace cert-manager \
+            --create-namespace \
+            --version v1.17.0 \
+            --set crds.enabled=true`,
+          );
+        }
+
+        const letsEncName = 'letsencrypt-prod';
+        shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName}`);
+        shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
+      }
     },
+    // This function performs a comprehensive reset of Kubernetes and container environments
+    // on the host machine. Its primary goal is to clean up cluster components, temporary files,
+    // and container data, ensuring a clean state for re-initialization or fresh deployments,
+    // while also preventing the loss of the host machine's internet connectivity.
+
     reset() {
+      // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
+      // 'kind get clusters' lists all Kind clusters.
+      // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
+      // and executes 'kind delete cluster --name <cluster_name>' to remove them.
       shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
+
+      // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
+      // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
+      // configuration files, and associated network rules (like iptables entries created by kubeadm).
+      // The '-f' flag bypasses confirmation prompts.
       shellExec(`sudo kubeadm reset -f`);
+
+      // Step 3: Remove specific CNI (Container Network Interface) configuration files.
+      // This command targets and removes the configuration file for Flannel,
+      // a common CNI plugin, which might be left behind after a reset.
       shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
-
+
+      // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
+      // This command would flush all iptables rules, including those crucial for the host's general
+      // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
+      // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
+      // default network configuration.
+
+      // Step 4: Remove the kubectl configuration file from the current user's home directory.
+      // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
+      // providing a clean slate for connecting to a new or re-initialized cluster.
       shellExec('sudo rm -f $HOME/.kube/config');
+
+      // Step 5: Clear trash files from the root user's trash directory.
+      // This is a general cleanup step to remove temporary or deleted files.
       shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
+
+      // Step 6: Prune all unused Docker data.
+      // 'docker system prune -a -f' removes:
+      // - All stopped containers
+      // - All unused networks
+      // - All dangling images
+      // - All build cache
+      // - All unused volumes
+      // This aggressively frees up disk space and removes temporary Docker artifacts.
       shellExec('sudo docker system prune -a -f');
+
+      // Step 7: Stop the Docker daemon service.
+      // This step is often necessary to ensure that Docker's files and directories
+      // can be safely manipulated or moved in subsequent steps without conflicts.
       shellExec('sudo service docker stop');
+
+      // Step 8: Aggressively remove container storage data for containerd and Docker.
+      // These commands target the default storage locations for containerd and Docker,
+      // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
+      // This ensures a complete wipe of all container images, layers, and volumes.
       shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
       shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
-      shellExec(`sudo rm -rf /var/lib/docker~/*`);
-      shellExec(`sudo rm -rf /home/containers/storage/*`);
-      shellExec(`sudo rm -rf /home/docker/*`);
-
-
-
-
+      shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
+      shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
+      shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
+
+      // Step 9: Re-configure Docker's default storage location (if desired).
+      // These commands effectively move Docker's data directory from its default `/var/lib/docker`
+      // to a new location (`/home/docker`) and create a symbolic link.
+      // This is a specific customization to relocate Docker's storage.
+      shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
+      shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
+      shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
+      shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
+
+      // Step 10: Prune all unused Podman data.
+      // Similar to Docker pruning, these commands remove:
+      // - All stopped containers
+      // - All unused networks
+      // - All unused images
+      // - All unused volumes ('--volumes')
+      // - The '--force' flag bypasses confirmation.
+      // '--external' prunes external content not managed by Podman's default storage backend.
       shellExec(`sudo podman system prune -a -f`);
       shellExec(`sudo podman system prune --all --volumes --force`);
       shellExec(`sudo podman system prune --external --force`);
-      shellExec(`sudo podman system prune --all --volumes --force`);
+      shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
+
+      // Step 11: Create and set permissions for Podman's custom storage directory.
+      // This ensures the custom path `/home/containers/storage` exists and has correct permissions
+      // before Podman attempts to use it.
       shellExec(`sudo mkdir -p /home/containers/storage`);
       shellExec('sudo chmod 0711 /home/containers/storage');
+
+      // Step 12: Update Podman's storage configuration file.
+      // This command uses 'sed' to modify `/etc/containers/storage.conf`,
+      // changing the default storage path from `/var/lib/containers/storage`
+      // to the customized `/home/containers/storage`.
       shellExec(
         `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
       );
+
+      // Step 13: Reset Podman system settings.
+      // This command resets Podman's system-wide configuration to its default state.
       shellExec(`sudo podman system reset -f`);
+
+      // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
+      // were previously removed. These sysctl settings (bridge-nf-call-iptables,
+      // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
+      // network traffic through Linux bridges to be processed by iptables.
+      // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
+      // Re-initializing Kubernetes will typically set these as needed, and leaving them
+      // at their system default (or '1' if already configured) is safer for host
+      // connectivity during a reset operation.
+
+      // https://github.com/kubernetes-sigs/kind/issues/2886
+      // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
+      // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
+      // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+
+      // Step 14: Remove the 'kind' Docker network.
+      // This cleans up any network bridges or configurations specifically created by Kind.
+      shellExec(`docker network rm kind`);
+    },
+
+    getResourcesCapacity() {
+      const resources = {};
+      const info = false
+        ? `Capacity:
+  cpu: 8
+  ephemeral-storage: 153131976Ki
+  hugepages-1Gi: 0
+  hugepages-2Mi: 0
+  memory: 11914720Ki
+  pods: 110
+Allocatable:
+  cpu: 8
+  ephemeral-storage: 153131976Ki
+  hugepages-1Gi: 0
+  hugepages-2Mi: 0
+  memory: 11914720Ki
+  pods: `
+        : shellExec(`kubectl describe node kind-worker | grep -E '(Allocatable:|Capacity:)' -A 6`, {
+            stdout: true,
+            silent: true,
+          });
+      info
+        .split('Allocatable:')[1]
+        .split('\n')
+        .filter((row) => row.match('cpu') || row.match('memory'))
+        .map((row) => {
+          if (row.match('cpu'))
+            resources.cpu = {
+              value: parseInt(row.split(':')[1].trim()) * 1000,
+              unit: 'm',
+            };
+          if (row.match('memory'))
+            resources.memory = {
+              value: parseInt(row.split(':')[1].split('Ki')[0].trim()),
+              unit: 'Ki',
+            };
+        });
+
+      return resources;
     },
   };
 }
package/src/cli/cron.js
CHANGED

@@ -4,20 +4,24 @@
  * @namespace UnderpostCron
  */

-import
+import { DataBaseProvider } from '../db/DataBaseProvider.js';
 import BackUp from '../server/backup.js';
 import { Cmd } from '../server/conf.js';
 import Dns from '../server/dns.js';
-import {
+import { loggerFactory } from '../server/logger.js';
+
 import { shellExec } from '../server/process.js';
 import fs from 'fs-extra';

+const logger = loggerFactory(import.meta);
+
 /**
  * UnderpostCron main module methods
  * @class
  * @memberof UnderpostCron
  */
 class UnderpostCron {
+  static NETWORK = [];
   static JOB = {
     /**
      * DNS cli API
@@ -46,10 +50,10 @@ class UnderpostCron {
     callback: async function (
       deployList = 'default',
       jobList = Object.keys(UnderpostCron.JOB),
-      options = {
+      options = { itc: false, init: false, git: false, dashboardUpdate: false },
     ) {
       if (options.init === true) {
-
+        UnderpostCron.NETWORK = [];
         const jobDeployId = fs.readFileSync('./engine-private/deploy/dd.cron', 'utf8').trim();
         deployList = fs.readFileSync('./engine-private/deploy/dd.router', 'utf8').trim();
         const confCronConfig = JSON.parse(fs.readFileSync(`./engine-private/conf/${jobDeployId}/conf.cron.json`));
@@ -57,7 +61,7 @@ class UnderpostCron {
         for (const job of Object.keys(confCronConfig.jobs)) {
           const name = `${jobDeployId}-${job}`;
           let deployId;
-          shellExec(Cmd.delete(name));
+          if (!options.dashboardUpdate) shellExec(Cmd.delete(name));
           switch (job) {
             case 'dns':
               deployId = jobDeployId;
@@ -67,15 +71,16 @@ class UnderpostCron {
               deployId = deployList;
               break;
           }
-
-
+          if (!options.dashboardUpdate)
+            shellExec(Cmd.cron(deployId, job, name, confCronConfig.jobs[job].expression, options));
+          UnderpostCron.NETWORK.push({
             deployId,
             jobId: job,
             expression: confCronConfig.jobs[job].expression,
           });
         }
       }
-      await
+      if (options.dashboardUpdate === true) await UnderpostCron.API.updateDashboardData();
       if (fs.existsSync(`./tmp/await-deploy`)) fs.remove(`./tmp/await-deploy`);
       return;
     }
@@ -84,6 +89,32 @@ class UnderpostCron {
       if (UnderpostCron.JOB[jobId]) await UnderpostCron.JOB[jobId].callback(deployList, options);
     }
   },
+    async updateDashboardData() {
+      try {
+        const deployId = process.env.DEFAULT_DEPLOY_ID;
+        const host = process.env.DEFAULT_DEPLOY_HOST;
+        const path = process.env.DEFAULT_DEPLOY_PATH;
+        const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+        const confServer = JSON.parse(fs.readFileSync(confServerPath, 'utf8'));
+        const { db } = confServer[host][path];
+
+        await DataBaseProvider.load({ apis: ['cron'], host, path, db });
+
+        /** @type {import('../api/cron/cron.model.js').CronModel} */
+        const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
+
+        await Cron.deleteMany();
+
+        for (const cronInstance of UnderpostCron.NETWORK) {
+          logger.info('save', cronInstance);
+          await new Cron(cronInstance).save();
+        }
+
+        await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
+      } catch (error) {
+        logger.error(error, error.stack);
+      }
+    },
   };
 }
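For context on the scheduling changes above: the callback reads ./engine-private/conf/<jobDeployId>/conf.cron.json, touches only jobs.<name>.expression, and pushes one { deployId, jobId, expression } record per job into UnderpostCron.NETWORK, which updateDashboardData() then persists through the Cron model. The sketch below shows a hypothetical config shape and the record it would produce; the cron expression and deploy id are assumptions, only the dns job name and the expression field come from the diff.

// Illustrative shape only: the callback reads conf.cron.json and uses jobs.<name>.expression.
// The 'dns' job name appears in the diff; the expression value here is an assumption.
const confCronConfig = {
  jobs: {
    dns: { expression: '*/5 * * * *' },
  },
};

// Unless options.dashboardUpdate is set, each job is (re)scheduled via Cmd.delete(name) and
// Cmd.cron(deployId, job, name, expression, options); in every case an entry like this is
// pushed to UnderpostCron.NETWORK for updateDashboardData() to save through the Cron model.
const networkEntry = {
  deployId: 'dd', // hypothetical id read from ./engine-private/deploy/dd.cron
  jobId: 'dns',
  expression: confCronConfig.jobs.dns.expression,
};

console.log(networkEntry);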