underpost 2.8.6 → 2.8.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.vscode/extensions.json +3 -2
- package/.vscode/settings.json +2 -0
- package/CHANGELOG.md +24 -4
- package/Dockerfile +9 -10
- package/README.md +39 -2
- package/bin/build.js +2 -2
- package/bin/deploy.js +1337 -131
- package/bin/file.js +8 -0
- package/bin/index.js +1 -218
- package/cli.md +451 -0
- package/docker-compose.yml +1 -1
- package/jsdoc.json +1 -1
- package/manifests/calico-custom-resources.yaml +25 -0
- package/manifests/deployment/adminer/deployment.yaml +32 -0
- package/manifests/deployment/adminer/kustomization.yaml +7 -0
- package/manifests/deployment/adminer/service.yaml +13 -0
- package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
- package/manifests/deployment/fastapi/backend-service.yml +19 -0
- package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
- package/manifests/deployment/fastapi/frontend-service.yml +15 -0
- package/manifests/deployment/kafka/deployment.yaml +69 -0
- package/manifests/kubeadm-calico-config.yaml +119 -0
- package/manifests/mongodb-4.4/service-deployment.yaml +1 -1
- package/manifests/postgresql/configmap.yaml +9 -0
- package/manifests/postgresql/kustomization.yaml +10 -0
- package/manifests/postgresql/pv.yaml +15 -0
- package/manifests/postgresql/pvc.yaml +13 -0
- package/manifests/postgresql/service.yaml +10 -0
- package/manifests/postgresql/statefulset.yaml +37 -0
- package/manifests/valkey/statefulset.yaml +6 -4
- package/package.json +3 -9
- package/src/api/default/default.service.js +1 -1
- package/src/api/user/user.service.js +14 -11
- package/src/cli/cluster.js +207 -20
- package/src/cli/cron.js +39 -8
- package/src/cli/db.js +20 -10
- package/src/cli/deploy.js +254 -85
- package/src/cli/env.js +9 -3
- package/src/cli/fs.js +21 -9
- package/src/cli/image.js +42 -124
- package/src/cli/index.js +312 -0
- package/src/cli/monitor.js +236 -0
- package/src/cli/repository.js +5 -2
- package/src/client/components/core/Account.js +28 -24
- package/src/client/components/core/Blockchain.js +1 -1
- package/src/client/components/core/CalendarCore.js +14 -84
- package/src/client/components/core/CommonJs.js +2 -1
- package/src/client/components/core/Css.js +0 -1
- package/src/client/components/core/CssCore.js +10 -2
- package/src/client/components/core/Docs.js +1 -1
- package/src/client/components/core/EventsUI.js +3 -3
- package/src/client/components/core/FileExplorer.js +86 -78
- package/src/client/components/core/JoyStick.js +2 -2
- package/src/client/components/core/LoadingAnimation.js +1 -17
- package/src/client/components/core/LogIn.js +3 -3
- package/src/client/components/core/LogOut.js +1 -1
- package/src/client/components/core/Modal.js +14 -8
- package/src/client/components/core/Panel.js +19 -61
- package/src/client/components/core/PanelForm.js +13 -22
- package/src/client/components/core/Recover.js +3 -3
- package/src/client/components/core/RichText.js +1 -11
- package/src/client/components/core/Router.js +3 -1
- package/src/client/components/core/SignUp.js +2 -2
- package/src/client/components/default/RoutesDefault.js +3 -2
- package/src/client/services/default/default.management.js +45 -38
- package/src/client/ssr/Render.js +2 -0
- package/src/index.js +18 -2
- package/src/mailer/MailerProvider.js +3 -0
- package/src/runtime/lampp/Dockerfile +65 -0
- package/src/server/client-build.js +13 -0
- package/src/server/conf.js +93 -1
- package/src/server/dns.js +56 -18
- package/src/server/json-schema.js +77 -0
- package/src/server/network.js +7 -122
- package/src/server/peer.js +2 -2
- package/src/server/proxy.js +4 -4
- package/src/server/runtime.js +24 -11
- package/src/server/start.js +122 -0
- package/src/server/valkey.js +25 -11
@@ -225,8 +225,8 @@ const UserService = {
         } else throw new Error('invalid email or password');

       case 'guest': {
-        const user = await ValkeyAPI.valkeyObjectFactory('user'
-        await ValkeyAPI.setValkeyObject(user.email, user);
+        const user = await ValkeyAPI.valkeyObjectFactory(options, 'user');
+        await ValkeyAPI.setValkeyObject(options, user.email, user);
         return {
           token: hashJWT({ user: UserDto.auth.payload(user) }),
           user: selectDtoFactory(user, UserDto.select.get()),
@@ -325,15 +325,18 @@ const UserService = {
         return await User.find().select(UserDto.select.getAll());

       case 'auth': {
-
-
-
-
-
+        let user;
+        if (req.auth.user._id.match('guest')) {
+          user = await ValkeyAPI.getValkeyObject(options, req.auth.user.email);
+          if (!user) throw new Error('guest user expired');
+        } else
+          user = await User.findOne({
+            _id: req.auth.user._id,
+          });

         const file = await File.findOne({ _id: user.profileImageId });

-        if (!file && !(await ValkeyAPI.getValkeyObject(req.auth.user.email))) {
+        if (!file && !(await ValkeyAPI.getValkeyObject(options, req.auth.user.email))) {
           await User.findByIdAndUpdate(
             user._id,
             { profileImageId: await getDefaultProfileImageId(File) },
@@ -342,8 +345,8 @@ const UserService = {
           },
         );
         }
-        return (await ValkeyAPI.getValkeyObject(req.auth.user.email))
-          ? selectDtoFactory(await ValkeyAPI.getValkeyObject(req.auth.user.email), UserDto.select.get())
+        return (await ValkeyAPI.getValkeyObject(options, req.auth.user.email))
+          ? selectDtoFactory(await ValkeyAPI.getValkeyObject(options, req.auth.user.email), UserDto.select.get())
           : await User.findOne({
               _id: req.auth.user._id,
             }).select(UserDto.select.get());
@@ -378,7 +381,7 @@ const UserService = {
       switch (user.role) {
         case 'admin': {
           if (req.params.id) return await User.findByIdAndDelete(req.params.id);
-          else return await
+          else return await User.deleteMany();
         }
         default:
           if (req.auth.user._id !== req.params.id) throw new Error(`Invalid token user id`);
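The hunks above thread an `options` argument through every ValkeyAPI call. A minimal sketch of the resulting call pattern, using only the method names visible in this diff (the exact shape of `options` is not shown in these hunks and is assumed to carry the request/deployment context):

const user = await ValkeyAPI.valkeyObjectFactory(options, 'user'); // build a transient guest user
await ValkeyAPI.setValkeyObject(options, user.email, user); // cache it under the email key
const cached = await ValkeyAPI.getValkeyObject(options, user.email); // later reads pass the same options
if (!cached) throw new Error('guest user expired'); // mirrors the expiry check above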
package/src/cli/cluster.js
CHANGED
@@ -1,5 +1,4 @@
-import {
-import { cliSpinner, getNpmRootPath } from '../server/conf.js';
+import { getNpmRootPath } from '../server/conf.js';
 import { loggerFactory } from '../server/logger.js';
 import { shellExec } from '../server/process.js';
 import UnderpostDeploy from './deploy.js';
@@ -15,6 +14,7 @@ class UnderpostCluster {
       mongodb: false,
       mongodb4: false,
       mariadb: false,
+      postgresql: false,
       valkey: false,
       full: false,
       info: false,
@@ -23,10 +23,21 @@ class UnderpostCluster {
       reset: false,
       dev: false,
       nsUse: '',
+      infoCapacity: false,
+      infoCapacityPod: false,
+      istio: false,
+      pullImage: false,
     },
   ) {
+    // 1) Install kind, kubeadm, docker, podman
+    // 2) Check kubectl, kubelet, containerd.io
+    // 3) Install Nvidia drivers from Rocky Linux docs
+    // 4) Install LXD with MAAS from Rocky Linux docs
+    // 5) Install MAAS src from snap
     const npmRoot = getNpmRootPath();
     const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+    if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
+    if (options.infoCapacity === true) return logger.info('', UnderpostCluster.API.getResourcesCapacity());
     if (options.reset === true) return await UnderpostCluster.API.reset();
     if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
@@ -63,26 +74,55 @@ class UnderpostCluster {
       shellExec(`kubectl get secrets --all-namespaces -o wide`);
       shellExec(`docker secret ls`);
       shellExec(`kubectl get crd --all-namespaces -o wide`);
+      shellExec(`sudo kubectl api-resources`);
       return;
     }

-    if (
+    if (
+      (!options.istio && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
+      (options.istio === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0])
+    ) {
+      shellExec(`sudo setenforce 0`);
+      shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+      // sudo systemctl disable kubelet
+      // shellExec(`sudo systemctl enable --now kubelet`);
       shellExec(`containerd config default > /etc/containerd/config.toml`);
       shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
       // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
-      shellExec(`sudo systemctl restart kubelet`);
+      // shellExec(`sudo systemctl restart kubelet`);
       shellExec(`sudo service docker restart`);
       shellExec(`sudo systemctl enable --now containerd.service`);
-      shellExec(`sudo
-
-      `
-
-
-
-
+      shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+      if (options.istio === true) {
+        shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+        shellExec(`sudo kubeadm init --pod-network-cidr=192.168.0.0/16`);
+        shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
+        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+        // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+        shellExec(
+          `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
+        );
+        // shellExec(
+        //   `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
+        // );
+        shellExec(`sudo kubectl apply -f ./manifests/kubeadm-calico-config.yaml`);
+        shellExec(`sudo systemctl restart containerd`);
+      } else {
+        shellExec(`sudo systemctl restart containerd`);
+        shellExec(
+          `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
+            options?.dev === true ? '-dev' : ''
+          }.yaml`,
+        );
+        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+      }
     } else logger.warn('Cluster already initialized');

     if (options.full === true || options.valkey === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull valkey/valkey`);
+        shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+      }
       shellExec(`kubectl delete statefulset service-valkey`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
     }
@@ -96,7 +136,21 @@ class UnderpostCluster {
       shellExec(`kubectl delete statefulset mariadb-statefulset`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
     }
+    if (options.full === true || options.postgresql === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull postgres:latest`);
+        shellExec(`sudo kind load docker-image postgres:latest`);
+      }
+      shellExec(
+        `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+      );
+      shellExec(`kubectl apply -k ./manifests/postgresql`);
+    }
     if (options.mongodb4 === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull mongo:4.4`);
+        shellExec(`sudo kind load docker-image mongo:4.4`);
+      }
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);

       const deploymentName = 'mongodb-deployment';
@@ -168,34 +222,167 @@ class UnderpostCluster {
       shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
     }
   },
+  // This function performs a comprehensive reset of Kubernetes and container environments
+  // on the host machine. Its primary goal is to clean up cluster components, temporary files,
+  // and container data, ensuring a clean state for re-initialization or fresh deployments,
+  // while also preventing the loss of the host machine's internet connectivity.
+
   reset() {
+    // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
+    // 'kind get clusters' lists all Kind clusters.
+    // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
+    // and executes 'kind delete cluster --name <cluster_name>' to remove them.
     shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
+
+    // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
+    // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
+    // configuration files, and associated network rules (like iptables entries created by kubeadm).
+    // The '-f' flag bypasses confirmation prompts.
     shellExec(`sudo kubeadm reset -f`);
+
+    // Step 3: Remove specific CNI (Container Network Interface) configuration files.
+    // This command targets and removes the configuration file for Flannel,
+    // a common CNI plugin, which might be left behind after a reset.
     shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');

+    // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
+    // This command would flush all iptables rules, including those crucial for the host's general
+    // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
+    // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
+    // default network configuration.
+
+    // Step 4: Remove the kubectl configuration file from the current user's home directory.
+    // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
+    // providing a clean slate for connecting to a new or re-initialized cluster.
     shellExec('sudo rm -f $HOME/.kube/config');
+
+    // Step 5: Clear trash files from the root user's trash directory.
+    // This is a general cleanup step to remove temporary or deleted files.
     shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
+
+    // Step 6: Prune all unused Docker data.
+    // 'docker system prune -a -f' removes:
+    // - All stopped containers
+    // - All unused networks
+    // - All dangling images
+    // - All build cache
+    // - All unused volumes
+    // This aggressively frees up disk space and removes temporary Docker artifacts.
     shellExec('sudo docker system prune -a -f');
+
+    // Step 7: Stop the Docker daemon service.
+    // This step is often necessary to ensure that Docker's files and directories
+    // can be safely manipulated or moved in subsequent steps without conflicts.
     shellExec('sudo service docker stop');
+
+    // Step 8: Aggressively remove container storage data for containerd and Docker.
+    // These commands target the default storage locations for containerd and Docker,
+    // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
+    // This ensures a complete wipe of all container images, layers, and volumes.
     shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
     shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
-    shellExec(`sudo rm -rf /var/lib/docker~/*`);
-    shellExec(`sudo rm -rf /home/containers/storage/*`);
-    shellExec(`sudo rm -rf /home/docker/*`);
-
-
-
-
+    shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
+    shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
+    shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
+
+    // Step 9: Re-configure Docker's default storage location (if desired).
+    // These commands effectively move Docker's data directory from its default `/var/lib/docker`
+    // to a new location (`/home/docker`) and create a symbolic link.
+    // This is a specific customization to relocate Docker's storage.
+    shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
+    shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
+    shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
+    shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
+
+    // Step 10: Prune all unused Podman data.
+    // Similar to Docker pruning, these commands remove:
+    // - All stopped containers
+    // - All unused networks
+    // - All unused images
+    // - All unused volumes ('--volumes')
+    // - The '--force' flag bypasses confirmation.
+    // '--external' prunes external content not managed by Podman's default storage backend.
     shellExec(`sudo podman system prune -a -f`);
     shellExec(`sudo podman system prune --all --volumes --force`);
     shellExec(`sudo podman system prune --external --force`);
-    shellExec(`sudo podman system prune --all --volumes --force`);
+    shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
+
+    // Step 11: Create and set permissions for Podman's custom storage directory.
+    // This ensures the custom path `/home/containers/storage` exists and has correct permissions
+    // before Podman attempts to use it.
     shellExec(`sudo mkdir -p /home/containers/storage`);
     shellExec('sudo chmod 0711 /home/containers/storage');
+
+    // Step 12: Update Podman's storage configuration file.
+    // This command uses 'sed' to modify `/etc/containers/storage.conf`,
+    // changing the default storage path from `/var/lib/containers/storage`
+    // to the customized `/home/containers/storage`.
     shellExec(
       `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
     );
+
+    // Step 13: Reset Podman system settings.
+    // This command resets Podman's system-wide configuration to its default state.
     shellExec(`sudo podman system reset -f`);
+
+    // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
+    // were previously removed. These sysctl settings (bridge-nf-call-iptables,
+    // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
+    // network traffic through Linux bridges to be processed by iptables.
+    // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
+    // Re-initializing Kubernetes will typically set these as needed, and leaving them
+    // at their system default (or '1' if already configured) is safer for host
+    // connectivity during a reset operation.
+
+    // https://github.com/kubernetes-sigs/kind/issues/2886
+    // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
+    // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
+    // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+
+    // Step 14: Remove the 'kind' Docker network.
+    // This cleans up any network bridges or configurations specifically created by Kind.
+    shellExec(`docker network rm kind`);
+  },
+
+  getResourcesCapacity() {
+    const resources = {};
+    const info = false
+      ? `Capacity:
+  cpu: 8
+  ephemeral-storage: 153131976Ki
+  hugepages-1Gi: 0
+  hugepages-2Mi: 0
+  memory: 11914720Ki
+  pods: 110
+Allocatable:
+  cpu: 8
+  ephemeral-storage: 153131976Ki
+  hugepages-1Gi: 0
+  hugepages-2Mi: 0
+  memory: 11914720Ki
+  pods: `
+      : shellExec(`kubectl describe node kind-worker | grep -E '(Allocatable:|Capacity:)' -A 6`, {
+          stdout: true,
+          silent: true,
+        });
+    info
+      .split('Allocatable:')[1]
+      .split('\n')
+      .filter((row) => row.match('cpu') || row.match('memory'))
+      .map((row) => {
+        if (row.match('cpu'))
+          resources.cpu = {
+            value: parseInt(row.split(':')[1].trim()) * 1000,
+            unit: 'm',
+          };
+        if (row.match('memory'))
+          resources.memory = {
+            value: parseInt(row.split(':')[1].split('Ki')[0].trim()),
+            unit: 'Ki',
+          };
+      });
+
+    return resources;
   },
 };
 }
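The new `getResourcesCapacity()` above shells out to `kubectl describe node kind-worker` and reduces the Allocatable block to a small object. A hedged sketch of what a caller might see (the import path and default export are assumptions; values depend on the node):

import UnderpostCluster from './cluster.js'; // assumed relative path within src/cli

const capacity = UnderpostCluster.API.getResourcesCapacity();
// Approximate shape, based on the parsing above:
// { cpu: { value: 8000, unit: 'm' }, memory: { value: 11914720, unit: 'Ki' } }
// cpu is the allocatable core count multiplied by 1000 (millicores);
// memory is the allocatable figure parsed before the 'Ki' suffix.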
package/src/cli/cron.js
CHANGED
@@ -4,20 +4,24 @@
  * @namespace UnderpostCron
  */

-import
+import { DataBaseProvider } from '../db/DataBaseProvider.js';
 import BackUp from '../server/backup.js';
 import { Cmd } from '../server/conf.js';
 import Dns from '../server/dns.js';
-import {
+import { loggerFactory } from '../server/logger.js';
+
 import { shellExec } from '../server/process.js';
 import fs from 'fs-extra';

+const logger = loggerFactory(import.meta);
+
 /**
  * UnderpostCron main module methods
  * @class
  * @memberof UnderpostCron
  */
 class UnderpostCron {
+  static NETWORK = [];
   static JOB = {
     /**
      * DNS cli API
@@ -46,10 +50,10 @@ class UnderpostCron {
     callback: async function (
       deployList = 'default',
       jobList = Object.keys(UnderpostCron.JOB),
-      options = { itc: false, init: false, git: false },
+      options = { itc: false, init: false, git: false, dashboardUpdate: false },
     ) {
       if (options.init === true) {
-
+        UnderpostCron.NETWORK = [];
         const jobDeployId = fs.readFileSync('./engine-private/deploy/dd.cron', 'utf8').trim();
         deployList = fs.readFileSync('./engine-private/deploy/dd.router', 'utf8').trim();
         const confCronConfig = JSON.parse(fs.readFileSync(`./engine-private/conf/${jobDeployId}/conf.cron.json`));
@@ -57,7 +61,7 @@ class UnderpostCron {
         for (const job of Object.keys(confCronConfig.jobs)) {
           const name = `${jobDeployId}-${job}`;
           let deployId;
-          shellExec(Cmd.delete(name));
+          if (!options.dashboardUpdate) shellExec(Cmd.delete(name));
           switch (job) {
             case 'dns':
               deployId = jobDeployId;
@@ -67,15 +71,16 @@ class UnderpostCron {
               deployId = deployList;
               break;
           }
-
-
+          if (!options.dashboardUpdate)
+            shellExec(Cmd.cron(deployId, job, name, confCronConfig.jobs[job].expression, options));
+          UnderpostCron.NETWORK.push({
             deployId,
             jobId: job,
             expression: confCronConfig.jobs[job].expression,
           });
         }
       }
-      await
+      if (options.dashboardUpdate === true) await UnderpostCron.API.updateDashboardData();
       if (fs.existsSync(`./tmp/await-deploy`)) fs.remove(`./tmp/await-deploy`);
       return;
     }
@@ -84,6 +89,32 @@ class UnderpostCron {
       if (UnderpostCron.JOB[jobId]) await UnderpostCron.JOB[jobId].callback(deployList, options);
     }
   },
+  async updateDashboardData() {
+    try {
+      const deployId = process.env.DEFAULT_DEPLOY_ID;
+      const host = process.env.DEFAULT_DEPLOY_HOST;
+      const path = process.env.DEFAULT_DEPLOY_PATH;
+      const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+      const confServer = JSON.parse(fs.readFileSync(confServerPath, 'utf8'));
+      const { db } = confServer[host][path];
+
+      await DataBaseProvider.load({ apis: ['cron'], host, path, db });
+
+      /** @type {import('../api/cron/cron.model.js').CronModel} */
+      const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
+
+      await Cron.deleteMany();
+
+      for (const cronInstance of UnderpostCron.NETWORK) {
+        logger.info('save', cronInstance);
+        await new Cron(cronInstance).save();
+      }
+
+      await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
+    } catch (error) {
+      logger.error(error, error.stack);
+    }
+  },
   };
 }
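The `dashboardUpdate` flag above makes the cron callback collect entries into `UnderpostCron.NETWORK` and persist them through `updateDashboardData()` instead of re-registering the jobs. A minimal sketch of what that path reads from the environment, with placeholder values (only the variable and method names come from the diff):

process.env.DEFAULT_DEPLOY_ID = 'dd-example'; // selects ./engine-private/conf/<id>/conf.server.json
process.env.DEFAULT_DEPLOY_HOST = 'example.com'; // host key inside that conf file
process.env.DEFAULT_DEPLOY_PATH = '/'; // path key inside that conf file

// With UnderpostCron.NETWORK already populated by the init pass, this replaces
// the Cron collection contents for the configured deployment:
await UnderpostCron.API.updateDashboardData();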
package/src/cli/db.js
CHANGED
@@ -15,11 +15,13 @@ class UnderpostDB {
       export: false,
       podName: false,
       ns: false,
-
+      collections: '',
       outPath: '',
       drop: false,
       preserveUUID: false,
       git: false,
+      hosts: '',
+      paths: '',
     },
   ) {
     const newBackupTimestamp = new Date().getTime();
@@ -39,20 +41,28 @@ class UnderpostDB {
           if (!dbs[provider]) dbs[provider] = {};

           if (!(name in dbs[provider]))
-            dbs[provider][name] = { user, password, hostFolder: host + path.replaceAll('/', '-') };
+            dbs[provider][name] = { user, password, hostFolder: host + path.replaceAll('/', '-'), host, path };
         }
       }
     }

-    if (
-
-
-
+    if (options.git === true) {
+      if (!fs.existsSync(`../${repoName}`)) {
+        shellExec(`cd .. && underpost clone ${process.env.GITHUB_USERNAME}/${repoName}`);
+      } else {
+        shellExec(`cd ../${repoName} && git checkout . && git clean -f -d`);
+        shellExec(`cd ../${repoName} && underpost pull . ${process.env.GITHUB_USERNAME}/${repoName}`);
+      }
     }

     for (const provider of Object.keys(dbs)) {
       for (const dbName of Object.keys(dbs[provider])) {
-        const { hostFolder, user, password } = dbs[provider][dbName];
+        const { hostFolder, user, password, host, path } = dbs[provider][dbName];
+        if (
+          (options.hosts && !options.hosts.split(',').includes(host)) ||
+          (options.paths && !options.paths.split(',').includes(path))
+        )
+          continue;
         if (hostFolder) {
           logger.info('', { hostFolder, provider, dbName });

@@ -153,11 +163,11 @@ class UnderpostDB {
           const podName = podNameData.NAME;
           shellExec(`sudo kubectl exec -i ${podName} -- sh -c "rm -rf /${dbName}"`);
           if (options.collections)
-            for (const collection of options.collections)
+            for (const collection of options.collections.split(','))
               shellExec(
-                `sudo kubectl exec -i ${podName} -- sh -c "mongodump -d ${dbName} --collection ${collection} -o
+                `sudo kubectl exec -i ${podName} -- sh -c "mongodump -d ${dbName} --collection ${collection} -o /"`,
               );
-          else shellExec(`sudo kubectl exec -i ${podName} -- sh -c "mongodump -d ${dbName} -o
+          else shellExec(`sudo kubectl exec -i ${podName} -- sh -c "mongodump -d ${dbName} -o /"`);
           shellExec(
             `sudo kubectl cp ${nameSpace}/${podName}:/${dbName} ${
               options.outPath ? options.outPath : _toNewBsonPath
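For reference, the new `hosts` and `paths` options in the db.js hunks above act as comma-separated allow-lists: a database entry is skipped unless its host and path both match. A small sketch of that skip condition, copied from the diff with placeholder values:

const options = { hosts: 'example.com', paths: '/,/admin' }; // placeholder values
const entry = { host: 'example.com', path: '/admin' };
const skip =
  (options.hosts && !options.hosts.split(',').includes(entry.host)) ||
  (options.paths && !options.paths.split(',').includes(entry.path));
// skip === false here, so this database's backup would be processed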
|