@underpostnet/underpost 2.8.1 → 2.8.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.dockerignore +1 -0
- package/.github/workflows/ghpkg.yml +20 -50
- package/.github/workflows/npmpkg.yml +67 -0
- package/.github/workflows/publish.yml +5 -5
- package/.github/workflows/pwa-microservices-template.page.yml +13 -5
- package/.github/workflows/pwa-microservices-template.test.yml +2 -2
- package/.vscode/extensions.json +17 -71
- package/.vscode/settings.json +14 -3
- package/AUTHORS.md +16 -5
- package/CHANGELOG.md +79 -3
- package/Dockerfile +24 -66
- package/README.md +1 -28
- package/bin/build.js +161 -0
- package/bin/db.js +2 -24
- package/bin/deploy.js +111 -82
- package/bin/file.js +59 -16
- package/bin/index.js +168 -58
- package/bin/ssl.js +19 -11
- package/bin/util.js +9 -97
- package/bin/vs.js +25 -2
- package/conf.js +31 -138
- package/docker-compose.yml +1 -1
- package/manifests/core/kustomization.yaml +11 -0
- package/manifests/core/underpost-engine-backup-access.yaml +16 -0
- package/manifests/core/underpost-engine-backup-pv-pvc.yaml +22 -0
- package/manifests/core/underpost-engine-headless-service.yaml +10 -0
- package/manifests/core/underpost-engine-mongodb-backup-cronjob.yaml +40 -0
- package/manifests/core/underpost-engine-mongodb-configmap.yaml +26 -0
- package/manifests/core/underpost-engine-pv-pvc.yaml +23 -0
- package/manifests/core/underpost-engine-statefulset.yaml +91 -0
- package/manifests/deployment/mongo-express/deployment.yaml +60 -0
- package/manifests/deployment/phpmyadmin/deployment.yaml +54 -0
- package/manifests/kind-config.yaml +12 -0
- package/manifests/letsencrypt-prod.yaml +15 -0
- package/manifests/mariadb/config.yaml +10 -0
- package/manifests/mariadb/kustomization.yaml +9 -0
- package/manifests/mariadb/pv.yaml +12 -0
- package/manifests/mariadb/pvc.yaml +10 -0
- package/manifests/mariadb/secret.yaml +8 -0
- package/manifests/mariadb/service.yaml +10 -0
- package/manifests/mariadb/statefulset.yaml +55 -0
- package/manifests/mongodb/backup-access.yaml +16 -0
- package/manifests/mongodb/backup-cronjob.yaml +42 -0
- package/manifests/mongodb/backup-pv-pvc.yaml +22 -0
- package/manifests/mongodb/configmap.yaml +26 -0
- package/manifests/mongodb/headless-service.yaml +10 -0
- package/manifests/mongodb/kustomization.yaml +11 -0
- package/manifests/mongodb/pv-pvc.yaml +23 -0
- package/manifests/mongodb/statefulset.yaml +125 -0
- package/manifests/valkey/kustomization.yaml +7 -0
- package/manifests/valkey/service.yaml +17 -0
- package/manifests/valkey/statefulset.yaml +39 -0
- package/manifests/valkey/underpost-engine-valkey-service.yaml +17 -0
- package/manifests/valkey/underpost-engine-valkey-statefulset.yaml +39 -0
- package/package.json +26 -31
- package/src/api/core/core.service.js +1 -1
- package/src/api/user/user.model.js +16 -3
- package/src/api/user/user.service.js +1 -1
- package/src/cli/cluster.js +154 -0
- package/src/cli/cron.js +90 -0
- package/src/cli/db.js +148 -0
- package/src/cli/deploy.js +277 -0
- package/src/cli/env.js +52 -0
- package/src/cli/image.js +125 -0
- package/src/cli/repository.js +104 -0
- package/src/cli/script.js +29 -0
- package/src/cli/secrets.js +37 -0
- package/src/cli/test.js +83 -0
- package/src/client/components/core/Auth.js +22 -4
- package/src/client/components/core/CalendarCore.js +115 -49
- package/src/client/components/core/CommonJs.js +231 -19
- package/src/client/components/core/Css.js +1 -0
- package/src/client/components/core/CssCore.js +6 -0
- package/src/client/components/core/DropDown.js +5 -1
- package/src/client/components/core/Input.js +18 -4
- package/src/client/components/core/Modal.js +10 -6
- package/src/client/components/core/Panel.js +84 -25
- package/src/client/components/core/PanelForm.js +4 -18
- package/src/client/components/core/Scroll.js +1 -0
- package/src/client/components/core/Translate.js +47 -9
- package/src/client/components/core/Validator.js +9 -1
- package/src/client/components/core/VanillaJs.js +0 -9
- package/src/client/components/core/Worker.js +34 -31
- package/src/client/services/default/default.management.js +4 -2
- package/src/client/ssr/body/CacheControl.js +2 -2
- package/src/db/mongo/MongooseDB.js +13 -1
- package/src/index.js +77 -19
- package/src/runtime/lampp/Lampp.js +1 -13
- package/src/runtime/xampp/Xampp.js +0 -13
- package/src/server/auth.js +3 -3
- package/src/server/backup.js +49 -93
- package/src/server/client-build.js +4 -23
- package/src/server/client-formatted.js +5 -3
- package/src/server/conf.js +193 -45
- package/src/server/dns.js +49 -67
- package/src/server/logger.js +15 -10
- package/src/server/network.js +17 -43
- package/src/server/process.js +25 -2
- package/src/server/proxy.js +4 -26
- package/src/server/runtime.js +14 -29
- package/src/server/ssl.js +1 -1
- package/src/server/valkey.js +2 -0
- package/src/dns.js +0 -22
- package/src/server/prompt-optimizer.js +0 -28
- package/startup.js +0 -11
package/src/cli/cluster.js
ADDED

@@ -0,0 +1,154 @@

import { timer } from '../client/components/core/CommonJs.js';
import { cliSpinner } from '../server/conf.js';
import { loggerFactory } from '../server/logger.js';
import { shellExec } from '../server/process.js';

const logger = loggerFactory(import.meta);

class UnderpostCluster {
  static API = {
    async init(
      options = { mongodb: false, mariadb: false, valkey: false, contour: false, full: false, info: false, nsUse: '' },
    ) {
      if (options.nsUse) {
        shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
        return;
      }
      if (options.info) {
        shellExec(`kubectl config get-contexts`); // persistent config for managing multiple clusters
        shellExec(`kubectl config get-clusters`);
        shellExec(`kubectl get nodes -o wide`); // set of nodes of the cluster
        shellExec(`kubectl config view | grep namespace`);
        shellExec(`kubectl get ns -o wide`); // a namespace can have pods on different nodes
        shellExec(`kubectl get pvc --all-namespaces -o wide`); // PersistentVolumeClaim -> storage request
        shellExec(`kubectl get pv --all-namespaces -o wide`); // PersistentVolume -> actual storage
        shellExec(`kubectl get cronjob --all-namespaces -o wide`);
        shellExec(`kubectl get svc --all-namespaces -o wide`); // proxy/DNS gateway -> deployments, statefulsets, pods
        shellExec(`kubectl get statefulsets --all-namespaces -o wide`); // sets of pods with data/volume persistence
        shellExec(`kubectl get deployments --all-namespaces -o wide`); // sets of pods
        shellExec(`kubectl get configmap --all-namespaces -o wide`);
        shellExec(`kubectl get pods --all-namespaces -o wide`);
        shellExec(
          `kubectl get pod --all-namespaces -o="custom-columns=NAME:.metadata.name,INIT-CONTAINERS:.spec.initContainers[*].name,CONTAINERS:.spec.containers[*].name"`,
        );
        shellExec(
          `kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\\n"}{.metadata.name}{":\\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}'`,
        );
        console.log();
        logger.info('contour -------------------------------------------------');
        for (const _k of ['Cluster', 'HTTPProxy', 'ClusterIssuer', 'Certificate']) {
          shellExec(`kubectl get ${_k} --all-namespaces -o wide`);
        }
        logger.info('----------------------------------------------------------------');
        shellExec(`kubectl get secrets --all-namespaces -o wide`);
        shellExec(`docker secret ls`);
        return;
      }
      const testClusterInit = shellExec(`kubectl get pods --all-namespaces -o wide`, {
        disableLog: true,
        silent: true,
        stdout: true,
      });
      if (!(testClusterInit.match('kube-system') && testClusterInit.match('kube-proxy'))) {
        shellExec(`containerd config default > /etc/containerd/config.toml`);
        shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
        // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
        shellExec(`sudo systemctl restart kubelet`);
        shellExec(`sudo service docker restart`);
        shellExec(`cd ./manifests && kind create cluster --config kind-config.yaml`);
        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
      } else logger.warn('Cluster already initialized');

      if (options.full || options.valkey) {
        shellExec(`kubectl delete statefulset service-valkey`);
        shellExec(`kubectl apply -k ./manifests/valkey`);
      }
      if (options.full || options.mariadb) {
        shellExec(
          `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password`,
        );
        shellExec(
          `sudo kubectl create secret generic github-secret --from-literal=GITHUB_TOKEN=${process.env.GITHUB_TOKEN}`,
        );
        shellExec(`kubectl delete statefulset mariadb-statefulset`);
        shellExec(`kubectl apply -k ./manifests/mariadb`);
      }
      if (options.full || options.mongodb) {
        shellExec(
          `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile`,
        );
        shellExec(
          `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password`,
        );
        shellExec(`kubectl delete statefulset mongodb`);
        shellExec(`kubectl apply -k ./manifests/mongodb`);

        await new Promise(async (resolve) => {
          cliSpinner(3000, `[cluster.js] `, ` Load mongodb instance`, 'yellow', 'material');
          await timer(3000);

          const monitor = async () => {
            cliSpinner(1000, `[cluster.js] `, ` Load mongodb instance`, 'yellow', 'material');
            await timer(1000);
            if (
              shellExec(`kubectl get pods --all-namespaces -o wide`, {
                silent: true,
                stdout: true,
                disableLog: true,
              }).match(`mongodb-1 1/1 Running`)
            )
              return resolve();
            return monitor();
          };
          await monitor();
        });

        const mongoConfig = {
          _id: 'rs0',
          members: [
            { _id: 0, host: 'mongodb-0.mongodb-service:27017', priority: 1 },
            { _id: 1, host: 'mongodb-1.mongodb-service:27017', priority: 1 },
          ],
        };

        shellExec(
          `sudo kubectl exec -i mongodb-0 -- mongosh --quiet --json=relaxed \
--eval 'use admin' \
--eval 'rs.initiate(${JSON.stringify(mongoConfig)})' \
--eval 'rs.status()'`,
        );
      }

      if (options.full || options.contour)
        shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
    },
    reset() {
      shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
      shellExec(`sudo kubeadm reset -f`);
      shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
      shellExec('sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X');
      shellExec('sudo rm -f $HOME/.kube/config');
      shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
      shellExec('sudo docker system prune -a -f');
      shellExec('sudo service docker stop');
      shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
      shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
      shellExec(`sudo rm -rf /var/lib/docker~/*`);
      shellExec(`sudo rm -rf /home/containers/storage/*`);
      shellExec(`sudo rm -rf /home/docker/*`);
      shellExec('sudo mv /var/lib/docker /var/lib/docker~');
      shellExec('sudo mkdir /home/docker');
      shellExec('sudo chmod 0711 /home/docker');
      shellExec('sudo ln -s /home/docker /var/lib/docker');
      shellExec(`sudo podman system prune -a -f`);
      shellExec(`sudo podman system prune --all --volumes --force`);
      shellExec(`sudo podman system prune --external --force`);
      shellExec(`sudo podman system prune --all --volumes --force`);
      shellExec(`sudo mkdir -p /home/containers/storage`);
      shellExec('sudo chmod 0711 /home/containers/storage');
      shellExec(
        `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
      );
      shellExec(`sudo podman system reset -f`);
    },
  };
}

export default UnderpostCluster;
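For orientation only (this sketch is not part of the published diff), the new cluster module can be driven from a Node ES-module script; the relative import path and option values below are assumptions based on the file layout and the init defaults shown above.

// sketch-cluster.mjs (hypothetical usage, paths assumed)
import UnderpostCluster from './src/cli/cluster.js';

// Print cluster-wide info: contexts, nodes, namespaces, workloads, secrets.
await UnderpostCluster.API.init({ info: true });

// Bring up a kind cluster (if none is running) plus the MongoDB and Valkey statefulsets.
await UnderpostCluster.API.init({ mongodb: true, valkey: true });

// Tear everything down: kind clusters, kubeadm state, docker/podman storage.
UnderpostCluster.API.reset();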
package/src/cli/cron.js
ADDED
@@ -0,0 +1,90 @@

/**
 * UnderpostCron CLI index module
 * @module src/cli/cron.js
 * @namespace UnderpostCron
 */

import Underpost from '../index.js';
import BackUp from '../server/backup.js';
import { Cmd } from '../server/conf.js';
import Dns from '../server/dns.js';
import { netWorkCron, saveRuntimeCron } from '../server/network.js';
import { shellExec } from '../server/process.js';
import fs from 'fs-extra';

/**
 * UnderpostCron main module methods
 * @class
 * @memberof UnderpostCron
 */
class UnderpostCron {
  static JOB = {
    /**
     * DNS cli API
     * @static
     * @type {Dns}
     * @memberof UnderpostCron
     */
    dns: Dns,
    /**
     * BackUp cli API
     * @static
     * @type {BackUp}
     * @memberof UnderpostCron
     */
    backup: BackUp,
  };
  static API = {
    /**
     * Run the cron jobs
     * @static
     * @param {String} deployList - Comma separated deploy ids
     * @param {String} jobList - Comma separated job ids
     * @return {void}
     * @memberof UnderpostCron
     */
    callback: async function (
      deployList = 'default',
      jobList = Object.keys(UnderpostCron.JOB).join(','),
      options = { disableKindCluster: false, init: false },
    ) {
      if (options.init === true) {
        await Underpost.test.setUpInfo();
        const jobDeployId = fs.readFileSync('./engine-private/deploy/dd.cron', 'utf8').trim();
        deployList = fs.readFileSync('./engine-private/deploy/dd.router', 'utf8').trim();
        const confCronConfig = JSON.parse(fs.readFileSync(`./engine-private/conf/${jobDeployId}/conf.cron.json`));
        if (confCronConfig.jobs && Object.keys(confCronConfig.jobs).length > 0) {
          for (const job of Object.keys(confCronConfig.jobs)) {
            const name = `${jobDeployId}-${job}`;
            let deployId;
            shellExec(Cmd.delete(name));
            switch (job) {
              case 'dns':
                deployId = jobDeployId;
                break;

              default:
                deployId = deployList;
                break;
            }
            shellExec(Cmd.cron(deployId, job, name, confCronConfig.jobs[job].expression, options));
            netWorkCron.push({
              deployId,
              jobId: job,
              expression: confCronConfig.jobs[job].expression,
            });
          }
        }
        await saveRuntimeCron();
        if (fs.existsSync(`./tmp/await-deploy`)) fs.remove(`./tmp/await-deploy`);
        return;
      }
      for (const _jobId of jobList.split(',')) {
        const jobId = _jobId.trim();
        if (UnderpostCron.JOB[jobId]) await UnderpostCron.JOB[jobId].callback(deployList, options);
      }
    },
  };
}

export default UnderpostCron;
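A similar sketch for the cron module (hypothetical deploy ids, not part of the diff): job ids map to the keys of UnderpostCron.JOB, i.e. dns and backup, and init mode reads the conf.cron.json layout referenced above.

// sketch-cron.mjs (hypothetical usage)
import UnderpostCron from './src/cli/cron.js';

// Run the dns and backup jobs once for two example deploy ids.
await UnderpostCron.API.callback('dd-example-a,dd-example-b', 'dns,backup');

// Register the jobs declared in ./engine-private/conf/<jobDeployId>/conf.cron.json
// and persist them via saveRuntimeCron().
await UnderpostCron.API.callback(undefined, undefined, { init: true });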
package/src/cli/db.js
ADDED
@@ -0,0 +1,148 @@

import { mergeFile, splitFileFactory } from '../server/conf.js';
import { loggerFactory } from '../server/logger.js';
import { shellExec } from '../server/process.js';
import fs from 'fs-extra';

const logger = loggerFactory(import.meta);

class UnderpostDB {
  static API = {
    async callback(deployList = 'default', options = { import: false, export: false }) {
      const newBackupTimestamp = new Date().getTime();
      const nameSpace = 'default';
      for (const _deployId of deployList.split(',')) {
        const deployId = _deployId.trim();
        if (!deployId) continue;
        const dbs = {};
        const repoName = `engine-${deployId.split('dd-')[1]}-cron-backups`;

        const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
        for (const host of Object.keys(confServer)) {
          for (const path of Object.keys(confServer[host])) {
            const { db } = confServer[host][path];
            if (db) {
              const { provider, name, user, password } = db;
              if (!dbs[provider]) dbs[provider] = {};

              if (!(name in dbs[provider]))
                dbs[provider][name] = { user, password, hostFolder: host + path.replaceAll('/', '-') };
            }
          }
        }

        if (!fs.existsSync(`../${repoName}`)) {
          shellExec(`cd .. && underpost clone ${process.env.GITHUB_USERNAME}/${repoName}`);
        } else {
          shellExec(`cd ../${repoName} && underpost pull . ${process.env.GITHUB_USERNAME}/${repoName}`);
        }

        for (const provider of Object.keys(dbs)) {
          for (const dbName of Object.keys(dbs[provider])) {
            const { hostFolder, user, password } = dbs[provider][dbName];
            if (hostFolder) {
              logger.info('', { hostFolder, provider, dbName });

              const backUpPath = `../${repoName}/${hostFolder}`;
              const times = await fs.readdir(backUpPath);
              const currentBackupTimestamp = Math.max(...times.map((t) => parseInt(t)));
              dbs[provider][dbName].currentBackupTimestamp = currentBackupTimestamp;
              const removeBackupTimestamp = Math.min(...times.map((t) => parseInt(t)));

              const sqlContainerPath = `/home/${dbName}.sql`;
              const _fromPartsParts = `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${dbName}-parths.json`;
              const _toSqlPath = `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${dbName}.sql`;
              const _toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
              const _toBsonPath = `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${dbName}`;
              const _toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;

              if (options.import === true && fs.existsSync(_fromPartsParts) && !fs.existsSync(_toSqlPath)) {
                const names = JSON.parse(fs.readFileSync(_fromPartsParts, 'utf8')).map((_path) => {
                  return `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${_path.split('/').pop()}`;
                });
                logger.info('merge Back Up paths', {
                  _fromPartsParts,
                  _toSqlPath,
                  names,
                });
                await mergeFile(names, _toSqlPath);
              }

              if (options.export === true && times.length >= 5) {
                fs.removeSync(`../${repoName}/${hostFolder}/${removeBackupTimestamp}`);
                fs.mkdirSync(`../${repoName}/${hostFolder}/${newBackupTimestamp}`, { recursive: true });
              }

              switch (provider) {
                case 'mariadb': {
                  const podName = `mariadb-statefulset-0`;
                  const serviceName = 'mariadb';
                  if (options.import === true) {
                    shellExec(`sudo kubectl cp ${_toSqlPath} ${nameSpace}/${podName}:/${dbName}.sql`);
                    const cmd = `mariadb -u ${user} -p${password} ${dbName} < /${dbName}.sql`;
                    shellExec(
                      `kubectl exec -i ${podName} -- ${serviceName} -p${password} -e 'CREATE DATABASE ${dbName};'`,
                    );
                    shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
                  }
                  if (options.export === true) {
                    const cmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${sqlContainerPath}`;
                    shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
                    shellExec(`sudo kubectl cp ${nameSpace}/${podName}:${sqlContainerPath} ${_toNewSqlPath}`);
                    await splitFileFactory(dbName, _toNewSqlPath);
                  }
                  break;
                }

                case 'mongoose': {
                  if (options.import === true) {
                    const podName = `mongodb-0`;
                    shellExec(`sudo kubectl cp ${_toBsonPath} ${nameSpace}/${podName}:/${dbName}`);
                    const cmd = `mongorestore -d ${dbName} /${dbName}`;
                    shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
                  }
                  if (options.export === true) {
                    const podName = `backup-access`;
                    const containerBaseBackupPath = '/backup';
                    let timeFolder = shellExec(
                      `sudo kubectl exec -i ${podName} -- sh -c "cd ${containerBaseBackupPath} && ls -a"`,
                      {
                        stdout: true,
                        disableLog: false,
                        silent: true,
                      },
                    ).split(`\n`);
                    timeFolder = timeFolder[timeFolder.length - 2];
                    if (timeFolder === '..') {
                      logger.warn(`No backup available`, { timeFolder });
                    } else {
                      shellExec(
                        `sudo kubectl cp ${nameSpace}/${podName}:${containerBaseBackupPath}/${timeFolder}/${dbName} ${_toNewBsonPath}`,
                      );
                    }
                  }
                  break;
                }

                default:
                  break;
              }
            }
          }
        }
        if (options.export === true) {
          shellExec(`cd ../${repoName} && git add .`);
          shellExec(
            `underpost cmt ../${repoName} backup '' '${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
              newBackupTimestamp,
            ).toLocaleTimeString()}'`,
          );
          shellExec(`cd ../${repoName} && underpost push . ${process.env.GITHUB_USERNAME}/${repoName}`, {
            disableLog: true,
          });
        }
      }
    },
  };
}

export default UnderpostDB;
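Again as a hedged sketch (example deploy id and env vars only, not part of the diff), the db module exports or imports backups for every database declared in a deploy's conf.server.json, using the ../engine-<id>-cron-backups repository convention shown above.

// sketch-db.mjs (hypothetical usage; GITHUB_USERNAME value is a placeholder)
import UnderpostDB from './src/cli/db.js';

process.env.GITHUB_USERNAME = 'example-user';

// Dump MariaDB/MongoDB databases for the deploy id and commit/push the backup repo.
await UnderpostDB.API.callback('dd-example', { export: true });

// Restore the most recent backup into the running pods.
await UnderpostDB.API.callback('dd-example', { import: true });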
package/src/cli/deploy.js
ADDED

@@ -0,0 +1,277 @@

import {
  buildKindPorts,
  buildPortProxyRouter,
  buildProxyRouter,
  Config,
  getDataDeploy,
  loadReplicas,
} from '../server/conf.js';
import { loggerFactory } from '../server/logger.js';
import { shellExec } from '../server/process.js';
import fs from 'fs-extra';
import dotenv from 'dotenv';
import Underpost from '../index.js';

const logger = loggerFactory(import.meta);

class UnderpostDeploy {
  static API = {
    sync(deployList) {
      const deployGroupId = 'dd.tmp';
      fs.writeFileSync(`./engine-private/deploy/${deployGroupId}`, deployList, 'utf8');
      return getDataDeploy({
        buildSingleReplica: true,
        deployGroupId,
      });
    },
    async routerFactory(deployList, env) {
      const initEnvPath = `./engine-private/conf/${deployList.split(',')[0]}/.env.${env}`;
      const initEnvObj = dotenv.parse(fs.readFileSync(initEnvPath, 'utf8'));
      process.env.PORT = initEnvObj.PORT;
      process.env.NODE_ENV = env;
      await Config.build(undefined, 'proxy', deployList);
      return buildPortProxyRouter(env === 'development' ? 80 : 443, buildProxyRouter());
    },
    async buildManifest(deployList, env) {
      for (const _deployId of deployList.split(',')) {
        const deployId = _deployId.trim();
        if (!deployId) continue;

        const router = await UnderpostDeploy.API.routerFactory(deployId, env);
        const ports = Object.values(router).map((p) => parseInt(p.split(':')[2]));
        const fromPort = Math.min(...ports);
        const toPort = Math.max(...ports);
        const confServer = loadReplicas(
          JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8')),
          'proxy',
        );

        fs.mkdirSync(`./engine-private/conf/${deployId}/build/${env}`, { recursive: true });
        if (env === 'development') fs.mkdirSync(`./manifests/deployment/${deployId}-${env}`, { recursive: true });

        logger.info('port range', { deployId, fromPort, toPort });

        const deploymentYamlParts = `apiVersion: apps/v1
kind: Deployment
metadata:
  name: ${deployId}-${env}
  labels:
    app: ${deployId}-${env}
spec:
  replicas: 2
  selector:
    matchLabels:
      app: ${deployId}-${env}
  template:
    metadata:
      labels:
        app: ${deployId}-${env}
    spec:
      containers:
        - name: ${deployId}-${env}
          image: localhost/${deployId}-${env}:${Underpost.version}
---
apiVersion: v1
kind: Service
metadata:
  name: ${deployId}-${env}-service
spec:
  selector:
    app: ${deployId}-${env}
  ports:
  type: LoadBalancer`.split('ports:');
        deploymentYamlParts[1] =
          buildKindPorts(fromPort, toPort) +
          `  type: LoadBalancer
`;

        fs.writeFileSync(
          `./engine-private/conf/${deployId}/build/${env}/deployment.yaml`,
          deploymentYamlParts.join(`ports:
`),
        );

        let proxyYaml = '';
        let secretYaml = '';

        for (const host of Object.keys(confServer)) {
          if (env === 'production')
            secretYaml += `
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: ${host}
spec:
  commonName: ${host}
  dnsNames:
    - ${host}
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  secretName: ${host}`;

          const pathPortConditions = [];
          for (const path of Object.keys(confServer[host])) {
            const { peer } = confServer[host][path];
            if (!router[`${host}${path === '/' ? '' : path}`]) continue;
            const port = parseInt(router[`${host}${path === '/' ? '' : path}`].split(':')[2]);
            // logger.info('', { host, port, path });
            pathPortConditions.push({
              port,
              path,
            });

            if (peer) {
              // logger.info('', { host, port: port + 1, path: '/peer' });
              pathPortConditions.push({
                port: port + 1,
                path: '/peer',
              });
            }
          }

          // logger.info('', { host, pathPortConditions });
          proxyYaml += `
---
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: ${host}
spec:
  virtualhost:
    fqdn: ${host}${
            env === 'development'
              ? ''
              : `
    tls:
      secretName: ${host}`
          }
  routes:`;
          for (const conditionObj of pathPortConditions) {
            const { path, port } = conditionObj;
            proxyYaml += `
    - conditions:
        - prefix: ${path}
      enableWebsockets: true
      services:
        - name: ${deployId}-${env}-service
          port: ${port}`;
          }
        }
        const yamlPath = `./engine-private/conf/${deployId}/build/${env}/proxy.yaml`;
        fs.writeFileSync(yamlPath, proxyYaml, 'utf8');
        if (env === 'production') {
          const yamlPath = `./engine-private/conf/${deployId}/build/${env}/secret.yaml`;
          fs.writeFileSync(yamlPath, secretYaml, 'utf8');
        } else {
          const deploymentsFiles = ['Dockerfile', 'proxy.yaml', 'deployment.yaml'];
          for (const file of deploymentsFiles) {
            if (fs.existsSync(`./engine-private/conf/${deployId}/build/${env}/${file}`)) {
              fs.copyFileSync(
                `./engine-private/conf/${deployId}/build/${env}/${file}`,
                `./manifests/deployment/${deployId}-${env}/${file}`,
              );
            }
          }
        }
      }
    },
    async callback(
      deployList = 'default',
      env = 'development',
      options = { remove: false, infoRouter: false, sync: false, buildManifest: false },
    ) {
      if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd.router`))
        deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
      if (options.sync) UnderpostDeploy.API.sync(deployList);
      if (options.buildManifest === true) await UnderpostDeploy.API.buildManifest(deployList, env);
      if (options.infoRouter === true)
        return logger.info('router', await UnderpostDeploy.API.routerFactory(deployList, env));

      for (const _deployId of deployList.split(',')) {
        const deployId = _deployId.trim();
        if (!deployId) continue;

        shellExec(`sudo kubectl delete svc ${deployId}-${env}-service`);
        shellExec(`sudo kubectl delete deployment ${deployId}-${env}`);

        const etcHost = (
          concat,
        ) => `127.0.0.1 ${concat} localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6`;
        let concatHots = '';

        const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
        for (const host of Object.keys(confServer)) {
          shellExec(`sudo kubectl delete HTTPProxy ${host}`);
          if (!options.remove === true && env === 'development') concatHots += ` ${host}`;
        }

        if (!options.remove === true) {
          shellExec(`sudo kubectl apply -f ./manifests/deployment/${deployId}-${env}/deployment.yaml`);
          shellExec(`sudo kubectl apply -f ./manifests/deployment/${deployId}-${env}/proxy.yaml`);
        }

        let renderHosts;

        switch (process.platform) {
          case 'linux':
            {
              switch (env) {
                case 'development':
                  renderHosts = etcHost(concatHots);
                  fs.writeFileSync(`/etc/hosts`, renderHosts, 'utf8');

                  break;

                default:
                  break;
              }
            }
            break;

          default:
            break;
        }
        logger.info(
          `
` + renderHosts,
        );
      }
    },
    getPods(deployId) {
      const raw = shellExec(`sudo kubectl get pods --all-namespaces -o wide`, {
        stdout: true,
        disableLog: false,
        silent: true,
      });

      const heads = raw
        .split(`\n`)[0]
        .split(' ')
        .filter((_r) => _r.trim());

      const pods = raw
        .split(`\n`)
        .filter((r) => (deployId ? r.match(deployId) : r.trim() && !r.match('NAME')))
        .map((r) => r.split(' ').filter((_r) => _r.trim()));

      const result = [];

      for (const row of pods) {
        const pod = {};
        let index = -1;
        for (const head of heads) {
          index++;
          pod[head] = row[index];
        }
        result.push(pod);
      }

      return result;
    },
  };
}

export default UnderpostDeploy;
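Closing the new CLI set, a hedged sketch of the deploy module (deploy id and env are placeholders, not part of the diff): buildManifest renders the Deployment/Service/HTTPProxy/Certificate YAML under engine-private and ./manifests/deployment, and callback re-applies it to the cluster.

// sketch-deploy.mjs (hypothetical usage)
import UnderpostDeploy from './src/cli/deploy.js';

// Render deployment.yaml / proxy.yaml (and secret.yaml in production).
await UnderpostDeploy.API.buildManifest('dd-example', 'development');

// Delete the old svc/deployment/HTTPProxy objects and apply the new manifests.
await UnderpostDeploy.API.callback('dd-example', 'development', {});

// Inspect the resulting pods as parsed objects ({ NAMESPACE, NAME, STATUS, ... }).
console.log(UnderpostDeploy.API.getPods('dd-example'));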