underpost 2.8.85 → 2.8.86

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/.env.development +1 -1
  2. package/.env.production +1 -1
  3. package/.env.test +1 -1
  4. package/.github/workflows/pwa-microservices-template-page.cd.yml +1 -1
  5. package/.github/workflows/release.cd.yml +37 -0
  6. package/README.md +7 -44
  7. package/bin/cyberia0.js +78 -0
  8. package/bin/db.js +1 -3
  9. package/bin/deploy.js +13 -350
  10. package/bin/file.js +11 -1
  11. package/cli.md +39 -19
  12. package/manifests/deployment/{dd-template-development → dd-default-development}/deployment.yaml +16 -16
  13. package/manifests/deployment/{dd-template-development → dd-default-development}/proxy.yaml +3 -3
  14. package/manifests/grafana/deployment.yaml +57 -0
  15. package/manifests/grafana/kustomization.yaml +7 -0
  16. package/manifests/grafana/pvc.yaml +12 -0
  17. package/manifests/grafana/service.yaml +14 -0
  18. package/manifests/maas/ssh-cluster-info.sh +14 -0
  19. package/manifests/prometheus/deployment.yaml +82 -0
  20. package/package.json +1 -2
  21. package/src/api/user/user.service.js +8 -34
  22. package/src/cli/cluster.js +41 -2
  23. package/src/cli/cron.js +12 -45
  24. package/src/cli/db.js +149 -0
  25. package/src/cli/deploy.js +20 -81
  26. package/src/cli/index.js +20 -6
  27. package/src/cli/monitor.js +1 -4
  28. package/src/cli/repository.js +12 -5
  29. package/src/cli/run.js +77 -14
  30. package/src/client/Default.index.js +0 -2
  31. package/src/client/components/core/Account.js +6 -2
  32. package/src/client/components/core/Content.js +11 -7
  33. package/src/client/components/core/Css.js +5 -1
  34. package/src/client/components/core/Input.js +6 -1
  35. package/src/client/components/core/LogIn.js +3 -0
  36. package/src/client/components/core/LogOut.js +1 -1
  37. package/src/client/components/core/Modal.js +7 -4
  38. package/src/client/components/core/Recover.js +5 -2
  39. package/src/client/components/core/Scroll.js +65 -120
  40. package/src/client/components/core/SignUp.js +1 -0
  41. package/src/client/components/core/VanillaJs.js +48 -2
  42. package/src/client/components/default/MenuDefault.js +2 -2
  43. package/src/client/components/default/RoutesDefault.js +3 -3
  44. package/src/index.js +1 -1
  45. package/src/mailer/MailerProvider.js +37 -0
  46. package/src/server/client-build-live.js +1 -1
  47. package/src/server/client-dev-server.js +1 -1
  48. package/src/server/conf.js +2 -272
  49. package/src/server/proxy.js +1 -2
  50. package/src/server/start.js +3 -3
  51. package/docker-compose.yml +0 -67
  52. package/prometheus.yml +0 -36
package/src/server/conf.js
@@ -104,6 +104,7 @@ const loadConf = (deployId, envInput, subConf) => {
     shellExec(`git checkout ${path}/package-lock.json`);
     return;
   }
+  if (!deployId.startsWith('dd-')) deployId = 'dd-default';
   const folder = fs.existsSync(`./engine-private/replica/${deployId}`)
     ? `./engine-private/replica/${deployId}`
     : `./engine-private/conf/${deployId}`;
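Note: the single added line above changes how loadConf resolves configuration folders. A minimal standalone sketch of the new fallback behavior (illustrative only, not the package's actual loadConf; normalizeDeployId and 'dd-example' are hypothetical names):

  // Any deployId without the 'dd-' prefix is coerced to 'dd-default',
  // so configuration lookups always resolve under ./engine-private/{replica,conf}/dd-*.
  const normalizeDeployId = (deployId = '') => (deployId.startsWith('dd-') ? deployId : 'dd-default');

  normalizeDeployId('dd-example'); // 'dd-example'
  normalizeDeployId('default');    // 'dd-default'
  normalizeDeployId('');           // 'dd-default'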
@@ -770,152 +771,22 @@ const validateTemplatePath = (absolutePath = '') => {
   return true;
 };
 
-const deployTest = async (dataDeploy = [{ deployId: 'default' }]) => {
-  const failed = [];
-  for (const deploy of dataDeploy) {
-    const deployServerConfPath = fs.existsSync(`./engine-private/replica/${deploy.deployId}/conf.server.json`)
-      ? `./engine-private/replica/${deploy.deployId}/conf.server.json`
-      : `./engine-private/conf/${deploy.deployId}/conf.server.json`;
-    const serverConf = loadReplicas(JSON.parse(fs.readFileSync(deployServerConfPath, 'utf8')));
-    let fail = false;
-    for (const host of Object.keys(serverConf))
-      for (const path of Object.keys(serverConf[host])) {
-        const { singleReplica } = serverConf[host][path];
-        if (singleReplica) continue;
-        const urlTest = `https://${host}${path}`;
-        try {
-          const result = await axios.get(urlTest, { timeout: 10000 });
-          const test = result.data.split('<title>');
-          if (test[1])
-            logger.info('Success deploy', {
-              ...deploy,
-              result: test[1].split('</title>')[0],
-              urlTest,
-            });
-          else {
-            logger.error('Error deploy', {
-              ...deploy,
-              result: result.data,
-              urlTest,
-            });
-            fail = true;
-          }
-        } catch (error) {
-          logger.error('Error deploy', {
-            ...deploy,
-            message: error.message,
-            urlTest,
-          });
-          fail = true;
-        }
-      }
-    if (fail) failed.push(deploy);
-  }
-  return { failed };
-};
-
 const awaitDeployMonitor = async (init = false, deltaMs = 1000) => {
   if (init) fs.writeFileSync(`./tmp/await-deploy`, '', 'utf8');
   await timer(deltaMs);
   if (fs.existsSync(`./tmp/await-deploy`)) return await awaitDeployMonitor();
 };
 
-const getDeployGroupId = () => {
-  const deployGroupIndexArg = process.argv.findIndex((a) => a.match(`deploy-group:`));
-  if (deployGroupIndexArg > -1) return process.argv[deployGroupIndexArg].split(':')[1].trim();
-  return 'dd';
-};
-
 const getDeployId = () => {
   const deployIndexArg = process.argv.findIndex((a) => a.match(`deploy-id:`));
   if (deployIndexArg > -1) return process.argv[deployIndexArg].split(':')[1].trim();
-  for (const deployId of process.argv) {
-    if (fs.existsSync(`./engine-private/conf/${deployId}`)) return deployId;
-    else if (fs.existsSync(`./engine-private/replica/${deployId}`)) return deployId;
-  }
-  return 'default';
+  return 'dd-default';
 };
 
 const getCronBackUpFolder = (host = '', path = '') => {
   return `${host}${path.replace(/\\/g, '/').replace(`/`, '-')}`;
 };
 
-const execDeploy = async (options = { deployId: 'default' }, currentAttempt = 1) => {
-  const { deployId } = options;
-  shellExec(Cmd.delete(deployId));
-  shellExec(Cmd.conf(deployId));
-  shellExec(Cmd.run(deployId));
-  const maxTime = 1000 * 60;
-  const minTime = 20 * 1000;
-  const intervalTime = 1000;
-  return await new Promise(async (resolve) => {
-    let currentTime = 0;
-    const attempt = () => {
-      if (currentTime >= minTime && !fs.existsSync(`./tmp/await-deploy`)) {
-        clearInterval(processMonitor);
-        return resolve(true);
-      }
-      cliSpinner(
-        intervalTime,
-        `[deploy.js] `,
-        ` Load instance | attempt:${currentAttempt} | elapsed time ${currentTime / 1000}s / ${maxTime / 1000}s`,
-        'yellow',
-        'material',
-      );
-      currentTime += intervalTime;
-      if (currentTime >= maxTime) {
-        clearInterval(processMonitor);
-        return resolve(false);
-      }
-    };
-    const processMonitor = setInterval(attempt, intervalTime);
-  });
-};
-
-const deployRun = async (dataDeploy, currentAttempt = 1) => {
-  if (!fs.existsSync(`./tmp`)) fs.mkdirSync(`./tmp`, { recursive: true });
-  await fixDependencies();
-  const maxAttempts = 3;
-  for (const deploy of dataDeploy) {
-    let currentAttempt = 1;
-    const attempt = async () => {
-      const success = await execDeploy(deploy, currentAttempt);
-      currentAttempt++;
-      if (!success && currentAttempt <= maxAttempts) await attempt();
-    };
-    await attempt();
-  }
-  const { failed } = await deployTest(dataDeploy);
-  if (failed.length > 0) {
-    for (const deploy of failed) logger.error(deploy.deployId, Cmd.run(deploy.deployId));
-    if (currentAttempt === maxAttempts) return logger.error(`max deploy attempts exceeded`);
-    await read({ prompt: 'Press enter to retry failed processes\n' });
-    currentAttempt++;
-    await deployRun(failed, currentAttempt);
-  } else logger.info(`Deploy process successfully`);
-};
-
-const restoreMacroDb = async (deployGroupId = '', deployId = null) => {
-  const dataDeploy = await getDataDeploy({ deployGroupId, buildSingleReplica: false });
-  for (const deployGroup of dataDeploy) {
-    if (deployId && deployGroup.deployId !== deployId) continue;
-    if (!deployGroup.replicaHost) {
-      const deployServerConfPath = `./engine-private/conf/${deployGroup.deployId}/conf.server.json`;
-      const serverConf = JSON.parse(fs.readFileSync(deployServerConfPath, 'utf8'));
-
-      for (const host of Object.keys(serverConf)) {
-        for (const path of Object.keys(serverConf[host])) {
-          const { db, singleReplica } = serverConf[host][path];
-          if (db && !singleReplica) {
-            const cmd = `node bin/db ${host}${path} import ${deployGroup.deployId} cron`;
-            shellExec(cmd);
-          }
-        }
-      }
-    }
-  }
-};
-
 const mergeFile = async (parts = [], outputFilePath) => {
   await new Promise((resolve) => {
     splitFile
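Note: with the hunk above, getDeployId no longer scans process.argv for existing conf/replica folders, and getDeployGroupId plus the execDeploy/deployRun/deployTest retry loop are removed entirely. A minimal sketch of the simplified deploy-id resolution that remains (illustrative restatement only; resolveDeployId is a hypothetical name, and the real code uses findIndex/match rather than find/startsWith):

  const resolveDeployId = (argv = process.argv) => {
    // 1) An explicit 'deploy-id:<id>' CLI argument still wins.
    const arg = argv.find((a) => a.startsWith('deploy-id:'));
    if (arg) return arg.split(':')[1].trim();
    // 2) Otherwise the new hardcoded fallback is used (no directory scan anymore).
    return 'dd-default';
  };

  resolveDeployId(['node', 'bin/deploy', 'deploy-id:dd-example']); // 'dd-example'
  resolveDeployId(['node', 'bin/deploy']);                         // 'dd-default'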
@@ -977,99 +848,6 @@ const rebuildConfFactory = ({ deployId, valkey, mongo }) => {
   return { hosts };
 };
 
-const getRestoreCronCmd = async (options = { host: '', path: '', conf: {}, deployId: '' }) => {
-  const { host, path, conf, deployId } = options;
-  const { runtime, db, git, directory } = conf[host][path];
-  const { provider, name, user, password = '', backupPath = '' } = db;
-
-  if (['xampp', 'lampp'].includes(runtime)) {
-    logger.info('Create database', `node bin/db ${host}${path} create ${deployId}`);
-    shellExec(`node bin/db ${host}${path} create ${deployId}`);
-  }
-
-  if (git) {
-    if (directory && !fs.existsSync(directory)) fs.mkdirSync(directory, { recursive: true });
-
-    shellExec(`git clone ${git}`);
-
-    // fs.mkdirSync(`./public/${host}${path}`, { recursive: true });
-
-    if (fs.existsSync(`./${git.split('/').pop()}`))
-      fs.moveSync(`./${git.split('/').pop()}`, directory ? directory : `./public/${host}${path}`, {
-        overwrite: true,
-      });
-  }
-
-  let cmd, currentBackupTimestamp, baseBackUpPath;
-
-  if (process.argv.includes('cron')) {
-    baseBackUpPath = `${process.cwd()}/engine-private/cron-backups/${getCronBackUpFolder(host, path)}`;
-
-    const files = await fs.readdir(baseBackUpPath, { withFileTypes: true });
-
-    currentBackupTimestamp = files
-      .map((fileObj) => parseInt(fileObj.name))
-      .sort((a, b) => a - b)
-      .reverse()[0];
-  }
-
-  switch (provider) {
-    case 'mariadb':
-      {
-        if (process.argv.includes('cron')) {
-          cmd = `mysql -u ${user} -p${password} ${name} < ${baseBackUpPath}/${currentBackupTimestamp}/${name}.sql`;
-          if (fs.existsSync(`${baseBackUpPath}/${currentBackupTimestamp}/${name}-parths.json`)) {
-            const names = JSON.parse(
-              fs.readFileSync(`${baseBackUpPath}/${currentBackupTimestamp}/${name}-parths.json`, 'utf8'),
-            ).map((p) => p.replaceAll(`\\`, '/').replaceAll('C:/', '/').replaceAll('c:/', '/'));
-
-            await mergeFile(names, `${baseBackUpPath}/${currentBackupTimestamp}/${name}.sql`);
-          }
-        } else {
-          cmd = `mysql -u ${user} -p${password} ${name} < ${
-            backupPath ? backupPath : `./engine-private/sql-backups/${name}.sql`
-          }`;
-          if (
-            fs.existsSync(
-              `${
-                backupPath ? backupPath.split('/').slice(0, -1).join('/') : `./engine-private/sql-backups`
-              }/${name}-parths.json`,
-            )
-          ) {
-            const names = JSON.parse(
-              fs.readFileSync(
-                `${
-                  backupPath ? backupPath.split('/').slice(0, -1).join('/') : `./engine-private/sql-backups`
-                }/${name}-parths.json`,
-                'utf8',
-              ),
-            ).map((p) => p.replaceAll(`\\`, '/').replaceAll('C:/', '/').replaceAll('c:/', '/'));
-
-            await mergeFile(
-              names,
-              `${
-                backupPath ? backupPath.split('/').slice(0, -1).join('/') : `./engine-private/sql-backups`
-              }/${name}.sql`,
-            );
-          }
-        }
-      }
-      break;
-
-    case 'mongoose':
-      {
-        if (process.argv.includes('cron')) {
-          cmd = `mongorestore -d ${name} ${baseBackUpPath}/${currentBackupTimestamp}/${name}`;
-        } else cmd = `mongorestore -d ${name} ${backupPath ? backupPath : `./engine-private/mongodb-backup/${name}`}`;
-      }
-      break;
-  }
-
-  // logger.info('Restore', cmd);
-
-  return cmd;
-};
-
 const getPathsSSR = (conf) => {
   const paths = ['src/client/ssr/Render.js'];
   for (const o of conf.head) paths.push(`src/client/ssr/head/${o}.js`);
@@ -1093,37 +871,6 @@ const Cmd = {
   }${options?.git ? `--git ` : ''}${deployList} ${jobList}`,
 };
 
-const fixDependencies = async () => {
-  return;
-  // sed -i "$line_number s,.*,$new_text," "$file"
-  // sed -i "$line_number c \\$new_text" "$file"
-  const dep = fs.readFileSync(`./node_modules/peer/dist/module.mjs`, 'utf8');
-  const errorLine = `import {WebSocketServer as $hSjDC$WebSocketServer} from "ws";`;
-
-  fs.writeFileSync(
-    `./node_modules/peer/dist/module.mjs`,
-    dep.replaceAll(
-      errorLine,
-      `import WebSocketServer from "ws";
-let $hSjDC$WebSocketServer = WebSocketServer.Server;`,
-    ),
-    'utf8',
-  );
-};
-
-const maintenanceMiddleware = (req, res, port, proxyRouter) => {
-  if (process.argv.includes('maintenance') && globalThis.defaultHtmlSrcMaintenance) {
-    if (req.method.toUpperCase() === 'GET') {
-      res.set('Content-Type', 'text/html');
-      return res.status(503).send(globalThis.defaultHtmlSrcMaintenance);
-    }
-    return res.status(503).json({
-      status: 'error',
-      message: 'Server is under maintenance',
-    });
-  }
-};
-
 const splitFileFactory = async (name, _path) => {
   const stats = fs.statSync(_path);
   const maxSizeInBytes = 1024 * 1024 * 50; // 50 mb
@@ -1152,14 +899,6 @@ const splitFileFactory = async (name, _path) => {
   return false;
 };
 
-const setUpProxyMaintenanceServer = ({ deployGroupId }) => {
-  shellExec(`pm2 kill`);
-  shellExec(`node bin/deploy valkey-service`);
-  const proxyDeployId = fs.readFileSync(`./engine-private/deploy/${deployGroupId}.proxy`, 'utf8').trim();
-  shellExec(`node bin/deploy conf ${proxyDeployId} production`);
-  shellExec(`npm start ${proxyDeployId} maintenance`);
-};
-
 const getNpmRootPath = () =>
   shellExec(`npm root -g`, {
     stdout: true,
@@ -1258,17 +997,9 @@ export {
   getDataDeploy,
   validateTemplatePath,
   buildReplicaId,
-  restoreMacroDb,
-  getDeployGroupId,
-  execDeploy,
-  deployRun,
   getCronBackUpFolder,
-  getRestoreCronCmd,
   mergeFile,
-  fixDependencies,
   getDeployId,
-  maintenanceMiddleware,
-  setUpProxyMaintenanceServer,
   getPathsSSR,
   buildKindPorts,
   buildPortProxyRouter,
@@ -1276,7 +1007,6 @@ export {
   getNpmRootPath,
   getUnderpostRootPath,
   writeEnv,
-  deployTest,
   pathPortAssignmentFactory,
   deployRangePortFactory,
   awaitDeployMonitor,
package/src/server/proxy.js
@@ -6,7 +6,7 @@ import dotenv from 'dotenv';
 import { createProxyMiddleware } from 'http-proxy-middleware';
 import { loggerFactory, loggerMiddleware } from './logger.js';
 import { createSslServer, sslRedirectMiddleware } from './ssl.js';
-import { buildPortProxyRouter, buildProxyRouter, maintenanceMiddleware } from './conf.js';
+import { buildPortProxyRouter, buildProxyRouter } from './conf.js';
 import UnderpostStartUp from './start.js';
 
 dotenv.config();
@@ -48,7 +48,6 @@ const buildProxy = async () => {
       onProxyReq: (proxyReq, req, res, options) => {
         // https://wtools.io/check-http-status-code
        // http://nexodev.org
-        maintenanceMiddleware(req, res, port, proxyRouter);
         sslRedirectMiddleware(req, res, port, proxyRouter);
       },
       pathRewrite: {
package/src/server/start.js
@@ -79,11 +79,11 @@ class UnderpostStartUp {
       }
     }),
 
-    async callback(deployId = 'default', env = 'development', options = { build: false, run: false }) {
+    async callback(deployId = 'dd-default', env = 'development', options = { build: false, run: false }) {
       if (options.build === true) await UnderpostStartUp.API.build(deployId, env);
       if (options.run === true) await UnderpostStartUp.API.run(deployId, env);
     },
-    async build(deployId = 'default', env = 'development') {
+    async build(deployId = 'dd-default', env = 'development') {
       const buildBasePath = `/home/dd`;
       const repoName = `engine-${deployId.split('-')[1]}`;
       shellExec(`cd ${buildBasePath} && underpost clone underpostnet/${repoName}`);
@@ -100,7 +100,7 @@ class UnderpostStartUp {
       }
       shellExec(`node bin/deploy build-full-client ${deployId}`);
     },
-    async run(deployId = 'default', env = 'development') {
+    async run(deployId = 'dd-default', env = 'development') {
      const runCmd = env === 'production' ? 'run prod-img' : 'run dev-img';
      if (fs.existsSync(`./engine-private/replica`)) {
        const replicas = await fs.readdir(`./engine-private/replica`);
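Note: the 'dd-default' defaults above feed the repoName derivation already present in build(), `engine-${deployId.split('-')[1]}`. A minimal sketch of that mapping under the 'dd-<name>' convention (illustrative only; repoNameFor and 'dd-example' are hypothetical names):

  const repoNameFor = (deployId = 'dd-default') => `engine-${deployId.split('-')[1]}`;

  repoNameFor('dd-default'); // 'engine-default' -> clones underpostnet/engine-default
  repoNameFor('dd-example'); // 'engine-example' (hypothetical deployId)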
package/docker-compose.yml DELETED
@@ -1,67 +0,0 @@
-# https://docs.docker.com/compose/compose-file/compose-file-v3
-# https://docs.docker.com/engine/reference/commandline/compose/
-version: '3'
-services:
-  prometheus:
-    image: prom/prometheus
-    ports:
-      - 9090:9090
-    volumes:
-      - ./prometheus_data:/prometheus
-      - ./prometheus.yml:/etc/prometheus/prometheus.yml
-    command:
-      - '--config.file=/etc/prometheus/prometheus.yml'
-    networks:
-      - load-balancer
-
-  grafana:
-    image: grafana/grafana
-    ports:
-      - 3000:3000
-    volumes:
-      - ./grafana_data:/var/lib/grafana
-      # - ./grafana.ini:/etc/grafana/grafana.ini
-    networks:
-      - load-balancer
-    depends_on:
-      - prometheus
-
-  underpost-engine:
-    build:
-      dockerfile: ./Dockerfile
-      context: . # workdir path
-    # image: underpost-engine
-    # container_name: <name> ignore for replicas
-    ports:
-      - '22'
-      - '80' # host port allocated dynamically, host ports are unique independent of replicas
-      - '443'
-      - '3306'
-      - '27017'
-      - '4001-4002:3001'
-      - '3002-3020'
-    volumes:
-      - ./logs:/code/logs
-    deploy:
-      mode: replicated
-      replicas: 2
-      restart_policy:
-        condition: on-failure
-        delay: 5s
-        max_attempts: 3
-        window: 120s
-      resources:
-        limits:
-          cpus: '2'
-          memory: 400M
-        reservations:
-          cpus: '0.25'
-          memory: 20M
-    labels: # labels in Compose file instead of Dockerfile
-      engine.version: '2.8.85'
-    networks:
-      - load-balancer
-
-networks:
-  load-balancer:
-    driver: bridge
package/prometheus.yml DELETED
@@ -1,36 +0,0 @@
-# my global config
-global:
-  scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
-  evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute.
-  # scrape_timeout is set to the global default (10s).
-
-# Alertmanager configuration
-alerting:
-  alertmanagers:
-    - static_configs:
-        - targets:
-          # - alertmanager:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
-  # - "first_rules.yml"
-  # - "second_rules.yml"
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus_service'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-      - targets: ['host.docker.internal:9090']
-
-  - job_name: 'nodejs_service'
-    static_configs:
-      - targets: ['host.docker.internal:4001', 'host.docker.internal:4002']
-      # - targets: ['localhost:4001', 'localhost:4002']
-      # - targets: ["host.docker.internal:3002"] # Windows
-      # - targets: ["docker.for.mac.localhost:9090"] # macOs