underpost 2.8.482 → 2.8.521

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -42,6 +42,7 @@
  "Itemledger",
  "jsonld",
  "lampp",
+ "letsencrypt",
  "loadingio",
  "Longname",
  "metanarrative",
package/bin/build.js CHANGED
@@ -36,7 +36,7 @@ if (process.argv.includes('clean')) {
 
  if (process.argv.includes('conf')) {
  for (const _confName of (confName === 'dd'
- ? fs.readFileSync(`./engine-private/deploy/dd-router`, 'utf8')
+ ? fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8')
  : confName
  ).split(',')) {
  const _repoName = `engine-${_confName.split('dd-')[1]}`;
package/bin/deploy.js CHANGED
@@ -141,23 +141,6 @@ try {
  loadConf(process.argv[3], process.argv[4]);
  break;
  }
- case 'run':
- {
- if (process.argv.includes('replicas')) {
- const deployGroupId = getDeployGroupId();
- const dataDeploy = getDataDeploy({
- deployId: process.argv[3],
- buildSingleReplica: true,
- deployGroupId,
- });
- if (fs.existsSync(`./tmp/await-deploy`)) fs.remove(`./tmp/await-deploy`);
- await deployRun(dataDeploy);
- } else {
- loadConf(process.argv[3]);
- shellExec(`npm start ${process.argv.includes('maintenance') ? 'maintenance' : ''}`);
- }
- }
- break;
 
  case 'new-nodejs-app':
  {
@@ -453,12 +436,13 @@ try {
  case 'run-macro':
  {
  if (fs.existsSync(`./tmp/await-deploy`)) fs.remove(`./tmp/await-deploy`);
- const dataDeploy = getDataDeploy({ deployGroupId: process.argv[3], buildSingleReplica: true });
+ const dataDeploy = getDataDeploy({
+ deployGroupId: process.argv[3],
+ buildSingleReplica: true,
+ deployIdConcat: ['dd-proxy', 'dd-cron'],
+ });
  if (!process.argv[4]) await setUpProxyMaintenanceServer({ deployGroupId: process.argv[3] });
- await deployRun(
- process.argv[4] ? dataDeploy.filter((d) => d.deployId.match(process.argv[4])) : dataDeploy,
- true,
- );
+ await deployRun(process.argv[4] ? dataDeploy.filter((d) => d.deployId.match(process.argv[4])) : dataDeploy);
  }
  break;
 
@@ -748,6 +732,16 @@ try {
 
  shellExec(`node bin/deploy update-dependencies`);
  shellExec(`auto-changelog`);
+ shellExec(`underpost deploy dd --build-manifest --sync --info-router`);
+ shellExec(`underpost deploy dd production --build-manifest --sync --info-router`);
+ shellExec(`node bin/build dd conf`);
+ shellExec(`git add . && cd ./engine-private && git add .`);
+ shellExec(`underpost cmt . ci package-pwa-microservices-template 'update version ${newVersion}'`);
+ shellExec(
+ `underpost cmt ./engine-private ci package-pwa-microservices-template 'update version ${newVersion}'`,
+ );
+ shellExec(`underpost push . underpostnet/engine`);
+ shellExec(`cd ./engine-private && underpost push . underpostnet/engine-private`);
  }
  break;
 
package/bin/index.js CHANGED
@@ -8,8 +8,8 @@ import { getNpmRootPath, loadConf } from '../src/server/conf.js';
  import fs from 'fs-extra';
  import { commitData } from '../src/client/components/core/CommonJs.js';
  import UnderpostScript from '../src/cli/script.js';
- import { shellExec } from '../src/server/process.js';
  import UnderpostDB from '../src/cli/db.js';
+ import UnderpostCron from '../src/cli/cron.js';
 
  const npmRoot = getNpmRootPath();
  const underpostRoot = `${npmRoot}/underpost/.env`;
@@ -63,7 +63,7 @@ program
 
  program
  .command('env')
- .argument('<deploy-id>', 'deploy configuration id')
+ .argument('<deploy-id>', `deploy configuration id, if 'clean' restore default`)
  .argument('[env]', 'Optional environment, for default is production')
  .description('Set environment variables files and conf related to <deploy-id>')
  .action(loadConf);
@@ -83,18 +83,18 @@ program
 
  program
  .command('cluster')
+ .argument('[pod-name]', 'Optional pod name filter')
  .option('--reset', `Delete all clusters and prune all data and caches`)
  .option('--mariadb', 'Init with mariadb statefulset')
  .option('--mongodb', 'Init with mongodb statefulset')
  .option('--valkey', 'Init with valkey service')
  .option('--contour', 'Init with project contour base HTTPProxy and envoy')
+ .option('--cert-manager', 'Init with letsencrypt-prod ClusterIssuer')
  .option('--info', 'Get all kinds objects deployed')
  .option('--full', 'Init with all statefulsets and services available')
  .option('--ns-use <ns-name>', 'Switches current context to namespace')
- .action((...args) => {
- if (args[0].reset) return Underpost.cluster.reset();
- return Underpost.cluster.init(args[0]);
- })
+ .option('--list-pods', 'Display list pods information')
+ .action(Underpost.cluster.init)
  .description('Manage cluster, for default initialization base kind cluster');
 
  program
@@ -136,7 +136,8 @@ program
  .argument('<deploy-id>', 'Deploy configuration id')
  .argument('[env]', 'Optional environment, for default is development')
  .argument('[path]', 'Absolute or relative directory, for default is current')
- .option('--image-archive', 'Only load tar image from /images')
+ .option('--image-archive', 'Only load tar image from ./images')
+ .option('--podman-save', 'Save image from podman to ./images')
  .description('Build image from Dockerfile')
  .action(Underpost.image.dockerfile.build);
 
@@ -170,6 +171,15 @@ program
  )
  .action((...args) => Underpost.script[args[0]](args[1], args[2]));
 
+ program
+ .command('cron')
+ .argument('[deploy-list]', 'Deploy id list, e.g. default-a,default-b')
+ .argument('[job-list]', `Deploy id list, e.g. ${Object.keys(UnderpostCron.JOB)}, for default all available jobs`)
+ .option('--disable-kind-cluster', 'Disable kind cluster configuration')
+ .option('--init', 'Init cron jobs for cron job default deploy id')
+ .description('Cron jobs management')
+ .action(Underpost.cron.callback);
+
  program
  .command('test')
  .argument('[deploy-list]', 'Deploy id list, e.g. default-a,default-b')
@@ -177,6 +187,8 @@ program
  .option('--inside-container', 'Inside container execution context')
  .option('--sh', 'Copy to clipboard, container entrypoint shell command')
  .option('--logs', 'Display container logs')
+ .option('--pod-name <pod-name>')
+ .option('--pod-status <pod-status>')
  .action(Underpost.test.callback);
 
  program.parse();
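
The new cluster argument and flags, the cron command, and the test pod options above all delegate to the programmatic API in src/index.js. A minimal sketch of roughly equivalent direct calls, assuming the package's default export resolves via the package.json "main" entry; pod and deploy ids are placeholders, not values taken from this diff:

    import Underpost from 'underpost';

    // underpost cluster mongodb --list-pods
    await Underpost.cluster.init('mongodb', { listPods: true });

    // underpost cron default-a,default-b dns --disable-kind-cluster
    await Underpost.cron.callback('default-a,default-b', 'dns', { disableKindCluster: true });

    // underpost test --pod-name mongodb-1 --pod-status Running
    await Underpost.test.callback('', { podName: 'mongodb-1', podStatus: 'Running' });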
package/bin/util.js CHANGED
@@ -112,14 +112,6 @@ try {
  fs.writeFileSync('b64-image', `data:image/jpg;base64,${fs.readFileSync(process.argv[3]).toString('base64')}`);
  break;
 
- case 'clean-env': {
- shellExec(`git checkout package.json`);
- shellExec(`git checkout .env.production`);
- shellExec(`git checkout .env.development`);
- shellExec(`git checkout .env.test`);
- shellExec(`git checkout jsdoc.json`);
- break;
- }
  case 'get-keys': {
  const sentence = fs.existsSync('./_')
  ? fs.readFileSync('./_', 'utf8')
@@ -58,7 +58,7 @@ services:
  cpus: '0.25'
  memory: 20M
  labels: # labels in Compose file instead of Dockerfile
- engine.version: '2.8.482'
+ engine.version: '2.8.521'
  networks:
  - load-balancer
 
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "type": "module",
  "main": "src/index.js",
  "name": "underpost",
- "version": "2.8.482",
+ "version": "2.8.521",
  "description": "pwa api rest template",
  "scripts": {
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -10,6 +10,7 @@
  "pm2": "env-cmd -f .env.production pm2 start src/server.js --node-args=\"--max-old-space-size=8192\" --name engine",
  "dev": "env-cmd -f .env.development node src/client.dev default",
  "dev-img": "env-cmd -f .env.development node src/server",
+ "prod-img": "env-cmd -f .env.production node src/server",
  "dev-api": "env-cmd -f .env.development nodemon --watch src --ignore src/client src/api",
  "dev-client": "env-cmd -f .env.development node src/client.dev",
  "proxy": "node src/proxy proxy",
@@ -9,7 +9,7 @@ const CoreService = {
  /** @type {import('./core.model.js').CoreModel} */
  const Core = DataBaseProvider.instance[`${options.host}${options.path}`].mongoose.models.Core;
  if (req.path.startsWith('/sh')) {
- if (req.body.print) return shellExec(req.body.sh, { stdout: true });
+ if (req.body.stdout) return shellExec(req.body.sh, { stdout: true });
  shellExec(req.body.sh, { async: true });
  return 'Command "' + req.body.sh + '" running';
  }
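
The /sh handler above now keys synchronous output on req.body.stdout instead of req.body.print. A minimal sketch of the two request payloads the handler distinguishes; the command string is illustrative:

    const syncPayload = { sh: 'ls -la', stdout: true }; // returns the command output in the response
    const asyncPayload = { sh: 'ls -la' }; // runs detached; responds with 'Command "ls -la" running'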
@@ -2,12 +2,30 @@ import { timer } from '../client/components/core/CommonJs.js';
  import { cliSpinner } from '../server/conf.js';
  import { loggerFactory } from '../server/logger.js';
  import { shellExec } from '../server/process.js';
+ import UnderpostDeploy from './deploy.js';
+ import UnderpostTest from './test.js';
 
  const logger = loggerFactory(import.meta);
 
  class UnderpostCluster {
  static API = {
- async init(options = { valkey: false, mariadb: false, valkey: false, full: false, info: false, nsUse: '' }) {
+ async init(
+ podName,
+ options = {
+ valkey: false,
+ mariadb: false,
+ valkey: false,
+ full: false,
+ info: false,
+ certManager: false,
+ listPods: false,
+ reset: false,
+ nsUse: '',
+ },
+ ) {
+ if (options.reset === true) return await UnderpostCluster.API.reset();
+ if (options.listPods === true) return console.table(UnderpostDeploy.API.getPods(podName ?? undefined));
+
  if (options.nsUse) {
  shellExec(`kubectl config set-context --current --namespace=${options.nsUse}`);
  return;
@@ -40,6 +58,7 @@ class UnderpostCluster {
  logger.info('----------------------------------------------------------------');
  shellExec(`kubectl get secrets --all-namespaces -o wide`);
  shellExec(`docker secret ls`);
+ shellExec(`kubectl get crd --all-namespaces -o wide`);
  return;
  }
  const testClusterInit = shellExec(`kubectl get pods --all-namespaces -o wide`, {
@@ -81,25 +100,7 @@ class UnderpostCluster {
  shellExec(`kubectl delete statefulset mongodb`);
  shellExec(`kubectl apply -k ./manifests/mongodb`);
 
- await new Promise(async (resolve) => {
- cliSpinner(3000, `[cluster.js] `, ` Load mongodb instance`, 'yellow', 'material');
- await timer(3000);
-
- const monitor = async () => {
- cliSpinner(1000, `[cluster.js] `, ` Load mongodb instance`, 'yellow', 'material');
- await timer(1000);
- if (
- shellExec(`kubectl get pods --all-namespaces -o wide`, {
- silent: true,
- stdout: true,
- disableLog: true,
- }).match(`mongodb-1 1/1 Running`)
- )
- return resolve();
- return monitor();
- };
- await monitor();
- });
+ await UnderpostTest.API.podStatusMonitor('mongodb-1');
 
  const mongoConfig = {
  _id: 'rs0',
@@ -119,6 +120,23 @@ class UnderpostCluster {
 
  if (options.full || options.contour)
  shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
+
+ if (options.full || options.certManager) {
+ if (!UnderpostDeploy.API.getPods('cert-manager').find((p) => p.STATUS === 'Running')) {
+ shellExec(`helm repo add jetstack https://charts.jetstack.io --force-update`);
+ shellExec(
+ `helm install cert-manager jetstack/cert-manager \
+ --namespace cert-manager \
+ --create-namespace \
+ --version v1.17.0 \
+ --set crds.enabled=true`,
+ );
+ }
+
+ const letsEncName = 'letsencrypt-prod';
+ shellExec(`sudo kubectl delete ClusterIssuer ${letsEncName}`);
+ shellExec(`sudo kubectl apply -f ./manifests/${letsEncName}.yaml`);
+ }
  },
  reset() {
  shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
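
The cert-manager branch above installs the Helm chart only when no cert-manager pod is already Running, then re-applies the letsencrypt-prod ClusterIssuer from ./manifests. A sketch of an extra readiness guard that could be combined with the pod monitor introduced elsewhere in this release; this wait is an assumption, not part of the diff:

    // hypothetical guard inside cluster.js, reusing UnderpostDeploy and UnderpostTest imported above
    if (!UnderpostDeploy.API.getPods('cert-manager').find((p) => p.STATUS === 'Running'))
      await UnderpostTest.API.podStatusMonitor('cert-manager', 'Running');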
@@ -0,0 +1,90 @@
+ /**
+ * UnderpostCron CLI index module
+ * @module src/cli/cron.js
+ * @namespace UnderpostCron
+ */
+
+ import Underpost from '../index.js';
+ import BackUp from '../server/backup.js';
+ import { Cmd } from '../server/conf.js';
+ import Dns from '../server/dns.js';
+ import { netWorkCron, saveRuntimeCron } from '../server/network.js';
+ import { shellExec } from '../server/process.js';
+ import fs from 'fs-extra';
+
+ /**
+ * UnderpostCron main module methods
+ * @class
+ * @memberof UnderpostCron
+ */
+ class UnderpostCron {
+ static JOB = {
+ /**
+ * DNS cli API
+ * @static
+ * @type {Dns}
+ * @memberof UnderpostCron
+ */
+ dns: Dns,
+ /**
+ * BackUp cli API
+ * @static
+ * @type {BackUp}
+ * @memberof UnderpostCron
+ */
+ backup: BackUp,
+ };
+ static API = {
+ /**
+ * Run the cron jobs
+ * @static
+ * @param {String} deployList - Comma separated deploy ids
+ * @param {String} jobList - Comma separated job ids
+ * @return {void}
+ * @memberof UnderpostCron
+ */
+ callback: async function (
+ deployList = 'default',
+ jobList = Object.keys(UnderpostCron.JOB),
+ options = { disableKindCluster: false, init: false },
+ ) {
+ if (options.init === true) {
+ await Underpost.test.setUpInfo();
+ const jobDeployId = fs.readFileSync('./engine-private/deploy/dd.cron', 'utf8').trim();
+ deployList = fs.readFileSync('./engine-private/deploy/dd.router', 'utf8').trim();
+ const confCronConfig = JSON.parse(fs.readFileSync(`./engine-private/conf/${jobDeployId}/conf.cron.json`));
+ if (confCronConfig.jobs && Object.keys(confCronConfig.jobs).length > 0) {
+ for (const job of Object.keys(confCronConfig.jobs)) {
+ const name = `${jobDeployId}-${job}`;
+ let deployId;
+ shellExec(Cmd.delete(name));
+ switch (job) {
+ case 'dns':
+ deployId = jobDeployId;
+ break;
+
+ default:
+ deployId = deployList;
+ break;
+ }
+ shellExec(Cmd.cron(deployId, job, name, confCronConfig.jobs[job].expression, options));
+ netWorkCron.push({
+ deployId,
+ jobId: job,
+ expression: confCronConfig.jobs[job].expression,
+ });
+ }
+ }
+ await saveRuntimeCron();
+ if (fs.existsSync(`./tmp/await-deploy`)) fs.remove(`./tmp/await-deploy`);
+ return;
+ }
+ for (const _jobId of jobList.split(',')) {
+ const jobId = _jobId.trim();
+ if (UnderpostCron.JOB[jobId]) await UnderpostCron.JOB[jobId].callback(deployList, options);
+ }
+ },
+ };
+ }
+
+ export default UnderpostCron;
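
The new UnderpostCron module can also be driven directly instead of through pm2. A minimal sketch; the deploy ids are placeholders, and dd.cron, dd.router and conf.cron.json live in the private engine-private tree, which is not part of this diff:

    import UnderpostCron from './src/cli/cron.js';

    // run only the dns job for two deploy ids, without kind cluster handling
    await UnderpostCron.API.callback('default-a,default-b', 'dns', { disableKindCluster: true, init: false });

    // read engine-private/deploy/dd.cron plus conf.cron.json and register the pm2 cron jobs
    await UnderpostCron.API.callback(undefined, undefined, { init: true });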
package/src/cli/deploy.js CHANGED
@@ -17,8 +17,8 @@ const logger = loggerFactory(import.meta);
  class UnderpostDeploy {
  static API = {
  sync(deployList) {
- const deployGroupId = '_dd';
- fs.writeFileSync(`./engine-private/deploy/${deployGroupId}.json`, JSON.stringify(deployList.split(',')), 'utf8');
+ const deployGroupId = 'dd.tmp';
+ fs.writeFileSync(`./engine-private/deploy/${deployGroupId}`, deployList, 'utf8');
  return getDataDeploy({
  buildSingleReplica: true,
  deployGroupId,
@@ -182,8 +182,8 @@ spec:
  env = 'development',
  options = { remove: false, infoRouter: false, sync: false, buildManifest: false },
  ) {
- if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd-router`))
- deployList = fs.readFileSync(`./engine-private/deploy/dd-router`, 'utf8');
+ if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd.router`))
+ deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
  if (options.sync) UnderpostDeploy.API.sync(deployList);
  if (options.buildManifest === true) await UnderpostDeploy.API.buildManifest(deployList, env);
  if (options.infoRouter === true)
@@ -205,12 +205,19 @@ spec:
  const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
  for (const host of Object.keys(confServer)) {
  shellExec(`sudo kubectl delete HTTPProxy ${host}`);
+ if (env === 'production') shellExec(`sudo kubectl delete Certificate ${host}`);
  if (!options.remove === true && env === 'development') concatHots += ` ${host}`;
  }
 
+ const manifestsPath =
+ env === 'production'
+ ? `engine-private/conf/${deployId}/build/production`
+ : `manifests/deployment/${deployId}-${env}`;
+
  if (!options.remove === true) {
- shellExec(`sudo kubectl apply -f ./manifests/deployment/${deployId}-${env}/deployment.yaml`);
- shellExec(`sudo kubectl apply -f ./manifests/deployment/${deployId}-${env}/proxy.yaml`);
+ shellExec(`sudo kubectl apply -f ./${manifestsPath}/deployment.yaml`);
+ shellExec(`sudo kubectl apply -f ./${manifestsPath}/proxy.yaml`);
+ if (env === 'production') shellExec(`sudo kubectl apply -f ./${manifestsPath}/secret.yaml`);
  }
 
  let renderHosts;
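
With the manifestsPath selection above, production applies manifests from the per-deploy build folder in engine-private, while development keeps using the generic manifests tree. A worked example for a hypothetical deploy id dd-example:

    // env === 'production'  -> ./engine-private/conf/dd-example/build/production/{deployment,proxy,secret}.yaml
    // env === 'development' -> ./manifests/deployment/dd-example-development/{deployment,proxy}.yaml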
@@ -234,16 +241,17 @@ spec:
  default:
  break;
  }
- logger.info(
- `
+ if (renderHosts)
+ logger.info(
+ `
  ` + renderHosts,
- );
+ );
  }
  },
  getPods(deployId) {
  const raw = shellExec(`sudo kubectl get pods --all-namespaces -o wide`, {
  stdout: true,
- disableLog: false,
+ disableLog: true,
  silent: true,
  });
 
package/src/cli/image.js CHANGED
@@ -13,11 +13,18 @@ class UnderpostImage {
  pullBaseImages() {
  shellExec(`sudo podman pull docker.io/library/debian:buster`);
  },
- build(deployId = 'default', env = 'development', path = '.', options = { imageArchive: false }) {
+ build(
+ deployId = 'default',
+ env = 'development',
+ path = '.',
+ options = { imageArchive: false, podmanSave: false },
+ ) {
  const imgName = `${deployId}-${env}:${Underpost.version}`;
  const podManImg = `localhost/${imgName}`;
- const imagesStoragePath = `./images`;
- const tarFile = `${imagesStoragePath}/${imgName.replace(':', '_')}.tar`;
+ const imagesStoragePath = `/images`;
+ if (!fs.existsSync(`${path}${imagesStoragePath}`))
+ fs.mkdirSync(`${path}${imagesStoragePath}`, { recursive: true });
+ const tarFile = `.${imagesStoragePath}/${imgName.replace(':', '_')}.tar`;
 
  let secrets = ' ';
  let secretDockerInput = '';
@@ -36,8 +43,9 @@ class UnderpostImage {
  `cd ${path}${secrets}&& sudo podman build -f ./Dockerfile -t ${imgName} --pull=never --cap-add=CAP_AUDIT_WRITE${secretDockerInput}`,
  );
  fs.removeSync(`${path}/.env.underpost`);
- shellExec(`cd ${path} && podman save -o ${tarFile} ${podManImg}`);
  }
+ if (options.imageArchive !== true || options.podmanSave === true)
+ shellExec(`cd ${path} && podman save -o ${tarFile} ${podManImg}`);
  shellExec(`cd ${path} && sudo kind load image-archive ${tarFile}`);
  },
  async script(deployId = 'default', env = 'development', options = { run: false }) {
@@ -101,12 +109,12 @@ class UnderpostImage {
  shellExec(`node bin/deploy conf ${deployId} ${env}`);
  shellExec(`node bin/deploy build-full-client ${deployId}`);
  if (options.run === true) {
- const runCmd = env === 'production' ? 'prod-img' : 'dev-img';
+ const runCmd = env === 'production' ? 'run prod-img' : 'run dev-img';
  if (fs.existsSync(`./engine-private/replica`)) {
  const replicas = await fs.readdir(`./engine-private/replica`);
  for (const replica of replicas) {
  shellExec(`node bin/deploy conf ${replica} ${env}`);
- shellExec(`npm run ${runCmd} ${replica} deploy`, { async: true });
+ shellExec(`npm ${runCmd} ${replica} deploy`, { async: true });
  fs.writeFileSync(`./tmp/await-deploy`, '', 'utf8');
  const monitor = async () => {
  await timer(1000);
@@ -116,7 +124,7 @@ class UnderpostImage {
  }
  shellExec(`node bin/deploy conf ${deployId} ${env}`);
  }
- shellExec(`npm run ${runCmd} ${deployId} deploy`);
+ shellExec(`npm ${runCmd} ${deployId} deploy`);
  }
  },
  },
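
Image archives are now written to ./images under the build path, and the tar export can be forced or skipped. A minimal sketch of the two modes through the CLI action wired up in bin/index.js; deploy id, env and path are placeholders, and the default flow assumes the podman build step runs when --image-archive is not set:

    // build with podman, save the tar into ./images, then kind load image-archive
    await Underpost.image.dockerfile.build('default', 'production', '.');

    // --image-archive: skip building and only kind-load an existing tar from ./images
    await Underpost.image.dockerfile.build('default', 'production', '.', { imageArchive: true });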
package/src/cli/test.js CHANGED
@@ -1,6 +1,7 @@
+ import { timer } from '../client/components/core/CommonJs.js';
  import { MariaDB } from '../db/mariadb/MariaDB.js';
  import { getNpmRootPath } from '../server/conf.js';
- import { actionInitLog, loggerFactory } from '../server/logger.js';
+ import { actionInitLog, loggerFactory, setUpInfo } from '../server/logger.js';
  import { pbcopy, shellExec } from '../server/process.js';
  import UnderpostDeploy from './deploy.js';
 
@@ -29,6 +30,14 @@ class UnderpostTest {
  shellExec(`cd ${getNpmRootPath()}/underpost && npm run test`);
  },
  async callback(deployList = '', options = { insideContainer: false, sh: false, logs: false }) {
+ if (
+ options.podName &&
+ typeof options.podName === 'string' &&
+ options.podStatus &&
+ typeof options.podStatus === 'string'
+ )
+ return await UnderpostTest.API.podStatusMonitor(options.podName, options.podStatus);
+
  if (options.sh === true || options.logs === true) {
  const [pod] = UnderpostDeploy.API.getPods(deployList);
  if (pod) {
@@ -77,6 +86,21 @@ class UnderpostTest {
  }
  } else return UnderpostTest.API.run();
  },
+ podStatusMonitor(podName, status = 'Running', deltaMs = 1000) {
+ return new Promise(async (resolve) => {
+ let index = 0;
+ logger.info(`Loading ${podName} instance`, { status, deltaMs });
+ const _monitor = async () => {
+ await timer(deltaMs);
+ const result = UnderpostDeploy.API.getPods(podName).find((p) => p.STATUS === status);
+ logger.info(`Testing pod ${podName}... ${result ? 1 : 0}/1 - elapsed time ${deltaMs * (index + 1)}ms`);
+ if (result) return resolve();
+ index++;
+ return _monitor();
+ };
+ await _monitor();
+ });
+ },
  };
  }
 
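podStatusMonitor polls UnderpostDeploy.API.getPods until a pod matching the name filter reports the requested status, replacing the inline mongodb spinner removed from cluster.js. A minimal usage sketch; the pod name and interval are illustrative:

    // resolves once a pod whose name matches 'mariadb' reports STATUS === 'Running', polling every 2000 ms
    await UnderpostTest.API.podStatusMonitor('mariadb', 'Running', 2000);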
@@ -761,6 +761,7 @@ const renderWave = ({ id }) => {
  const cssTokensEffect = {};
  const cssTokensContainer = {};
  const cssEffect = async (containerSelector, event) => {
+ return;
  // Array.from(event.target.classList)
  let offsetX, offsetY;
  if (Array.from(event.srcElement.classList).includes('ripple') && cssTokensContainer[containerSelector]) {
package/src/index.js CHANGED
@@ -5,6 +5,7 @@
  */
 
  import UnderpostCluster from './cli/cluster.js';
+ import UnderpostCron from './cli/cron.js';
  import UnderpostDB from './cli/db.js';
  import UnderpostDeploy from './cli/deploy.js';
  import UnderpostRootEnv from './cli/env.js';
@@ -26,7 +27,7 @@ class Underpost {
  * @type {String}
  * @memberof Underpost
  */
- static version = 'v2.8.482';
+ static version = 'v2.8.521';
  /**
  * Repository cli API
  * @static
@@ -90,6 +91,13 @@ class Underpost {
  * @memberof Underpost
  */
  static deploy = UnderpostDeploy.API;
+ /**
+ * Cron cli API
+ * @static
+ * @type {UnderpostCron.API}
+ * @memberof Underpost
+ */
+ static cron = UnderpostCron.API;
  }
 
  const up = Underpost;
@@ -1,120 +1,76 @@
  import fs from 'fs-extra';
  import { loggerFactory } from './logger.js';
- import { shellCd, shellExec } from './process.js';
- import { getCronBackUpFolder, getDataDeploy } from './conf.js';
+ import { shellExec } from './process.js';
+ import { getCronBackUpFolder } from './conf.js';
  import dotenv from 'dotenv';
 
  dotenv.config();
 
  const logger = loggerFactory(import.meta);
 
- const BackUpManagement = {
- repoUrl: `https://${process.env.GITHUB_TOKEN}@github.com/${process.env.GITHUB_USERNAME}/${process.env.GITHUB_BACKUP_REPO}.git`,
- Init: async function ({ deployId }) {
- const Callback = async function () {
- const privateCronConfPath = `./engine-private/conf/${deployId}/conf.cron.json`;
+ class BackUp {
+ static callback = async function (deployList, options = { disableKindCluster: false }) {
+ if ((!deployList || deployList === 'dd') && fs.existsSync(`./engine-private/deploy/dd.router`))
+ deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
 
- const confCronPath = fs.existsSync(privateCronConfPath) ? privateCronConfPath : './conf/conf.cron.json';
+ logger.info('init backups callback', deployList);
+ await logger.setUpInfo();
+ const currentDate = new Date().getTime();
+ const maxBackupRetention = 5;
 
- const { backups } = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
+ if (!fs.existsSync('./engine-private/cron-backups'))
+ fs.mkdirSync('./engine-private/cron-backups', { recursive: true });
 
- if (!backups) return;
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
+ if (!deployId) continue;
 
- logger.info('init backups callback');
- await logger.setUpInfo();
-
- const currentDate = new Date().getTime();
-
- if (!fs.existsSync('./engine-private/cron-backups'))
- fs.mkdirSync('./engine-private/cron-backups', { recursive: true });
-
- for (const deployGroupData of backups) {
- const { deployGroupId } = deployGroupData;
- const dataDeploy = getDataDeploy({ deployGroupId });
-
- for (const deployObj of dataDeploy) {
- const { deployId, replicaHost } = deployObj;
-
- if (replicaHost) continue;
-
- const confServer = JSON.parse(
- fs.existsSync(`./engine-private/replica/${deployId}/conf.server.json`)
- ? fs.readFileSync(`./engine-private/replica/${deployId}/conf.server.json`, 'utf8')
- : fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'),
- );
-
- for (const host of Object.keys(confServer))
- for (const path of Object.keys(confServer[host])) {
- // retention policy
- let { db, backupFrequency, maxBackupRetention, singleReplica, wp, git, directory } =
- confServer[host][path];
-
- if (!db || singleReplica) continue;
-
- if (!backupFrequency) backupFrequency = 'daily';
- if (!maxBackupRetention) maxBackupRetention = 5;
-
- const backUpPath = `${process.cwd()}/engine-private/cron-backups/${getCronBackUpFolder(host, path)}`;
- if (!fs.existsSync(backUpPath)) fs.mkdirSync(`${backUpPath}`, { recursive: true });
- // .isDirectory()
- const files = await fs.readdir(backUpPath, { withFileTypes: true });
-
- const currentBackupsDirs = files
- .map((fileObj) => parseInt(fileObj.name))
- .sort((a, b) => a - b)
- .reverse();
+ if (options.disableKindCluster !== true) {
+ shellExec(`underpost db --export ${deployId}`);
+ continue;
+ }
 
- switch (backupFrequency) {
- case 'daily':
+ const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
 
- default:
- // if (currentBackupsDirs[0] && currentDate - currentBackupsDirs[0] < 1000 * 60 * 60 * 24) continue;
- break;
- }
+ for (const host of Object.keys(confServer))
+ for (const path of Object.keys(confServer[host])) {
+ // retention policy
+ const { db } = confServer[host][path];
+ if (!db) continue;
+ logger.info('Init backup', { host, path, db });
 
- for (const retentionPath of currentBackupsDirs.filter((t, i) => i >= maxBackupRetention - 1)) {
- const removePathRetention = `${backUpPath}/${retentionPath}`;
- logger.info('Remove backup folder', removePathRetention);
- fs.removeSync(removePathRetention);
- }
+ const backUpPath = `${process.cwd()}/engine-private/cron-backups/${getCronBackUpFolder(host, path)}`;
+ if (!fs.existsSync(backUpPath)) fs.mkdirSync(`${backUpPath}`, { recursive: true });
+ // .isDirectory()
+ const files = await fs.readdir(backUpPath, { withFileTypes: true });
 
- fs.mkdirSync(`${backUpPath}/${currentDate}`, { recursive: true });
+ const currentBackupsDirs = files
+ .map((fileObj) => parseInt(fileObj.name))
+ .sort((a, b) => a - b)
+ .reverse();
 
- shellExec(`node bin/db ${host}${path} export ${deployId} ${backUpPath}/${currentDate}`);
+ for (const retentionPath of currentBackupsDirs.filter((t, i) => i >= maxBackupRetention - 1)) {
+ const removePathRetention = `${backUpPath}/${retentionPath}`;
+ logger.info('Remove backup folder', removePathRetention);
+ fs.removeSync(removePathRetention);
+ }
 
- if (wp) {
- const repoUrl = `https://${process.env.GITHUB_TOKEN}@github.com/${process.env.GITHUB_USERNAME}/${git
- .split('/')
- .pop()}.git`;
+ fs.mkdirSync(`${backUpPath}/${currentDate}`, { recursive: true });
 
- shellExec(
- `cd ${directory}` +
- ` && git pull ${repoUrl}` +
- ` && git add . && git commit -m "backup ${new Date().toLocaleDateString()}"` +
- ` && git push ${repoUrl}`,
- {
- disableLog: true,
- },
- );
- }
- }
+ shellExec(`node bin/db ${host}${path} export ${deployId} ${backUpPath}/${currentDate}`);
  }
- }
  shellExec(
  `cd ./engine-private/cron-backups` +
- ` && git pull ${BackUpManagement.repoUrl}` +
- ` && git add . && git commit -m "backup ${new Date().toLocaleDateString()}"` +
- ` && git push ${BackUpManagement.repoUrl}`,
+ ` && underpost pull . underpostnet/cron-backups` +
+ ` && git add .` +
+ ` && underpost cmt . backup cron-job '${new Date().toLocaleDateString()}'` +
+ ` && underpost push . underpostnet/cron-backups`,
  {
  disableLog: true,
  },
  );
- };
- await Callback();
- BackUpManagement.Callback = Callback;
- return Callback;
- },
- Callback: async function (params) {},
- };
+ }
+ };
+ }
 
- export { BackUpManagement };
+ export default BackUp;
@@ -98,6 +98,14 @@ const Config = {
  };
 
  const loadConf = (deployId, envInput, subConf) => {
+ if (deployId === 'clean') {
+ shellExec(`git checkout package.json`);
+ shellExec(`git checkout .env.production`);
+ shellExec(`git checkout .env.development`);
+ shellExec(`git checkout .env.test`);
+ shellExec(`git checkout jsdoc.json`);
+ return;
+ }
  const folder = fs.existsSync(`./engine-private/replica/${deployId}`)
  ? `./engine-private/replica/${deployId}`
  : `./engine-private/conf/${deployId}`;
@@ -590,9 +598,25 @@ const cliSpinner = async (time = 5000, message0, message1, color, type = 'dots')
  const buildReplicaId = ({ deployId, replica }) => `${deployId}-${replica.slice(1)}`;
 
  const getDataDeploy = (
- options = { buildSingleReplica: false, deployGroupId: '', deployId: '', disableSyncEnvPort: false },
+ options = {
+ buildSingleReplica: false,
+ deployGroupId: '',
+ deployId: '',
+ disableSyncEnvPort: false,
+ deployIdConcat: [],
+ },
  ) => {
- let dataDeploy = JSON.parse(fs.readFileSync(`./engine-private/deploy/${options.deployGroupId}.json`, 'utf8'));
+ let dataDeploy =
+ options.deployGroupId === 'dd'
+ ? fs.readFileSync(`./engine-private/deploy/${options.deployGroupId}.router`, 'utf8')
+ : fs.readFileSync(`./engine-private/deploy/${options.deployGroupId}`, 'utf8');
+
+ dataDeploy = dataDeploy
+ .split(',')
+ .map((deployId) => deployId.trim())
+ .filter((deployId) => deployId);
+
+ if (options.deployIdConcat) dataDeploy = dataDeploy.concat(options.deployIdConcat);
 
  if (options.deployId) dataDeploy = dataDeploy.filter((d) => d === options.deployId);
 
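getDataDeploy now parses the plain comma-separated deploy files (dd.router, or the dd.tmp file written by UnderpostDeploy.API.sync) instead of a JSON array, and can append extra ids. A worked sketch with hypothetical file contents:

    // engine-private/deploy/dd.router (hypothetical): "default-a, default-b"
    getDataDeploy({ deployGroupId: 'dd', buildSingleReplica: true, deployIdConcat: ['dd-proxy', 'dd-cron'] });
    // initial list: ['default-a', 'default-b', 'dd-proxy', 'dd-cron'], before the replica and port handling that follows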
@@ -827,7 +851,7 @@ const deployRun = async (dataDeploy, currentAttempt = 1) => {
  if (failed.length > 0) {
  for (const deploy of failed) logger.error(deploy.deployId, Cmd.run(deploy.deployId));
  if (currentAttempt === maxAttempts) return logger.error(`max deploy attempts exceeded`);
- if (process.argv.includes('manual')) await read({ prompt: 'Press enter to retry failed processes\n' });
+ await read({ prompt: 'Press enter to retry failed processes\n' });
  currentAttempt++;
  await deployRun(failed, currentAttempt);
  } else logger.info(`Deploy process successfully`);
@@ -973,15 +997,15 @@ const getPathsSSR = (conf) => {
 
  const Cmd = {
  delete: (deployId) => `pm2 delete ${deployId}`,
- run: (deployId) => `node bin/deploy run ${deployId}`,
+ run: () => `npm start`,
  build: (deployId) => `node bin/deploy build-full-client ${deployId}${process.argv.includes('l') ? ' l' : ''}`,
  conf: (deployId, env) => `node bin/deploy conf ${deployId} ${env ? env : 'production'}`,
  replica: (deployId, host, path) => `node bin/deploy build-single-replica ${deployId} ${host} ${path}`,
  syncPorts: (deployGroupId) => `node bin/deploy sync-env-port ${deployGroupId}`,
- cron: (deployId, job, expression) => {
- shellExec(Cmd.delete(`${deployId}-${job}`));
- return `env-cmd -f .env.production pm2 start bin/cron.js --no-autorestart --instances 1 --cron "${expression}" --name ${deployId}-${job} -- ${job} ${deployId}`;
- },
+ cron: (deployList, jobList, name, expression, options) =>
+ `pm2 start ./bin/index.js --no-autorestart --instances 1 --cron "${expression}" --name ${name} -- cron ${
+ options?.disableKindCluster ? `--disable-kind-cluster ` : ''
+ }${deployList} ${jobList}`,
  };
 
  const fixDependencies = async () => {
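
Cmd.cron now emits a pm2 command that re-enters the CLI cron subcommand instead of the removed bin/cron.js entry point. Evaluating the template with hypothetical arguments (name and expression are placeholders):

    Cmd.cron('default-a,default-b', 'dns', 'dd-cron-dns', '*/30 * * * *', { disableKindCluster: true });
    // -> pm2 start ./bin/index.js --no-autorestart --instances 1 --cron "*/30 * * * *" --name dd-cron-dns -- cron --disable-kind-cluster default-a,default-b dns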
@@ -1048,7 +1072,7 @@ const setUpProxyMaintenanceServer = ({ deployGroupId }) => {
  shellExec(`node bin/deploy valkey-service`);
  const proxyDeployId = fs.readFileSync(`./engine-private/deploy/${deployGroupId}.proxy`, 'utf8').trim();
  shellExec(`node bin/deploy conf ${proxyDeployId} production`);
- shellExec(`node bin/deploy run ${proxyDeployId} maintenance`);
+ shellExec(`npm start ${proxyDeployId} maintenance`);
  };
 
  const getNpmRootPath = () =>
package/src/server/dns.js CHANGED
@@ -17,10 +17,8 @@ dotenv.config();
 
  const logger = loggerFactory(import.meta);
 
- const Dns = {
- repoUrl: `https://${process.env.GITHUB_TOKEN}@github.com/${process.env.GITHUB_USERNAME}/${process.env.GITHUB_DNS_REPO}.git`,
- callback: () => null,
- InitIpDaemon: async function ({ deployId }) {
+ class Dns {
+ static callback = async function (deployList) {
  // NAT-VPS modem/router device configuration:
  // LAN --> [NAT-VPS] --> WAN
  // enabled DMZ Host to proxy IP 80-443 (79-444) sometimes router block first port
@@ -30,56 +28,55 @@ const Dns = {
  // LAN server or device's local servers port -> 3000-3100 (2999-3101)
  // DNS Records: [ANAME](Address Dynamic) -> [A](ipv4) host | [AAAA](ipv6) host -> [public-ip]
  // Forward the router's TCP/UDP ports to the LAN device's IP address
-
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
  const privateCronConfPath = `./engine-private/conf/${deployId}/conf.cron.json`;
-
  const confCronPath = fs.existsSync(privateCronConfPath) ? privateCronConfPath : './conf/conf.cron.json';
- let confCronData = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
- if (confCronData.ipDaemon.disabled) return;
- Dns.ip = confCronData.ipDaemon.ip;
- logger.info(`Current ip`, Dns.ip);
- const callback = async () => {
- logger.info('init dns ip callback');
- await logger.setUpInfo();
- let testIp;
- try {
- testIp = await ip.public.ipv4();
- } catch (error) {
- logger.error(error, { testIp, stack: error.stack });
- }
- if (testIp && typeof testIp === 'string' && validator.isIP(testIp) && Dns.ip !== testIp) {
- logger.info(`New ip`, testIp);
- for (const recordType of Object.keys(confCronData.records)) {
- switch (recordType) {
- case 'A':
- for (const dnsProvider of confCronData.records[recordType]) {
- if (typeof Dns.services.updateIp[dnsProvider.dns] === 'function')
- await Dns.services.updateIp[dnsProvider.dns]({ ...dnsProvider, ip: testIp });
- }
- break;
+ const confCronData = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
+
+ let testIp;
+
+ try {
+ testIp = await ip.public.ipv4();
+ } catch (error) {
+ logger.error(error, { testIp, stack: error.stack });
+ }
+ const ipFileName = `${deployId}.ip`;
+ const currentIp = fs.existsSync(`./engine-private/deploy/${ipFileName}`)
+ ? fs.readFileSync(`./engine-private/deploy/${ipFileName}`, 'utf8')
+ : undefined;
 
- default:
- break;
- }
+ if (testIp && typeof testIp === 'string' && validator.isIP(testIp) && currentIp !== testIp) {
+ logger.info(`new ip`, testIp);
+ for (const recordType of Object.keys(confCronData.records)) {
+ switch (recordType) {
+ case 'A':
+ for (const dnsProvider of confCronData.records[recordType]) {
+ if (typeof Dns.services.updateIp[dnsProvider.dns] === 'function')
+ await Dns.services.updateIp[dnsProvider.dns]({ ...dnsProvider, ip: testIp });
+ }
+ break;
+
+ default:
+ break;
  }
- try {
- const ipUrlTest = `https://${process.env.DEFAULT_DEPLOY_HOST}`;
- const response = await axios.get(ipUrlTest);
- const verifyIp = response.request.socket.remoteAddress;
- logger.info(ipUrlTest + ' IP', verifyIp);
- if (verifyIp === testIp) {
- await this.saveIp(confCronPath, confCronData, testIp);
- } else logger.error('ip not updated');
- } catch (error) {
- logger.error(error), 'ip not updated';
+ }
+ try {
+ const ipUrlTest = `https://${process.env.DEFAULT_DEPLOY_HOST}`;
+ const response = await axios.get(ipUrlTest);
+ const verifyIp = response.request.socket.remoteAddress;
+ logger.info(ipUrlTest + ' IP', verifyIp);
+ if (verifyIp === testIp) {
+ fs.writeFileSync(`./engine-private/deploy/${ipFileName}`, testIp, 'utf8');
+ } else logger.error('ip not updated');
+ } catch (error) {
+ logger.error(error), 'ip not updated';
  }
  }
- };
- await callback();
- this.callback = callback;
- return callback;
- },
- services: {
+ }
+ };
+
+ static services = {
  updateIp: {
  dondominio: (options) => {
  const { user, api_key, host, dns, ip } = options;
@@ -100,21 +97,7 @@ const Dns = {
  });
  },
  },
- },
- saveIp: async (confCronPath, confCronData, ip) => {
- Dns.ip = ip;
- confCronData.ipDaemon.ip = ip;
- fs.writeFileSync(confCronPath, JSON.stringify(confCronData, null, 4), 'utf8');
- shellExec(
- `cd ./engine-private` +
- ` && git pull ${Dns.repoUrl}` +
- ` && git add . && git commit -m "update ip ${new Date().toLocaleDateString()}"` +
- ` && git push ${Dns.repoUrl}`,
- {
- disableLog: true,
- },
- );
- },
- };
+ };
+ }
 
- export { Dns };
+ export default Dns;
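
Dns.callback now walks a comma-separated deploy list and persists the last confirmed public IP to engine-private/deploy/<deployId>.ip instead of mutating conf.cron.json and pushing it to git. A minimal direct-invocation sketch; the deploy id is a placeholder and the call assumes a conf.cron.json with a records section plus the DEFAULT_DEPLOY_HOST env var:

    import Dns from './src/server/dns.js';

    // checks the public IPv4, updates configured A records, and writes engine-private/deploy/default-a.ip once verified
    await Dns.callback('default-a');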
package/src/dns.js DELETED
@@ -1,22 +0,0 @@
- 'use strict';
-
- // https://nodejs.org/api
- // https://expressjs.com/en/4x/api.html
-
- import dotenv from 'dotenv';
- import { loggerFactory } from './server/logger.js';
- import { Dns } from './server/dns.js';
- import { ProcessController } from './server/process.js';
- import { Config } from './server/conf.js';
-
- dotenv.config();
-
- await Config.build();
-
- const logger = loggerFactory(import.meta);
-
- await logger.setUpInfo();
-
- await Dns.InitIpDaemon();
-
- ProcessController.init(logger);