underpost 2.8.46 → 2.8.48

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
package/Dockerfile CHANGED
@@ -4,7 +4,7 @@ FROM debian:${BASE_DEBIAN}

  ENV DEBIAN_FRONTEND=noninteractive

- WORKDIR /code
+ WORKDIR /home/dd

  # Set root password to root, format is 'user:password'.
  RUN echo 'root:root' | chpasswd
@@ -39,10 +39,10 @@ RUN npm --version

  RUN npm install -g underpost

- VOLUME [ "/code/app/logs" ]
+ VOLUME [ "/home/dd/engine/logs" ]

  EXPOSE 22

  EXPOSE 4000-4004

- CMD [ "underpost", "new", "app" ]
+ CMD [ "underpost", "new", "service" ]
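Note on the two behaviour changes above: the working directory moves from /code to /home/dd, and the default command becomes `underpost new service`, which (per the repository.js change further down in this diff) starts a keep-alive listener instead of scaffolding an app, so a container built from this image idles until it is exec'd into. A minimal run sketch, assuming a locally built image; the tag and container name are hypothetical:

import { execSync } from 'node:child_process';
// Build the image and start it detached; the default CMD keeps it alive.
execSync('docker build -t localhost/underpost-engine:2.8.48 .', { stdio: 'inherit' });
execSync('docker run -d --name dd-engine localhost/underpost-engine:2.8.48', { stdio: 'inherit' });
// WORKDIR inside the container is now /home/dd instead of /code.
execSync('docker exec dd-engine pwd', { stdio: 'inherit' });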
package/bin/build.js CHANGED
@@ -3,7 +3,7 @@ import { loggerFactory } from '../src/server/logger.js';
  import { shellExec } from '../src/server/process.js';
  import dotenv from 'dotenv';
  import { getCapVariableName } from '../src/client/components/core/CommonJs.js';
- import { buildProxyRouter, buildPortProxyRouter, Config, getPathsSSR, buildKindPorts } from '../src/server/conf.js';
+ import { getPathsSSR } from '../src/server/conf.js';

  const baseConfPath = './engine-private/conf/dd-cron/.env.production';
  if (fs.existsSync(baseConfPath)) dotenv.config({ path: baseConfPath, override: true });
@@ -20,21 +20,13 @@ const logger = loggerFactory(import.meta);
  const confName = process.argv[2];
  const basePath = '../pwa-microservices-template';
  const repoName = `engine-${confName.split('dd-')[1]}`;
- const privateRepoName = `${repoName}-private`;
- const privateRepoNameBackUp = `${repoName}-cron-backups`;
- const gitPrivateUrl = `https://${process.env.GITHUB_TOKEN}@github.com/underpostnet/${privateRepoName}.git`;
- const gitPrivateBackUpUrl = `https://${process.env.GITHUB_TOKEN}@github.com/underpostnet/${privateRepoNameBackUp}.git`;

  logger.info('', {
  confName,
  repoName,
- privateRepoName,
- privateRepoNameBackUp,
  basePath,
  });

- if (process.argv.includes('info')) process.exit(0);
-
  if (process.argv.includes('clean')) {
  if (fs.existsSync(`${basePath}/images`)) fs.copySync(`${basePath}/images`, `./images`);
  shellExec(`cd ${basePath} && git checkout .`);
@@ -42,173 +34,34 @@ if (process.argv.includes('clean')) {
  process.exit(0);
  }

- if (process.argv.includes('proxy')) {
- const env = process.argv.includes('development') ? 'development' : 'production';
- process.env.NODE_ENV = env;
- process.env.PORT = process.env.NODE_ENV === 'development' ? 4000 : 3000;
- process.argv[2] = 'proxy';
- process.argv[3] = fs.readFileSync('./engine-private/deploy/dd-router', 'utf8').trim();
-
- await Config.build();
- process.env.NODE_ENV = 'production';
- const router = buildPortProxyRouter(443, buildProxyRouter());
- const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${confName}/conf.server.json`, 'utf8'));
- const confHosts = Object.keys(confServer);
-
- for (const host of Object.keys(router)) {
- if (!confHosts.find((_host) => host.match(_host))) {
- delete router[host];
- }
- }
-
- const ports = Object.values(router).map((p) => p.split(':')[2]);
-
- const fromPort = ports[0];
- const toPort = ports[ports.length - 1];
-
- logger.info('port range', { fromPort, toPort, router });
-
- const deploymentYamlFilePath = `./engine-private/conf/${confName}/build/${env}/deployment.yaml`;
-
- const deploymentYamlParts = fs.readFileSync(deploymentYamlFilePath, 'utf8').split('ports:');
- deploymentYamlParts[1] =
- buildKindPorts(fromPort, toPort) +
- ` type: LoadBalancer
- `;
-
- fs.writeFileSync(
- deploymentYamlFilePath,
- deploymentYamlParts.join(`ports:
- `),
- );
-
- let proxyYaml = '';
- let secretYaml = '';
-
- for (const host of Object.keys(confServer)) {
- if (env === 'production')
- secretYaml += `
- ---
- apiVersion: cert-manager.io/v1
- kind: Certificate
- metadata:
- name: ${host}
- spec:
- commonName: ${host}
- dnsNames:
- - ${host}
- issuerRef:
- name: letsencrypt-prod
- kind: ClusterIssuer
- secretName: ${host}`;
-
- const pathPortConditions = [];
- for (const path of Object.keys(confServer[host])) {
- const { peer } = confServer[host][path];
- const port = parseInt(router[`${host}${path === '/' ? '' : path}`].split(':')[2]);
- // logger.info('', { host, port, path });
- pathPortConditions.push({
- port,
- path,
- });
-
- if (peer) {
- // logger.info('', { host, port: port + 1, path: '/peer' });
- pathPortConditions.push({
- port: port + 1,
- path: '/peer',
- });
- }
- }
- // logger.info('', { host, pathPortConditions });
- proxyYaml += `
- ---
- apiVersion: projectcontour.io/v1
- kind: HTTPProxy
- metadata:
- name: ${host}
- spec:
- virtualhost:
- fqdn: ${host}${
- env === 'development'
- ? ''
- : `
- tls:
- secretName: ${host}`
- }
- routes:`;
- for (const conditionObj of pathPortConditions) {
- const { path, port } = conditionObj;
- proxyYaml += `
- - conditions:
- - prefix: ${path}
- enableWebsockets: true
- services:
- - name: ${confName}-${env}-service
- port: ${port}`;
- }
- }
- const yamlPath = `./engine-private/conf/${confName}/build/${env}/proxy.yaml`;
- fs.writeFileSync(yamlPath, proxyYaml, 'utf8');
- if (env === 'production') {
- const yamlPath = `./engine-private/conf/${confName}/build/${env}/secret.yaml`;
- fs.writeFileSync(yamlPath, secretYaml, 'utf8');
- }
-
- process.exit(0);
- }
  if (process.argv.includes('conf')) {
- if (!fs.existsSync(`../${privateRepoName}`)) {
- shellExec(`cd .. && git clone ${gitPrivateUrl}`, { silent: true });
- } else {
- shellExec(`cd ../${privateRepoName} && git pull`);
- }
- const toPath = `../${privateRepoName}/conf/${confName}`;
- fs.removeSync(toPath);
- fs.mkdirSync(toPath, { recursive: true });
- fs.copySync(`./engine-private/conf/${confName}`, toPath);
- shellExec(
- `cd ../${privateRepoName}` +
- ` && git add .` +
- ` && git commit -m "ci(engine-core-conf): ⚙️ Update ${confName} conf"` +
- ` && git push`,
- );
- process.exit(0);
- }
-
- if (process.argv.includes('cron-backups')) {
- if (!fs.existsSync(`../${privateRepoNameBackUp}`)) {
- shellExec(`cd .. && git clone ${gitPrivateBackUpUrl}`, { silent: true });
- } else {
- shellExec(`cd ../${privateRepoNameBackUp} && git pull`);
- }
- const serverConf = JSON.parse(fs.readFileSync(`./engine-private/conf/${confName}/conf.server.json`, 'utf8'));
- for (const host of Object.keys(serverConf)) {
- for (let path of Object.keys(serverConf[host])) {
- path = path.replaceAll('/', '-');
- const toPath = `../${privateRepoNameBackUp}/${host}${path}`;
- const fromPath = `./engine-private/cron-backups/${host}${path}`;
- if (fs.existsSync(fromPath)) {
- if (fs.existsSync(toPath)) fs.removeSync(toPath);
- logger.info('Build', { fromPath, toPath });
- fs.copySync(fromPath, toPath);
- }
+ for (const _confName of (confName === 'dd'
+ ? fs.readFileSync(`./engine-private/deploy/dd-router`, 'utf8')
+ : confName
+ ).split(',')) {
+ const _repoName = `engine-${_confName.split('dd-')[1]}`;
+ const privateRepoName = `${_repoName}-private`;
+ const privateGitUri = `${process.env.GITHUB_USERNAME}/${privateRepoName}`;
+
+ if (!fs.existsSync(`../${privateRepoName}`)) {
+ shellExec(`cd .. && underpost clone ${privateGitUri}`, { silent: true });
+ } else {
+ shellExec(`cd ../${privateRepoName} && underpost pull . ${privateGitUri}`);
  }
+ const toPath = `../${privateRepoName}/conf/${_confName}`;
+ fs.removeSync(toPath);
+ fs.mkdirSync(toPath, { recursive: true });
+ fs.copySync(`./engine-private/conf/${_confName}`, toPath);
+ shellExec(
+ `cd ../${privateRepoName}` +
+ ` && git add .` +
+ ` && underpost cmt . ci engine-core-conf 'Update ${_confName} conf'` +
+ ` && underpost push . ${privateGitUri}`,
+ );
  }
- shellExec(
- `cd ../${privateRepoNameBackUp}` +
- ` && git add .` +
- ` && git commit -m "ci(engine-core-cron-backups): ⚙️ Update ${confName} cron backups"` +
- ` && git push`,
- );
  process.exit(0);
  }

- if (process.argv.includes('test')) {
- fs.mkdirSync(`${basePath}/engine-private/conf`, { recursive: true });
- fs.copySync(`./engine-private/conf/${confName}`, `${basePath}/engine-private/conf/${confName}`);
- }
-
  const { DefaultConf } = await import(`../conf.${confName}.js`);

  {
@@ -294,27 +147,9 @@ const { DefaultConf } = await import(`../conf.${confName}.js`);
  const env = process.argv.includes('development') ? 'development' : 'production';
  const deploymentsFiles = ['Dockerfile', 'proxy.yaml', 'deployment.yaml', 'secret.yaml'];
  // remove engine-private of .dockerignore for local testing
-
- if (process.argv.includes('engine')) {
- fs.removeSync(`${basePath}/manifests/deployment`);
-
- if (!fs.existsSync(`./manifests/deployment/${confName}-${env}`))
- fs.mkdirSync(`./manifests/deployment/${confName}-${env}`);
-
- for (const file of deploymentsFiles) {
- if (fs.existsSync(`./engine-private/conf/${confName}/build/${env}/${file}`)) {
- fs.copyFileSync(`./engine-private/conf/${confName}/build/${env}/${file}`, `${basePath}/${file}`);
- fs.copyFileSync(
- `./engine-private/conf/${confName}/build/${env}/${file}`,
- `./manifests/deployment/${confName}-${env}/${file}`,
- );
- }
- }
- } else {
- for (const file of deploymentsFiles) {
- if (fs.existsSync(`./manifests/deployment/${confName}-${env}/${file}`)) {
- fs.copyFileSync(`./manifests/deployment/${confName}-${env}/${file}`, `${basePath}/${file}`);
- }
+ for (const file of deploymentsFiles) {
+ if (fs.existsSync(`./manifests/deployment/${confName}-${env}/${file}`)) {
+ fs.copyFileSync(`./manifests/deployment/${confName}-${env}/${file}`, `${basePath}/${file}`);
  }
  }
  }
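The rewritten `conf` branch above drops the proxy, cron-backups, and test branches, swaps the raw git clone/commit/push calls for the underpost wrappers (clone, pull, cmt, push), and accepts a comma-separated conf list, with `dd` expanding to the list stored in engine-private/deploy/dd-router. A minimal sketch of that expansion, assuming dd-router holds something like "dd-core,dd-cyberia":

import fs from 'fs-extra';
const confName = process.argv[2];
// 'dd' is an alias for every conf id listed in the private router file.
const confList = (confName === 'dd'
  ? fs.readFileSync('./engine-private/deploy/dd-router', 'utf8')
  : confName
).split(',');
for (const _confName of confList) console.log(`sync conf for ${_confName.trim()}`);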
package/bin/file.js CHANGED
@@ -1,7 +1,13 @@
  import fs from 'fs-extra';

  import { loggerFactory } from '../src/server/logger.js';
- import { cap, getCapVariableName, getDirname, newInstance } from '../src/client/components/core/CommonJs.js';
+ import {
+ cap,
+ getCapVariableName,
+ getDirname,
+ newInstance,
+ uniqueArray,
+ } from '../src/client/components/core/CommonJs.js';
  import { shellCd, shellExec } from '../src/server/process.js';
  import walk from 'ignore-walk';
  import { validateTemplatePath } from '../src/server/conf.js';
@@ -81,10 +87,12 @@ try {
  '.github/workflows/engine.core.ci.yml',
  '.github/workflows/engine.cyberia.ci.yml',
  './manifests/deployment/dd-lampp-development',
+ './manifests/deployment/dd-cyberia-development',
+ './manifests/deployment/dd-core-development',
  'bin/web3.js',
  'bin/cyberia.js',
  ]) {
- fs.removeSync('../pwa-microservices-template/' + deletePath);
+ if (fs.existsSync(deletePath)) fs.removeSync('../pwa-microservices-template/' + deletePath);
  }
  const originPackageJson = JSON.parse(fs.readFileSync('./package.json', 'utf8'));
  const templatePackageJson = JSON.parse(fs.readFileSync('../pwa-microservices-template/package.json', 'utf8'));
@@ -102,8 +110,8 @@ try {
  templatePackageJson.description = description;
  templatePackageJson.scripts.dev = dev;
  templatePackageJson.scripts.build = build;
- templatePackageJson.keywords = ['pwa', 'microservices', 'template', 'builder'].concat(
- templatePackageJson.keywords,
+ templatePackageJson.keywords = uniqueArray(
+ ['pwa', 'microservices', 'template', 'builder'].concat(templatePackageJson.keywords),
  );
  delete templatePackageJson.scripts['update-template'];
  fs.writeFileSync(
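The keywords are now passed through uniqueArray before being written back, which is what removes the duplicated entries visible in the package.json hunk below. A minimal sketch of the behaviour this relies on, assuming uniqueArray is a plain Set-based dedup:

const uniqueArray = (arr) => [...new Set(arr)];
const keywords = ['pwa', 'microservices', 'template', 'builder', 'pwa', 'microservices', 'template', 'builder', 'api'];
console.log(uniqueArray(keywords)); // [ 'pwa', 'microservices', 'template', 'builder', 'api' ]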
package/bin/index.js CHANGED
@@ -97,6 +97,17 @@ program
  })
  .description('Manage cluster, for default initialization base kind cluster');

+ program
+ .command('deploy')
+ .argument('<deploy-list>', 'Deploy id list, e.g. default-a, default-b')
+ .argument('[env]', 'Optional environment, for default is development')
+ .option('--remove', 'Delete deployments and services')
+ .option('--sync', 'Sync deployments env, ports, and replicas')
+ .option('--info-router', 'Display router structure')
+ .option('--build-manifest', 'Build kind yaml manifests: deployments, services, proxy and secrets')
+ .description('Manage deployment, for default deploy development pods')
+ .action(Underpost.deploy.callback);
+
  program
  .command('secret')
  .argument('<platform>', `Options: ${Object.keys(Underpost.secret)}`)
@@ -142,11 +153,11 @@ program

  program
  .command('db')
- .option('--import <deploy-id-list>', 'Import databases to containers from deploy id list, e.g. default-a, default-b')
+ .argument('<deploy-list>', 'Deploy id list, e.g. default-a, default-b')
+ .option('--import', 'Import container backups from repositories')
+ .option('--export', 'Export container backups to repositories')
  .description('Manage databases')
- .action((...args) => {
- if (args && args[0].import) return UnderpostDB.API.import(...args);
- });
+ .action(UnderpostDB.API.callback);

  program
  .command('script')
@@ -158,6 +169,12 @@ program
  )
  .action((...args) => Underpost.script[args[0]](args[1], args[2]));

- program.command('test').description('Run tests').action(Underpost.test.run);
+ program
+ .command('test')
+ .argument('[deploy-list]', 'Deploy id list, e.g. default-a, default-b')
+ .description('Manage Test, for default run current underpost default test')
+ .option('--inside-container', 'Inside container execution context')
+ .option('--sh', 'Copy to clipboard, container entrypoint shell command')
+ .action(Underpost.test.callback);

  program.parse();
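A usage sketch for the new and reworked commands registered above; the deploy ids are examples, not values shipped with the package:

import { execSync } from 'node:child_process';
// Build kind manifests and roll out development pods for a deploy list.
execSync('underpost deploy dd-core,dd-cyberia development --sync --build-manifest', { stdio: 'inherit' });
// db now takes the deploy list as an argument and splits import/export.
execSync('underpost db dd-core --export', { stdio: 'inherit' });
// test can target a deployment's pods, or copy a shell entrypoint command.
execSync('underpost test dd-core --sh', { stdio: 'inherit' });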
@@ -58,7 +58,7 @@ services:
  cpus: '0.25'
  memory: 20M
  labels: # labels in Compose file instead of Dockerfile
- engine.version: '2.8.46'
+ engine.version: '2.8.48'
  networks:
  - load-balancer

package/package.json CHANGED
@@ -2,7 +2,7 @@
  "type": "module",
  "main": "src/index.js",
  "name": "underpost",
- "version": "2.8.46",
+ "version": "2.8.48",
  "description": "pwa api rest template",
  "scripts": {
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -32,10 +32,6 @@
  "url": "git+https://github.com/underpostnet/pwa-microservices-template.git"
  },
  "keywords": [
- "pwa",
- "microservices",
- "template",
- "builder",
  "pwa",
  "microservices",
  "template",
package/src/cli/cluster.js CHANGED
@@ -43,7 +43,7 @@ class UnderpostCluster {
  return;
  }
  const testClusterInit = shellExec(`kubectl get pods --all-namespaces -o wide`, {
- disableLogging: true,
+ disableLog: true,
  silent: true,
  stdout: true,
  });
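The cluster check switches the shellExec option from disableLogging to disableLog, the spelling used by the new deploy and db modules elsewhere in this diff. A minimal sketch of the corrected call, assuming the shellExec option shape used in the surrounding code:

import { shellExec } from '../server/process.js';
const raw = shellExec('kubectl get pods --all-namespaces -o wide', {
  disableLog: true, // renamed from disableLogging to match the rest of the codebase
  silent: true,
  stdout: true,
});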
package/src/cli/db.js CHANGED
@@ -1,4 +1,4 @@
- import { mergeFile } from '../server/conf.js';
+ import { mergeFile, splitFileFactory } from '../server/conf.js';
  import { loggerFactory } from '../server/logger.js';
  import { shellExec } from '../server/process.js';
  import fs from 'fs-extra';
@@ -7,8 +7,10 @@ const logger = loggerFactory(import.meta);

  class UnderpostDB {
  static API = {
- async import(options = { import: 'default' }) {
- for (const _deployId of options.import.split(',')) {
+ async callback(deployList = 'default', options = { import: false, export: false }) {
+ const newBackupTimestamp = new Date().getTime();
+ const nameSpace = 'default';
+ for (const _deployId of deployList.split(',')) {
  const deployId = _deployId.trim();
  if (!deployId) continue;
  const dbs = {};
@@ -38,18 +40,22 @@ class UnderpostDB {
  for (const dbName of Object.keys(dbs[provider])) {
  const { hostFolder, user, password } = dbs[provider][dbName];
  if (hostFolder) {
- logger.info('import', { hostFolder, provider, dbName });
+ logger.info('', { hostFolder, provider, dbName });

  const backUpPath = `../${repoName}/${hostFolder}`;
  const times = await fs.readdir(backUpPath);
  const currentBackupTimestamp = Math.max(...times.map((t) => parseInt(t)));
  dbs[provider][dbName].currentBackupTimestamp = currentBackupTimestamp;
+ const removeBackupTimestamp = Math.min(...times.map((t) => parseInt(t)));

+ const sqlContainerPath = `/home/${dbName}.sql`;
  const _fromPartsParts = `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${dbName}-parths.json`;
  const _toSqlPath = `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${dbName}.sql`;
+ const _toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
  const _toBsonPath = `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${dbName}`;
+ const _toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;

- if (fs.existsSync(_fromPartsParts) && !fs.existsSync(_toSqlPath)) {
+ if (options.import === true && fs.existsSync(_fromPartsParts) && !fs.existsSync(_toSqlPath)) {
  const names = JSON.parse(fs.readFileSync(_fromPartsParts, 'utf8')).map((_path) => {
  return `../${repoName}/${hostFolder}/${currentBackupTimestamp}/${_path.split('/').pop()}`;
  });
@@ -61,26 +67,59 @@ class UnderpostDB {
  await mergeFile(names, _toSqlPath);
  }

+ if (options.export === true && times.length >= 5) {
+ fs.removeSync(`../${repoName}/${hostFolder}/${removeBackupTimestamp}`);
+ fs.mkdirSync(`../${repoName}/${hostFolder}/${newBackupTimestamp}`, { recursive: true });
+ }
+
  switch (provider) {
  case 'mariadb': {
  const podName = `mariadb-statefulset-0`;
- const nameSpace = 'default';
  const serviceName = 'mariadb';
- shellExec(`sudo kubectl cp ${_toSqlPath} ${nameSpace}/${podName}:/${dbName}.sql`);
- const cmd = `mariadb -u ${user} -p${password} ${dbName} < /${dbName}.sql`;
- shellExec(
- `kubectl exec -i ${podName} -- ${serviceName} -p${password} -e 'CREATE DATABASE ${dbName};'`,
- );
- shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
+ if (options.import === true) {
+ shellExec(`sudo kubectl cp ${_toSqlPath} ${nameSpace}/${podName}:/${dbName}.sql`);
+ const cmd = `mariadb -u ${user} -p${password} ${dbName} < /${dbName}.sql`;
+ shellExec(
+ `kubectl exec -i ${podName} -- ${serviceName} -p${password} -e 'CREATE DATABASE ${dbName};'`,
+ );
+ shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
+ }
+ if (options.export === true) {
+ const cmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${sqlContainerPath}`;
+ shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
+ shellExec(`sudo kubectl cp ${nameSpace}/${podName}:${sqlContainerPath} ${_toNewSqlPath}`);
+ await splitFileFactory(dbName, _toNewSqlPath);
+ }
  break;
  }

  case 'mongoose': {
- const podName = `mongodb-0`;
- const nameSpace = 'default';
- shellExec(`sudo kubectl cp ${_toBsonPath} ${nameSpace}/${podName}:/${dbName}`);
- const cmd = `mongorestore -d ${dbName} /${dbName}`;
- shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
+ if (options.import === true) {
+ const podName = `mongodb-0`;
+ shellExec(`sudo kubectl cp ${_toBsonPath} ${nameSpace}/${podName}:/${dbName}`);
+ const cmd = `mongorestore -d ${dbName} /${dbName}`;
+ shellExec(`sudo kubectl exec -i ${podName} -- sh -c "${cmd}"`);
+ }
+ if (options.export === true) {
+ const podName = `backup-access`;
+ const containerBaseBackupPath = '/backup';
+ let timeFolder = shellExec(
+ `sudo kubectl exec -i ${podName} -- sh -c "cd ${containerBaseBackupPath} && ls -a"`,
+ {
+ stdout: true,
+ disableLog: false,
+ silent: true,
+ },
+ ).split(`\n`);
+ timeFolder = timeFolder[timeFolder.length - 2];
+ if (timeFolder === '..') {
+ logger.warn(`Cannot backup available`, { timeFolder });
+ } else {
+ shellExec(
+ `sudo kubectl cp ${nameSpace}/${podName}:${containerBaseBackupPath}/${timeFolder}/${dbName} ${_toNewBsonPath}`,
+ );
+ }
+ }
  break;
  }

@@ -90,6 +129,17 @@ class UnderpostDB {
  }
  }
  }
+ if (options.export === true) {
+ shellExec(`cd ../${repoName} && git add .`);
+ shellExec(
+ `underpost cmt ../${repoName} backup '' '${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
+ newBackupTimestamp,
+ ).toLocaleTimeString()}'`,
+ );
+ shellExec(`cd ../${repoName} && underpost push . ${process.env.GITHUB_USERNAME}/${repoName}`, {
+ disableLog: true,
+ });
+ }
  }
  },
  };
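The callback now covers both directions: --import restores the newest timestamped backup into the mariadb/mongodb pods, while --export dumps from the pods into a new timestamp folder, splits large SQL dumps via splitFileFactory, and commits and pushes the backup repository, dropping the oldest folder once five exist. A minimal sketch of that retention rule (folder names are epoch-millisecond timestamps; 5 is the count implied by `times.length >= 5` above):

const planRotation = (times, newBackupTimestamp, keep = 5) => ({
  remove: times.length >= keep ? Math.min(...times.map(Number)) : null, // oldest folder to delete
  create: newBackupTimestamp, // folder to write the new dump into
});
console.log(planRotation(['1715000000000', '1715100000000'], Date.now()));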
package/src/cli/deploy.js ADDED
@@ -0,0 +1,277 @@
+ import {
+ buildKindPorts,
+ buildPortProxyRouter,
+ buildProxyRouter,
+ Config,
+ getDataDeploy,
+ loadReplicas,
+ } from '../server/conf.js';
+ import { loggerFactory } from '../server/logger.js';
+ import { shellExec } from '../server/process.js';
+ import fs from 'fs-extra';
+ import dotenv from 'dotenv';
+ import Underpost from '../index.js';
+
+ const logger = loggerFactory(import.meta);
+
+ class UnderpostDeploy {
+ static API = {
+ sync(deployList) {
+ const deployGroupId = '_dd';
+ fs.writeFileSync(`./engine-private/deploy/${deployGroupId}.json`, JSON.stringify(deployList.split(',')), 'utf8');
+ return getDataDeploy({
+ buildSingleReplica: true,
+ deployGroupId,
+ });
+ },
+ async routerFactory(deployList, env) {
+ const initEnvPath = `./engine-private/conf/${deployList.split(',')[0]}/.env.${env}`;
+ const initEnvObj = dotenv.parse(fs.readFileSync(initEnvPath, 'utf8'));
+ process.env.PORT = initEnvObj.PORT;
+ process.env.NODE_ENV = env;
+ await Config.build(undefined, 'proxy', deployList);
+ return buildPortProxyRouter(env === 'development' ? 80 : 443, buildProxyRouter());
+ },
+ async buildManifest(deployList, env) {
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
+ if (!deployId) continue;
+
+ const router = await UnderpostDeploy.API.routerFactory(deployId, env);
+ const ports = Object.values(router).map((p) => parseInt(p.split(':')[2]));
+ const fromPort = Math.min(...ports);
+ const toPort = Math.max(...ports);
+ const confServer = loadReplicas(
+ JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8')),
+ 'proxy',
+ );
+
+ fs.mkdirSync(`./engine-private/conf/${deployId}/build/${env}`, { recursive: true });
+ if (env === 'development') fs.mkdirSync(`./manifests/deployment/${deployId}-${env}`, { recursive: true });
+
+ logger.info('port range', { deployId, fromPort, toPort });
+
+ const deploymentYamlParts = `apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: ${deployId}-${env}
+ labels:
+ app: ${deployId}-${env}
+ spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: ${deployId}-${env}
+ template:
+ metadata:
+ labels:
+ app: ${deployId}-${env}
+ spec:
+ containers:
+ - name: ${deployId}-${env}
+ image: localhost/${deployId}-${env}:${Underpost.version}
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: ${deployId}-${env}-service
+ spec:
+ selector:
+ app: ${deployId}-${env}
+ ports:
+ type: LoadBalancer`.split('ports:');
+ deploymentYamlParts[1] =
+ buildKindPorts(fromPort, toPort) +
+ ` type: LoadBalancer
+ `;
+
+ fs.writeFileSync(
+ `./engine-private/conf/${deployId}/build/${env}/deployment.yaml`,
+ deploymentYamlParts.join(`ports:
+ `),
+ );
+
+ let proxyYaml = '';
+ let secretYaml = '';
+
+ for (const host of Object.keys(confServer)) {
+ if (env === 'production')
+ secretYaml += `
+ ---
+ apiVersion: cert-manager.io/v1
+ kind: Certificate
+ metadata:
+ name: ${host}
+ spec:
+ commonName: ${host}
+ dnsNames:
+ - ${host}
+ issuerRef:
+ name: letsencrypt-prod
+ kind: ClusterIssuer
+ secretName: ${host}`;
+
+ const pathPortConditions = [];
+ for (const path of Object.keys(confServer[host])) {
+ const { peer } = confServer[host][path];
+ if (!router[`${host}${path === '/' ? '' : path}`]) continue;
+ const port = parseInt(router[`${host}${path === '/' ? '' : path}`].split(':')[2]);
+ // logger.info('', { host, port, path });
+ pathPortConditions.push({
+ port,
+ path,
+ });
+
+ if (peer) {
+ // logger.info('', { host, port: port + 1, path: '/peer' });
+ pathPortConditions.push({
+ port: port + 1,
+ path: '/peer',
+ });
+ }
+ }
+
+ // logger.info('', { host, pathPortConditions });
+ proxyYaml += `
+ ---
+ apiVersion: projectcontour.io/v1
+ kind: HTTPProxy
+ metadata:
+ name: ${host}
+ spec:
+ virtualhost:
+ fqdn: ${host}${
+ env === 'development'
+ ? ''
+ : `
+ tls:
+ secretName: ${host}`
+ }
+ routes:`;
+ for (const conditionObj of pathPortConditions) {
+ const { path, port } = conditionObj;
+ proxyYaml += `
+ - conditions:
+ - prefix: ${path}
+ enableWebsockets: true
+ services:
+ - name: ${deployId}-${env}-service
+ port: ${port}`;
+ }
+ }
+ const yamlPath = `./engine-private/conf/${deployId}/build/${env}/proxy.yaml`;
+ fs.writeFileSync(yamlPath, proxyYaml, 'utf8');
+ if (env === 'production') {
+ const yamlPath = `./engine-private/conf/${deployId}/build/${env}/secret.yaml`;
+ fs.writeFileSync(yamlPath, secretYaml, 'utf8');
+ } else {
+ const deploymentsFiles = ['Dockerfile', 'proxy.yaml', 'deployment.yaml'];
+ for (const file of deploymentsFiles) {
+ if (fs.existsSync(`./engine-private/conf/${deployId}/build/${env}/${file}`)) {
+ fs.copyFileSync(
+ `./engine-private/conf/${deployId}/build/${env}/${file}`,
+ `./manifests/deployment/${deployId}-${env}/${file}`,
+ );
+ }
+ }
+ }
+ }
+ },
+ async callback(
+ deployList = 'default',
+ env = 'development',
+ options = { remove: false, infoRouter: false, sync: false, buildManifest: false },
+ ) {
+ if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd-router`))
+ deployList = fs.readFileSync(`./engine-private/deploy/dd-router`, 'utf8');
+ if (options.sync) UnderpostDeploy.API.sync(deployList);
+ if (options.buildManifest === true) await UnderpostDeploy.API.buildManifest(deployList, env);
+ if (options.infoRouter === true)
+ return logger.info('router', await UnderpostDeploy.API.routerFactory(deployList, env));
+
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
+ if (!deployId) continue;
+
+ shellExec(`sudo kubectl delete svc ${deployId}-${env}-service`);
+ shellExec(`sudo kubectl delete deployment ${deployId}-${env}`);
+
+ const etcHost = (
+ concat,
+ ) => `127.0.0.1 ${concat} localhost localhost.localdomain localhost4 localhost4.localdomain4
+ ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6`;
+ let concatHots = '';
+
+ const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
+ for (const host of Object.keys(confServer)) {
+ shellExec(`sudo kubectl delete HTTPProxy ${host}`);
+ if (!options.remove === true && env === 'development') concatHots += ` ${host}`;
+ }
+
+ if (!options.remove === true) {
+ shellExec(`sudo kubectl apply -f ./manifests/deployment/${deployId}-${env}/deployment.yaml`);
+ shellExec(`sudo kubectl apply -f ./manifests/deployment/${deployId}-${env}/proxy.yaml`);
+ }
+
+ let renderHosts;
+
+ switch (process.platform) {
+ case 'linux':
+ {
+ switch (env) {
+ case 'development':
+ renderHosts = etcHost(concatHots);
+ fs.writeFileSync(`/etc/hosts`, renderHosts, 'utf8');
+
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ logger.info(
+ `
+ ` + renderHosts,
+ );
+ }
+ },
+ getPods(deployId) {
+ const raw = shellExec(`sudo kubectl get pods --all-namespaces -o wide`, {
+ stdout: true,
+ disableLog: false,
+ silent: true,
+ });
+
+ const heads = raw
+ .split(`\n`)[0]
+ .split(' ')
+ .filter((_r) => _r.trim());
+
+ const pods = raw
+ .split(`\n`)
+ .filter((r) => (deployId ? r.match(deployId) : r.trim() && !r.match('NAME')))
+ .map((r) => r.split(' ').filter((_r) => _r.trim()));
+
+ const result = [];
+
+ for (const row of pods) {
+ const pod = {};
+ let index = -1;
+ for (const head of heads) {
+ index++;
+ pod[head] = row[index];
+ }
+ result.push(pod);
+ }
+
+ return result;
+ },
+ };
+ }
+
+ export default UnderpostDeploy;
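This new module is wired up as `Underpost.deploy` and the `underpost deploy` command elsewhere in this diff: it generates the kind/Contour manifests per deploy id, applies or removes them, and exposes a small pod inspector. A hedged usage sketch, run from inside the engine repository with engine-private present; the deploy id is an example:

import Underpost from 'underpost';
// Regenerate manifests and (re)apply the development deployment for one deploy id.
await Underpost.deploy.callback('dd-core', 'development', { sync: true, buildManifest: true });
// getPods parses `kubectl get pods -o wide` into objects keyed by the header row.
const pods = Underpost.deploy.getPods('dd-core-development');
console.log(pods.map((p) => `${p.NAME} ${p.STATUS}`));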
package/src/cli/image.js CHANGED
@@ -1,7 +1,6 @@
  import fs from 'fs-extra';
  import Underpost from '../index.js';
  import { shellExec } from '../server/process.js';
- import { MariaDB } from '../db/mariadb/MariaDB.js';
  import dotenv from 'dotenv';
  import { getNpmRootPath } from '../server/conf.js';

@@ -45,17 +44,6 @@ class UnderpostImage {
  case 'dd-lampp':
  {
  const lamppPublicPath = '/xampp/htdocs/online';
- if (process.argv.includes('test')) {
- const { MARIADB_HOST, MARIADB_USER, MARIADB_PASSWORD, DD_LAMPP_TEST_DB_0 } = process.env;
-
- await MariaDB.query({
- host: MARIADB_HOST,
- user: MARIADB_USER,
- password: MARIADB_PASSWORD,
- query: `SHOW TABLES FROM ${DD_LAMPP_TEST_DB_0}`,
- });
- process.exit(0);
- }
  shellExec(`sudo mkdir -p ${lamppPublicPath}`);

  {
package/src/cli/repository.js CHANGED
@@ -4,6 +4,7 @@ import { pbcopy, shellExec } from '../server/process.js';
  import { actionInitLog, loggerFactory } from '../server/logger.js';
  import fs from 'fs-extra';
  import { getNpmRootPath } from '../server/conf.js';
+ import { listenPortController, listenServerFactory } from '../server/network.js';

  dotenv.config();

@@ -54,6 +55,10 @@ class UnderpostRepository {
  empty: false,
  },
  ) {
+ if (commitType === 'reset') {
+ shellExec(`cd ${repoPath} && git reset --soft HEAD~${isNaN(parseInt(subModule)) ? 1 : parseInt(subModule)}`);
+ return;
+ }
  if (options.info) return logger.info('', commitData);
  const _message = `${commitType}${subModule ? `(${subModule})` : ''}${process.argv.includes('!') ? '!' : ''}: ${
  commitData[commitType].emoji
@@ -82,9 +87,10 @@ class UnderpostRepository {
  new(repositoryName) {
  return new Promise(async (resolve, reject) => {
  try {
- const exeRootPath = `${getNpmRootPath()}/underpost`;
- actionInitLog();
  await logger.setUpInfo();
+ if (repositoryName === 'service') return resolve(await listenPortController(listenServerFactory(), ':'));
+ else actionInitLog();
+ const exeRootPath = `${getNpmRootPath()}/underpost`;
  const destFolder = `./${repositoryName}`;
  logger.info('Note: This process may take several minutes to complete');
  logger.info('build app', { destFolder });
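Two behaviour changes here: `underpost cmt <path> reset [n]` now performs a soft git reset, with the submodule argument doubling as the number of commits to undo (default 1), and `underpost new service` short-circuits scaffolding into a keep-alive listener, which is what the Dockerfile's new CMD relies on. A sketch of what the reset shortcut runs:

import { execSync } from 'node:child_process';
// Rough equivalent of `underpost cmt . reset 2`:
execSync('git reset --soft HEAD~2', { stdio: 'inherit' });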
package/src/cli/test.js CHANGED
@@ -1,6 +1,8 @@
+ import { MariaDB } from '../db/mariadb/MariaDB.js';
  import { getNpmRootPath } from '../server/conf.js';
  import { actionInitLog, loggerFactory } from '../server/logger.js';
- import { shellExec } from '../server/process.js';
+ import { pbcopy, shellExec } from '../server/process.js';
+ import UnderpostDeploy from './deploy.js';

  const logger = loggerFactory(import.meta);

@@ -26,6 +28,52 @@ class UnderpostTest {
  actionInitLog();
  shellExec(`cd ${getNpmRootPath()}/underpost && npm run test`);
  },
+ async callback(deployList = '', options = { insideContainer: false, sh: false }) {
+ if (options.sh === true) {
+ const [pod] = UnderpostDeploy.API.getPods(deployList);
+ if (pod) return pbcopy(`sudo kubectl exec -it ${pod.NAME} -- sh`);
+ return logger.warn(`Couldn't find pods in deployment`, deployList);
+ }
+ if (deployList) {
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
+ if (!deployId) continue;
+ if (options.insideContainer === true)
+ switch (deployId) {
+ case 'dd-lampp':
+ {
+ const { MARIADB_HOST, MARIADB_USER, MARIADB_PASSWORD, DD_LAMPP_TEST_DB_0 } = process.env;
+
+ await MariaDB.query({
+ host: MARIADB_HOST,
+ user: MARIADB_USER,
+ password: MARIADB_PASSWORD,
+ query: `SHOW TABLES FROM ${DD_LAMPP_TEST_DB_0}`,
+ });
+ }
+ break;
+
+ default:
+ {
+ shellExec('npm run test');
+ }
+
+ break;
+ }
+ else {
+ const pods = UnderpostDeploy.API.getPods(deployId);
+ if (pods.length > 0)
+ for (const deployData of pods) {
+ const { NAME } = deployData;
+ shellExec(
+ `sudo kubectl exec -i ${NAME} -- sh -c "cd /home/dd/engine && underpost test ${deployId} --inside-container"`,
+ );
+ }
+ else logger.warn(`Couldn't find pods in deployment`, { deployId });
+ }
+ }
+ } else return UnderpostTest.API.run();
+ },
  };
  }
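The new test callback either execs the suite inside every pod of a deployment (under /home/dd/engine, the Dockerfile's new WORKDIR) or, with --inside-container, runs the deployment-specific check directly. A usage sketch; the deploy id is an example:

import Underpost from 'underpost';
// Run the in-cluster test inside every pod that matches the deployment.
await Underpost.test.callback('dd-core');
// Or copy a ready-made `kubectl exec -it <pod> -- sh` command to the clipboard.
await Underpost.test.callback('dd-core', { sh: true });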
 
package/src/client/components/core/CommonJs.js CHANGED
@@ -573,6 +573,14 @@ const isValidFormat = (value, format) => {
  }
  };

+ const getCurrentTrace = () => {
+ try {
+ _stack;
+ } catch (error) {
+ return error.stack.split('is not defined')[1];
+ }
+ };
+
  /**
  * Returns the time difference between UTC time and local time, in minutes.
  * @memberof CommonJS
@@ -943,6 +951,7 @@ export {
  commonAdminGuard,
  commonModeratorGuard,
  isChileanIdentityDocument,
+ getCurrentTrace,
  userRoleEnum,
  commitData,
  };
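getCurrentTrace, moved here from VanillaJs.js (see the following hunks), captures a stack trace without throwing at the call site: referencing the intentionally undefined `_stack` raises a ReferenceError whose .stack is split right after the "is not defined" marker, leaving only the caller frames. Usage sketch (import path relative to the package root):

import { getCurrentTrace } from './src/client/components/core/CommonJs.js';
console.log('called from:', getCurrentTrace());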
@@ -9,7 +9,6 @@ import {
  htmls,
  sa,
  getAllChildNodes,
- getCurrentTrace,
  isActiveElement,
  } from './VanillaJs.js';
  import { BtnIcon } from './BtnIcon.js';
@@ -419,14 +419,6 @@ const isActiveTab = () => document.hasFocus();
  const isActiveElement = (classSearch = '') =>
  document.activeElement?.classList?.value?.match(classSearch) ? true : false;

- const getCurrentTrace = () => {
- try {
- _stack;
- } catch (error) {
- return error.stack.split('is not defined')[1];
- }
- };
-
  const isDevInstance = () => location.origin.match('localhost') && location.port;

  const getDataFromInputFile = async (file) => Array.from(new Uint8Array(await file.arrayBuffer()));
@@ -460,7 +452,6 @@ export {
  isNavigator,
  getTimeZone,
  getAllChildNodes,
- getCurrentTrace,
  isActiveTab,
  isActiveElement,
  isDevInstance,
@@ -22,41 +22,44 @@ const Worker = {
  setTimeout(() => {
  if ('onLine' in navigator && navigator.onLine) window.ononline();
  });
- navigator.serviceWorker.addEventListener('controllerchange', () => {
- logger.info('The controller of current browsing context has changed.');
- });
- navigator.serviceWorker.ready.then((worker) => {
- logger.info('Ready', worker);
- // event message
- navigator.serviceWorker.addEventListener('message', (event) => {
- logger.info('Received event message', event.data);
- const { status } = event.data;
+ if ('serviceWorker' in navigator) {
+ navigator.serviceWorker.addEventListener('controllerchange', () => {
+ logger.info('The controller of current browsing context has changed.');
+ });
+ navigator.serviceWorker.ready.then((worker) => {
+ logger.info('Ready', worker);
+ // event message
+ navigator.serviceWorker.addEventListener('message', (event) => {
+ logger.info('Received event message', event.data);
+ const { status } = event.data;

- switch (status) {
- case 'loader':
- {
- LoadingAnimation.RenderCurrentSrcLoad(event);
- }
- break;
+ switch (status) {
+ case 'loader':
+ {
+ LoadingAnimation.RenderCurrentSrcLoad(event);
+ }
+ break;

- default:
- break;
- }
- });
+ default:
+ break;
+ }
+ });

- // if (navigator.serviceWorker.controller)
- // navigator.serviceWorker.controller.postMessage({
- // title: 'Hello from Client event message',
- // });
+ // if (navigator.serviceWorker.controller)
+ // navigator.serviceWorker.controller.postMessage({
+ // title: 'Hello from Client event message',
+ // });
+
+ // broadcast message
+ // const channel = new BroadcastChannel('sw-messages');
+ // channel.addEventListener('message', (event) => {
+ // logger.info('Received broadcast message', event.data);
+ // });
+ // channel.postMessage({ title: 'Hello from Client broadcast message' });
+ // channel.close();
+ });
+ }

- // broadcast message
- // const channel = new BroadcastChannel('sw-messages');
- // channel.addEventListener('message', (event) => {
- // logger.info('Received broadcast message', event.data);
- // });
- // channel.postMessage({ title: 'Hello from Client broadcast message' });
- // channel.close();
- });
  this.RouterInstance = router();
  const isInstall = await this.status();
  if (!isInstall) await this.install();
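The service-worker wiring is now wrapped in a feature check, so environments without navigator.serviceWorker (for example some embedded WebViews or insecure origins) no longer throw during Worker initialization. The guard pattern in isolation:

if ('serviceWorker' in navigator) {
  navigator.serviceWorker.ready.then((registration) => {
    console.log('service worker ready at scope', registration.scope);
  });
} else {
  console.log('Service Worker API not available; skipping registration hooks');
}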
package/src/index.js CHANGED
@@ -6,6 +6,7 @@

  import UnderpostCluster from './cli/cluster.js';
  import UnderpostDB from './cli/db.js';
+ import UnderpostDeploy from './cli/deploy.js';
  import UnderpostRootEnv from './cli/env.js';
  import UnderpostImage from './cli/image.js';
  import UnderpostRepository from './cli/repository.js';
@@ -25,7 +26,7 @@ class Underpost {
  * @type {String}
  * @memberof Underpost
  */
- static version = 'v2.8.46';
+ static version = 'v2.8.48';
  /**
  * Repository cli API
  * @static
@@ -82,6 +83,13 @@ class Underpost {
  * @memberof Underpost
  */
  static db = UnderpostDB.API;
+ /**
+ * Deployment cli API
+ * @static
+ * @type {UnderpostDeploy.API}
+ * @memberof Underpost
+ */
+ static deploy = UnderpostDeploy.API;
  }

  const up = Underpost;
package/src/server/conf.js CHANGED
@@ -40,21 +40,26 @@ const logger = loggerFactory(import.meta);

  const Config = {
  default: DefaultConf,
- build: async function (options = { folder: '' }) {
+ build: async function (options = { folder: '' }, deployContext, deployList, subConf) {
+ if (!deployContext) deployContext = process.argv[2];
  if (!fs.existsSync(`./tmp`)) fs.mkdirSync(`./tmp`, { recursive: true });
  fs.writeFileSync(`./tmp/await-deploy`, '', 'utf8');
- if (fs.existsSync(`./engine-private/conf/${process.argv[2]}`)) return loadConf(process.argv[2]);
- if (fs.existsSync(`./engine-private/replica/${process.argv[2]}`)) return loadConf(process.argv[2]);
+ if (fs.existsSync(`./engine-private/conf/${deployContext}`))
+ return loadConf(deployContext, process.env.NODE_ENV, subConf);
+ if (fs.existsSync(`./engine-private/replica/${deployContext}`))
+ return loadConf(deployContext, process.env.NODE_ENV, subConf);

- if (process.argv[2] === 'deploy') return;
+ if (deployContext === 'deploy') return;

- if (process.argv[2] === 'proxy') {
+ if (deployContext === 'proxy') {
+ if (!deployList) deployList = process.argv[3];
+ if (!subConf) subConf = process.argv[4];
  this.default.server = {};
- for (const deployId of process.argv[3].split(',')) {
+ for (const deployId of deployList.split(',')) {
  let confPath = `./engine-private/conf/${deployId}/conf.server.json`;
  const privateConfDevPath = fs.existsSync(`./engine-private/replica/${deployId}/conf.server.json`)
  ? `./engine-private/replica/${deployId}/conf.server.json`
- : `./engine-private/conf/${deployId}/conf.server.dev.${process.argv[4]}.json`;
+ : `./engine-private/conf/${deployId}/conf.server.dev.${subConf}.json`;
  const confDevPath = fs.existsSync(privateConfDevPath)
  ? privateConfDevPath
  : `./engine-private/conf/${deployId}/conf.server.dev.json`;
@@ -62,7 +67,7 @@ const Config = {
  if (process.env.NODE_ENV === 'development' && fs.existsSync(confDevPath)) confPath = confDevPath;
  const serverConf = JSON.parse(fs.readFileSync(confPath, 'utf8'));

- for (const host of Object.keys(loadReplicas(serverConf))) {
+ for (const host of Object.keys(loadReplicas(serverConf, deployContext, subConf))) {
  if (serverConf[host]['/'])
  this.default.server[host] = {
  ...this.default.server[host],
@@ -92,7 +97,7 @@ const Config = {
  },
  };

- const loadConf = (deployId, envInput) => {
+ const loadConf = (deployId, envInput, subConf) => {
  const folder = fs.existsSync(`./engine-private/replica/${deployId}`)
  ? `./engine-private/replica/${deployId}`
  : `./engine-private/conf/${deployId}`;
@@ -109,7 +114,8 @@ const loadConf = (deployId, envInput) => {
  ? fs.readFileSync(`${folder}/conf.${typeConf}.json`, 'utf8')
  : JSON.stringify(Config.default[typeConf]);
  if (process.env.NODE_ENV === 'development' && typeConf === 'server') {
- const devConfPath = `${folder}/conf.${typeConf}.dev${process.argv[3] ? `.${process.argv[3]}` : ''}.json`;
+ if (!subConf) subConf = process.argv[3];
+ const devConfPath = `${folder}/conf.${typeConf}.dev${subConf ? `.${subConf}` : ''}.json`;
  if (fs.existsSync(devConfPath)) srcConf = fs.readFileSync(devConfPath, 'utf8');
  }
  if (typeConf === 'server') srcConf = JSON.stringify(loadReplicas(JSON.parse(srcConf)), null, 4);
@@ -135,15 +141,17 @@ const loadConf = (deployId, envInput) => {
  return { folder, deployId };
  };

- const loadReplicas = (confServer) => {
+ const loadReplicas = (confServer, deployContext, subConf) => {
+ if (!deployContext) deployContext = process.argv[2];
+ if (!subConf) subConf = process.argv[3];
  for (const host of Object.keys(confServer)) {
  for (const path of Object.keys(confServer[host])) {
  const { replicas, singleReplica } = confServer[host][path];
  if (
  replicas &&
- (process.argv[2] === 'proxy' ||
+ (deployContext === 'proxy' ||
  !singleReplica ||
- (singleReplica && process.env.NODE_ENV === 'development' && !process.argv[3]))
+ (singleReplica && process.env.NODE_ENV === 'development' && !subConf))
  )
  for (const replicaPath of replicas) {
  confServer[host][replicaPath] = newInstance(confServer[host][path]);
@@ -513,14 +521,16 @@ const buildPortProxyRouter = (port, proxyRouter) => {
  // build router
  Object.keys(hosts).map((hostKey) => {
  let { host, path, target, proxy, peer } = hosts[hostKey];
- if (process.env.NODE_ENV === 'development') host = `localhost`;
+ if (process.argv.includes('localhost') && process.env.NODE_ENV === 'development') host = `localhost`;

  if (!proxy.includes(port)) return;
  const absoluteHost = [80, 443].includes(port)
  ? `${host}${path === '/' ? '' : path}`
  : `${host}:${port}${path === '/' ? '' : path}`;

- if (!(absoluteHost in router)) router[absoluteHost] = target;
+ if (process.argv.includes('localhost')) {
+ if (!(absoluteHost in router)) router[absoluteHost] = target;
+ } else router[absoluteHost] = target;
  }); // order router

  if (Object.keys(router).length === 0) return router;
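Config.build, loadConf, and loadReplicas now take the deploy context, deploy list, and sub-conf as explicit arguments instead of reading process.argv, which is what lets deploy.js's routerFactory build a proxy router programmatically; buildPortProxyRouter also only collapses hosts to localhost when 'localhost' is passed on the command line. A hedged usage sketch mirroring routerFactory, run from inside the engine repository; the deploy list is an example:

import { Config, buildProxyRouter, buildPortProxyRouter } from './src/server/conf.js';
process.env.NODE_ENV = 'development';
// Build the proxy configuration for a deploy list without touching process.argv.
await Config.build(undefined, 'proxy', 'dd-core,dd-cyberia');
const router = buildPortProxyRouter(80, buildProxyRouter());
console.log(router);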
package/src/server/network.js CHANGED
@@ -1,7 +1,7 @@
  import fs from 'fs-extra';

  import { publicIp, publicIpv4, publicIpv6 } from 'public-ip';
- import { loggerFactory } from './logger.js';
+ import { actionInitLog, loggerFactory } from './logger.js';
  import { DataBaseProvider } from '../db/DataBaseProvider.js';
  import { getDeployId } from './conf.js';

@@ -77,7 +77,7 @@ const saveRuntimeRouter = async () => {

  if (closeConn) await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
  } catch (error) {
- logger.error(error);
+ logger.error(error, error.stack);
  }
  };

@@ -114,20 +114,30 @@ const saveRuntimeCron = async () => {

  if (closeConn) await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
  } catch (error) {
- logger.error(error);
+ logger.error(error, error.stack);
  }
  };

  const listenServerFactory = (logic = async () => {}) => {
  return {
- listen: async (...args) => (logic ? await logic(...args) : undefined, args[1]()),
+ listen: async (...args) => (
+ setTimeout(() => {
+ const message = 'Listen server factory timeout';
+ logger.error(message);
+ throw new Error(message);
+ }, 80000000), // ~ 55 days
+ (logic ? await logic(...args) : undefined, args[1]())
+ ),
  };
  };

  const listenPortController = async (server, port, metadata) =>
  new Promise((resolve) => {
  try {
- if (!server) server = listenServerFactory();
+ if (port === ':') {
+ server.listen(port, actionInitLog);
+ return resolve(true);
+ }

  const { host, path, client, runtime, meta } = metadata;
  const error = [];
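listenPortController now treats ':' as a sentinel port: it calls the factory server's listen (which runs the provided logic plus the new watchdog timeout) and resolves immediately without binding a real socket, which is how `underpost new service` keeps its process alive. Sketch:

import { listenPortController, listenServerFactory } from './src/server/network.js';
// Keep-alive path used by the new default container command.
await listenPortController(listenServerFactory(), ':');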