underpost 2.8.64 → 2.8.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/.vscode/extensions.json +3 -2
  2. package/.vscode/settings.json +2 -0
  3. package/CHANGELOG.md +24 -4
  4. package/README.md +39 -2
  5. package/bin/deploy.js +1205 -131
  6. package/bin/file.js +8 -0
  7. package/bin/index.js +1 -233
  8. package/cli.md +451 -0
  9. package/docker-compose.yml +1 -1
  10. package/jsdoc.json +1 -1
  11. package/manifests/calico-custom-resources.yaml +25 -0
  12. package/manifests/deployment/adminer/deployment.yaml +32 -0
  13. package/manifests/deployment/adminer/kustomization.yaml +7 -0
  14. package/manifests/deployment/adminer/service.yaml +13 -0
  15. package/manifests/mongodb-4.4/service-deployment.yaml +1 -1
  16. package/manifests/postgresql/configmap.yaml +9 -0
  17. package/manifests/postgresql/kustomization.yaml +10 -0
  18. package/manifests/postgresql/pv.yaml +15 -0
  19. package/manifests/postgresql/pvc.yaml +13 -0
  20. package/manifests/postgresql/service.yaml +10 -0
  21. package/manifests/postgresql/statefulset.yaml +37 -0
  22. package/manifests/valkey/statefulset.yaml +6 -4
  23. package/package.json +3 -9
  24. package/src/api/user/user.service.js +13 -10
  25. package/src/cli/cluster.js +113 -11
  26. package/src/cli/db.js +18 -8
  27. package/src/cli/deploy.js +157 -58
  28. package/src/cli/fs.js +14 -3
  29. package/src/cli/image.js +0 -68
  30. package/src/cli/index.js +312 -0
  31. package/src/cli/monitor.js +170 -26
  32. package/src/cli/repository.js +5 -2
  33. package/src/client/components/core/Account.js +3 -3
  34. package/src/client/components/core/CalendarCore.js +0 -1
  35. package/src/client/components/core/Css.js +0 -1
  36. package/src/client/components/core/CssCore.js +2 -0
  37. package/src/client/components/core/EventsUI.js +1 -1
  38. package/src/client/components/core/JoyStick.js +2 -2
  39. package/src/client/components/core/Modal.js +1 -0
  40. package/src/client/components/core/RichText.js +1 -11
  41. package/src/index.js +9 -8
  42. package/src/mailer/MailerProvider.js +3 -0
  43. package/src/server/client-build.js +13 -0
  44. package/src/server/conf.js +48 -0
  45. package/src/server/dns.js +47 -17
  46. package/src/server/json-schema.js +77 -0
  47. package/src/server/peer.js +2 -2
  48. package/src/server/proxy.js +4 -4
  49. package/src/server/runtime.js +24 -9
  50. package/src/server/start.js +122 -0
  51. package/src/server/valkey.js +25 -11
package/docker-compose.yml CHANGED
@@ -58,7 +58,7 @@ services:
  cpus: '0.25'
  memory: 20M
  labels: # labels in Compose file instead of Dockerfile
- engine.version: '2.8.64'
+ engine.version: '2.8.67'
  networks:
  - load-balancer
package/jsdoc.json CHANGED
@@ -23,7 +23,7 @@
  "homepageTitle": "Docs",
  "favicon": "./public/www.nexodev.org/"
  },
- "tutorials": "./public/www.nexodev.org/docs/learn"
+ "tutorials": "./public/www.nexodev.org/docs/references"
  },
  "markdown": {
  "hardwrap": false,
package/manifests/calico-custom-resources.yaml ADDED
@@ -0,0 +1,25 @@
+ # This section includes base Calico installation configuration.
+ # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
+ apiVersion: operator.tigera.io/v1
+ kind: Installation
+ metadata:
+   name: default
+ spec:
+   # Configures Calico networking.
+   calicoNetwork:
+     # Note: The ipPools section cannot be modified post-install.
+     ipPools:
+       - blockSize: 26
+         cidr: 192.168.0.0/16
+         encapsulation: VXLANCrossSubnet
+         natOutgoing: Enabled
+         nodeSelector: all()
+
+ ---
+ # This section configures the Calico API server.
+ # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+ apiVersion: operator.tigera.io/v1
+ kind: APIServer
+ metadata:
+   name: default
+ spec: {}
package/manifests/deployment/adminer/deployment.yaml ADDED
@@ -0,0 +1,32 @@
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+   name: adminer
+   labels:
+     app: adminer
+     group: db
+ spec:
+   replicas: 1
+   selector:
+     matchLabels:
+       app: adminer
+   template:
+     metadata:
+       labels:
+         app: adminer
+         group: db
+     spec:
+       containers:
+         - name: adminer
+           image: adminer:4.7.6-standalone
+           ports:
+             - containerPort: 8080
+           env:
+             - name: ADMINER_DESIGN
+               value: pepa-linha
+             - name: ADMINER_DEFAULT_SERVER
+               value: postgres
+           resources:
+             limits:
+               memory: '256Mi'
+               cpu: '500m'
package/manifests/deployment/adminer/kustomization.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ # kubectl apply -k manifests/deployment/adminer/.
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ kind: Kustomization
+ resources:
+   - deployment.yaml
+   - service.yaml
package/manifests/deployment/adminer/service.yaml ADDED
@@ -0,0 +1,13 @@
+ apiVersion: v1
+ kind: Service
+ metadata:
+   name: adminer
+   labels:
+     group: db
+ spec:
+   type: ClusterIP
+   selector:
+     app: adminer
+   ports:
+     - port: 8080
+       targetPort: 8080
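A minimal way to try the Adminer manifests above on a local cluster (the apply path comes from the kustomization comment; the local port and default namespace are assumptions):

  kubectl apply -k manifests/deployment/adminer/.
  kubectl port-forward svc/adminer 8080:8080
  # then open http://localhost:8080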
package/manifests/mongodb-4.4/service-deployment.yaml CHANGED
@@ -16,7 +16,7 @@ spec:
  hostname: mongo
  containers:
  - name: mongodb
- image: docker.io/library/mongo:4.4
+ image: mongo:4.4
  command: ['mongod', '--replSet', 'rs0', '--bind_ip_all']
  # -- bash
  # mongo
package/manifests/postgresql/configmap.yaml ADDED
@@ -0,0 +1,9 @@
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+   name: postgres-config
+   labels:
+     app: postgres
+ data:
+   POSTGRES_DB: postgresdb
+   POSTGRES_USER: admin
package/manifests/postgresql/kustomization.yaml ADDED
@@ -0,0 +1,10 @@
+ ---
+ # kubectl apply -k postgresql/.
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ kind: Kustomization
+ resources:
+   - pv.yaml
+   - pvc.yaml
+   - configmap.yaml
+   - statefulset.yaml
+   - service.yaml
package/manifests/postgresql/pv.yaml ADDED
@@ -0,0 +1,15 @@
+ kind: PersistentVolume
+ apiVersion: v1
+ metadata:
+   name: postgres-pv-volume
+   labels:
+     type: local
+     app: postgres
+ spec:
+   storageClassName: manual
+   capacity:
+     storage: 5Gi
+   accessModes:
+     - ReadWriteMany
+   hostPath:
+     path: '/mnt/data'
package/manifests/postgresql/pvc.yaml ADDED
@@ -0,0 +1,13 @@
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+   name: postgres-pv-claim
+   labels:
+     app: postgres
+ spec:
+   storageClassName: manual
+   accessModes:
+     - ReadWriteMany
+   resources:
+     requests:
+       storage: 5Gi
package/manifests/postgresql/service.yaml ADDED
@@ -0,0 +1,10 @@
+ apiVersion: v1
+ kind: Service
+ metadata:
+   name: postgres-service
+ spec:
+   clusterIP: None
+   selector:
+     app: postgres
+   ports:
+     - port: 5432
package/manifests/postgresql/statefulset.yaml ADDED
@@ -0,0 +1,37 @@
+ apiVersion: apps/v1
+ kind: StatefulSet
+ metadata:
+   name: postgres
+ spec:
+   serviceName: postgres
+   replicas: 3
+   selector:
+     matchLabels:
+       app: postgres
+   template:
+     metadata:
+       labels:
+         app: postgres
+     spec:
+       containers:
+         - name: postgres
+           image: postgres:latest
+           imagePullPolicy: Never
+           ports:
+             - containerPort: 5432
+           envFrom:
+             - configMapRef:
+                 name: postgres-config
+           env:
+             - name: POSTGRES_PASSWORD
+               valueFrom:
+                 secretKeyRef:
+                   name: postgres-secret
+                   key: password
+           volumeMounts:
+             - mountPath: /var/lib/postgresql/data
+               name: postgredb
+       volumes:
+         - name: postgredb
+           persistentVolumeClaim:
+             claimName: postgres-pv-claim
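The StatefulSet above reads POSTGRES_PASSWORD from a Secret named postgres-secret (key: password) that is not included in these manifests. src/cli/cluster.js (below) creates it from a password file; an equivalent ad-hoc sketch with a placeholder value is:

  kubectl create secret generic postgres-secret --from-literal=password='<your-password>'
  kubectl apply -k manifests/postgresql/.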
package/manifests/valkey/statefulset.yaml CHANGED
@@ -1,4 +1,3 @@
- ---
  apiVersion: apps/v1
  kind: StatefulSet
  metadata:
@@ -15,9 +14,14 @@ spec:
  labels:
  app: service-valkey
  spec:
+ # Prevent automatic token mounting if you're not using the default ServiceAccount
+ automountServiceAccountToken: false
+
  containers:
  - name: service-valkey
- image: docker.io/valkey/valkey:latest
+ image: valkey/valkey:latest
+ # Ensure you pull only if not present (Never will error if missing)
+ imagePullPolicy: Never
  env:
  - name: TZ
  value: Europe/Zurich
@@ -35,5 +39,3 @@ spec:
  failureThreshold: 2
  periodSeconds: 30
  timeoutSeconds: 5
- restartPolicy: Always
- automountServiceAccountToken: false
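Because imagePullPolicy is now Never, the valkey/valkey image must already be present on the cluster nodes. For a kind-based cluster, the preload flow that src/cli/cluster.js runs behind its pullImage option amounts to:

  docker pull valkey/valkey:latest
  kind load docker-image valkey/valkey:latest
  kubectl apply -k manifests/valkey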
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "type": "module",
  "main": "src/index.js",
  "name": "underpost",
- "version": "2.8.64",
+ "version": "2.8.67",
  "description": "pwa api rest template",
  "scripts": {
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -11,6 +11,7 @@
  "dev": "env-cmd -f .env.development node src/client.dev default",
  "dev-img": "env-cmd -f .env.development node src/server",
  "prod-img": "env-cmd -f .env.production node src/server",
+ "monitor": "pm2 start bin/deploy.js --name monitor -- monitor",
  "dev-api": "env-cmd -f .env.development nodemon --watch src --ignore src/client src/api",
  "dev-client": "env-cmd -f .env.development node src/client.dev",
  "proxy": "node src/proxy proxy",
@@ -69,7 +70,6 @@
  "cors": "^2.8.5",
  "d3": "^7.9.0",
  "dotenv": "^16.3.1",
- "easy-json-schema": "^0.0.2-beta",
  "easymde": "^2.18.0",
  "env-cmd": "^10.1.0",
  "express": "^4.18.2",
@@ -119,13 +119,7 @@
  "vanilla-jsoneditor": "^2.3.2",
  "winston": "^3.11.0"
  },
- "devDependencies": {
- "clean-jsdoc-theme": "^4.3.0",
- "easy-json-schema": "^0.0.2-beta",
- "mocha": "^10.8.2",
- "plantuml": "^0.0.2",
- "swagger-autogen": "^2.23.7"
- },
+ "devDependencies": {},
  "publishConfig": {
  "provenance": true,
  "access": "public",
package/src/api/user/user.service.js CHANGED
@@ -225,8 +225,8 @@ const UserService = {
  } else throw new Error('invalid email or password');

  case 'guest': {
- const user = await ValkeyAPI.valkeyObjectFactory('user', options);
- await ValkeyAPI.setValkeyObject(user.email, user);
+ const user = await ValkeyAPI.valkeyObjectFactory(options, 'user');
+ await ValkeyAPI.setValkeyObject(options, user.email, user);
  return {
  token: hashJWT({ user: UserDto.auth.payload(user) }),
  user: selectDtoFactory(user, UserDto.select.get()),
@@ -325,15 +325,18 @@ const UserService = {
  return await User.find().select(UserDto.select.getAll());

  case 'auth': {
- const user = (await ValkeyAPI.getValkeyObject(req.auth.user.email))
- ? await ValkeyAPI.getValkeyObject(req.auth.user.email)
- : await User.findOne({
- _id: req.auth.user._id,
- });
+ let user;
+ if (req.auth.user._id.match('guest')) {
+ user = await ValkeyAPI.getValkeyObject(options, req.auth.user.email);
+ if (!user) throw new Error('guest user expired');
+ } else
+ user = await User.findOne({
+ _id: req.auth.user._id,
+ });

  const file = await File.findOne({ _id: user.profileImageId });

- if (!file && !(await ValkeyAPI.getValkeyObject(req.auth.user.email))) {
+ if (!file && !(await ValkeyAPI.getValkeyObject(options, req.auth.user.email))) {
  await User.findByIdAndUpdate(
  user._id,
  { profileImageId: await getDefaultProfileImageId(File) },
@@ -342,8 +345,8 @@ const UserService = {
  },
  );
  }
- return (await ValkeyAPI.getValkeyObject(req.auth.user.email))
- ? selectDtoFactory(await ValkeyAPI.getValkeyObject(req.auth.user.email), UserDto.select.get())
+ return (await ValkeyAPI.getValkeyObject(options, req.auth.user.email))
+ ? selectDtoFactory(await ValkeyAPI.getValkeyObject(options, req.auth.user.email), UserDto.select.get())
  : await User.findOne({
  _id: req.auth.user._id,
  }).select(UserDto.select.get());
package/src/cli/cluster.js CHANGED
@@ -1,5 +1,4 @@
- import { timer } from '../client/components/core/CommonJs.js';
- import { cliSpinner, getNpmRootPath } from '../server/conf.js';
+ import { getNpmRootPath } from '../server/conf.js';
  import { loggerFactory } from '../server/logger.js';
  import { shellExec } from '../server/process.js';
  import UnderpostDeploy from './deploy.js';
@@ -15,6 +14,7 @@ class UnderpostCluster {
  mongodb: false,
  mongodb4: false,
  mariadb: false,
+ postgresql: false,
  valkey: false,
  full: false,
  info: false,
@@ -23,10 +23,21 @@ class UnderpostCluster {
  reset: false,
  dev: false,
  nsUse: '',
+ infoCapacity: false,
+ infoCapacityPod: false,
+ istio: false,
+ pullImage: false,
  },
  ) {
+ // 1) Install kind, kubeadm, docker, podman
+ // 2) Check kubectl, kubelet, containerd.io
+ // 3) Install Nvidia drivers from Rocky Linux docs
+ // 4) Install LXD with MAAS from Rocky Linux docs
+ // 5) Install MAAS src from snap
  const npmRoot = getNpmRootPath();
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+ if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
+ if (options.infoCapacity === true) return logger.info('', UnderpostCluster.API.getResourcesCapacity());
  if (options.reset === true) return await UnderpostCluster.API.reset();
  if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
@@ -63,26 +74,62 @@ class UnderpostCluster {
  shellExec(`kubectl get secrets --all-namespaces -o wide`);
  shellExec(`docker secret ls`);
  shellExec(`kubectl get crd --all-namespaces -o wide`);
+ shellExec(`sudo kubectl api-resources`);
  return;
  }

- if (!UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) {
+ if (
+ (!options.istio && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
+ (options.istio === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0])
+ ) {
+ shellExec(`sudo setenforce 0`);
+ shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+ // sudo systemctl disable kubelet
+ // shellExec(`sudo systemctl enable --now kubelet`);
  shellExec(`containerd config default > /etc/containerd/config.toml`);
  shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
  // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
- shellExec(`sudo systemctl restart kubelet`);
+ // shellExec(`sudo systemctl restart kubelet`);
  shellExec(`sudo service docker restart`);
  shellExec(`sudo systemctl enable --now containerd.service`);
- shellExec(`sudo systemctl restart containerd`);
- shellExec(
- `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
- options?.dev === true ? '-dev' : ''
- }.yaml`,
- );
- shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+ if (options.istio === true) {
+ shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+ shellExec(`sudo kubeadm init --pod-network-cidr=192.168.0.0/16`);
+ shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
+ shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+ shellExec(
+ `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
+ );
+ // shellExec(
+ // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
+ // );
+ shellExec(`sudo kubectl apply -f ./manifests/calico-custom-resources.yaml`);
+ shellExec(`sudo systemctl restart containerd`);
+ } else {
+ shellExec(`sudo systemctl restart containerd`);
+ shellExec(
+ `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
+ options?.dev === true ? '-dev' : ''
+ }.yaml`,
+ );
+ shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ }
  } else logger.warn('Cluster already initialized');

  if (options.full === true || options.valkey === true) {
+ if (options.pullImage === true) {
+ // kubectl patch statefulset service-valkey --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
+ // kubectl patch statefulset service-valkey -p '{"spec":{"template":{"spec":{"containers":[{"name":"service-valkey","imagePullPolicy":"Never"}]}}}}'
+ shellExec(`docker pull valkey/valkey`);
+ // shellExec(`sudo kind load docker-image valkey/valkey`);
+ // shellExec(`sudo podman pull docker.io/valkey/valkey:latest`);
+ // shellExec(`podman save -o valkey.tar valkey/valkey`);
+ // shellExec(`sudo kind load image-archive valkey.tar`);
+ // shellExec(`sudo rm -rf ./valkey.tar`);
+ shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+ }
  shellExec(`kubectl delete statefulset service-valkey`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
  }
@@ -96,7 +143,17 @@ class UnderpostCluster {
  shellExec(`kubectl delete statefulset mariadb-statefulset`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
  }
+ if (options.full === true || options.postgresql === true) {
+ shellExec(
+ `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+ );
+ shellExec(`kubectl apply -k ./manifests/postgresql`);
+ }
  if (options.mongodb4 === true) {
+ if (options.pullImage === true) {
+ shellExec(`docker pull mongo:4.4`);
+ shellExec(`sudo kind load docker-image mongo:4.4`);
+ }
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);

  const deploymentName = 'mongodb-deployment';
@@ -196,6 +253,51 @@ class UnderpostCluster {
  `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
  );
  shellExec(`sudo podman system reset -f`);
+ // https://github.com/kubernetes-sigs/kind/issues/2886
+ shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
+ shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
+ shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+ shellExec(`docker network rm kind`);
+ },
+ getResourcesCapacity() {
+ const resources = {};
+ const info = true
+ ? `Capacity:
+ cpu: 8
+ ephemeral-storage: 153131976Ki
+ hugepages-1Gi: 0
+ hugepages-2Mi: 0
+ memory: 11914720Ki
+ pods: 110
+ Allocatable:
+ cpu: 8
+ ephemeral-storage: 153131976Ki
+ hugepages-1Gi: 0
+ hugepages-2Mi: 0
+ memory: 11914720Ki
+ pods: `
+ : shellExec(`kubectl describe node kind-worker | grep -E '(Allocatable:|Capacity:)' -A 6`, {
+ stdout: true,
+ silent: true,
+ });
+ info
+ .split('Allocatable:')[1]
+ .split('\n')
+ .filter((row) => row.match('cpu') || row.match('memory'))
+ .map((row) => {
+ if (row.match('cpu'))
+ resources.cpu = {
+ value: parseInt(row.split(':')[1].trim()) * 1000,
+ unit: 'm',
+ };
+ if (row.match('memory'))
+ resources.memory = {
+ value: parseInt(row.split(':')[1].split('Ki')[0].trim()),
+ unit: 'Ki',
+ };
+ });
+
+ return resources;
  },
  };
  }
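For reference, getResourcesCapacity() parses the Allocatable block of node output in the shape produced by the kubectl call in its alternate branch; with the sample values embedded above, the parsed result works out to roughly:

  kubectl describe node kind-worker | grep -E '(Allocatable:|Capacity:)' -A 6
  # parsed into: { cpu: { value: 8000, unit: 'm' }, memory: { value: 11914720, unit: 'Ki' } }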
package/src/cli/db.js CHANGED
@@ -15,11 +15,13 @@ class UnderpostDB {
  export: false,
  podName: false,
  ns: false,
- collection: '',
+ collections: '',
  outPath: '',
  drop: false,
  preserveUUID: false,
  git: false,
+ hosts: '',
+ paths: '',
  },
  ) {
  const newBackupTimestamp = new Date().getTime();
@@ -39,20 +41,28 @@ class UnderpostDB {
  if (!dbs[provider]) dbs[provider] = {};

  if (!(name in dbs[provider]))
- dbs[provider][name] = { user, password, hostFolder: host + path.replaceAll('/', '-') };
+ dbs[provider][name] = { user, password, hostFolder: host + path.replaceAll('/', '-'), host, path };
  }
  }
  }

- if (!fs.existsSync(`../${repoName}`)) {
- shellExec(`cd .. && underpost clone ${process.env.GITHUB_USERNAME}/${repoName}`);
- } else {
- shellExec(`cd ../${repoName} && underpost pull . ${process.env.GITHUB_USERNAME}/${repoName}`);
+ if (options.git === true) {
+ if (!fs.existsSync(`../${repoName}`)) {
+ shellExec(`cd .. && underpost clone ${process.env.GITHUB_USERNAME}/${repoName}`);
+ } else {
+ shellExec(`cd ../${repoName} && git checkout . && git clean -f -d`);
+ shellExec(`cd ../${repoName} && underpost pull . ${process.env.GITHUB_USERNAME}/${repoName}`);
+ }
  }

  for (const provider of Object.keys(dbs)) {
  for (const dbName of Object.keys(dbs[provider])) {
- const { hostFolder, user, password } = dbs[provider][dbName];
+ const { hostFolder, user, password, host, path } = dbs[provider][dbName];
+ if (
+ (options.hosts && !options.hosts.split(',').includes(host)) ||
+ (options.paths && !options.paths.split(',').includes(path))
+ )
+ continue;
  if (hostFolder) {
  logger.info('', { hostFolder, provider, dbName });

@@ -153,7 +163,7 @@ class UnderpostDB {
  const podName = podNameData.NAME;
  shellExec(`sudo kubectl exec -i ${podName} -- sh -c "rm -rf /${dbName}"`);
  if (options.collections)
- for (const collection of options.collections)
+ for (const collection of options.collections.split(','))
  shellExec(
  `sudo kubectl exec -i ${podName} -- sh -c "mongodump -d ${dbName} --collection ${collection} -o /"`,
  );