underpost 2.8.65 → 2.8.71

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/.vscode/extensions.json +3 -2
  2. package/.vscode/settings.json +2 -0
  3. package/CHANGELOG.md +24 -4
  4. package/README.md +39 -2
  5. package/bin/deploy.js +1351 -145
  6. package/bin/file.js +8 -0
  7. package/bin/index.js +1 -240
  8. package/cli.md +451 -0
  9. package/docker-compose.yml +1 -1
  10. package/jsdoc.json +1 -1
  11. package/manifests/calico-custom-resources.yaml +25 -0
  12. package/manifests/deployment/adminer/deployment.yaml +32 -0
  13. package/manifests/deployment/adminer/kustomization.yaml +7 -0
  14. package/manifests/deployment/adminer/service.yaml +13 -0
  15. package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
  16. package/manifests/deployment/fastapi/backend-service.yml +19 -0
  17. package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
  18. package/manifests/deployment/fastapi/frontend-service.yml +15 -0
  19. package/manifests/deployment/fastapi/initial_data.sh +56 -0
  20. package/manifests/deployment/kafka/deployment.yaml +69 -0
  21. package/manifests/kubeadm-calico-config.yaml +119 -0
  22. package/manifests/mongodb-4.4/service-deployment.yaml +1 -1
  23. package/manifests/postgresql/configmap.yaml +9 -0
  24. package/manifests/postgresql/kustomization.yaml +10 -0
  25. package/manifests/postgresql/pv.yaml +15 -0
  26. package/manifests/postgresql/pvc.yaml +13 -0
  27. package/manifests/postgresql/service.yaml +10 -0
  28. package/manifests/postgresql/statefulset.yaml +37 -0
  29. package/manifests/valkey/statefulset.yaml +6 -4
  30. package/package.json +2 -1
  31. package/src/cli/cluster.js +163 -18
  32. package/src/cli/deploy.js +68 -8
  33. package/src/cli/fs.js +14 -3
  34. package/src/cli/image.js +1 -1
  35. package/src/cli/index.js +312 -0
  36. package/src/cli/monitor.js +93 -39
  37. package/src/client/components/core/JoyStick.js +2 -2
  38. package/src/client/components/core/Modal.js +1 -0
  39. package/src/index.js +1 -1
  40. package/src/server/client-build.js +13 -0
  41. package/src/server/conf.js +5 -1
  42. package/src/server/dns.js +47 -17
  43. package/src/server/runtime.js +2 -0
  44. package/src/server/start.js +0 -1
package/src/cli/cluster.js CHANGED
@@ -14,6 +14,7 @@ class UnderpostCluster {
  mongodb: false,
  mongodb4: false,
  mariadb: false,
+ postgresql: false,
  valkey: false,
  full: false,
  info: false,
@@ -24,8 +25,16 @@ class UnderpostCluster {
  nsUse: '',
  infoCapacity: false,
  infoCapacityPod: false,
+ istio: false,
+ pullImage: false,
  },
  ) {
+ // sudo dnf update
+ // 1) Install kind, kubeadm, docker, podman
+ // 2) Check kubectl, kubelet, containerd.io
+ // 3) Install Nvidia drivers from Rocky Linux docs
+ // 4) Install LXD with MAAS from Rocky Linux docs
+ // 5) Install MAAS src from snap
  const npmRoot = getNpmRootPath();
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
  if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
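
The checklist in the new comments above only sketches host prerequisites. A minimal, hypothetical expansion in the same shellExec style (Rocky Linux assumed; the dnf repositories for the Docker and Kubernetes packages must already be configured, and the kind version is illustrative):

    // Hypothetical prerequisite setup; not part of the published code.
    shellExec(`sudo dnf update -y`);
    shellExec(`sudo dnf install -y podman docker-ce containerd.io kubeadm kubelet kubectl`);
    // kind ships as a static binary:
    shellExec(`curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-amd64`);
    shellExec(`chmod +x ./kind && sudo mv ./kind /usr/local/bin/kind`);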
@@ -66,26 +75,55 @@ class UnderpostCluster {
  shellExec(`kubectl get secrets --all-namespaces -o wide`);
  shellExec(`docker secret ls`);
  shellExec(`kubectl get crd --all-namespaces -o wide`);
+ shellExec(`sudo kubectl api-resources`);
  return;
  }

- if (!UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) {
+ if (
+ (!options.istio && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
+ (options.istio === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0])
+ ) {
+ shellExec(`sudo setenforce 0`);
+ shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+ // sudo systemctl disable kubelet
+ // shellExec(`sudo systemctl enable --now kubelet`);
  shellExec(`containerd config default > /etc/containerd/config.toml`);
  shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
  // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
- shellExec(`sudo systemctl restart kubelet`);
+ // shellExec(`sudo systemctl restart kubelet`);
  shellExec(`sudo service docker restart`);
  shellExec(`sudo systemctl enable --now containerd.service`);
- shellExec(`sudo systemctl restart containerd`);
- shellExec(
- `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
- options?.dev === true ? '-dev' : ''
- }.yaml`,
- );
- shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+ if (options.istio === true) {
+ shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+ shellExec(`sudo kubeadm init --pod-network-cidr=192.168.0.0/16`);
+ shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
+ shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
+ shellExec(
+ `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
+ );
+ // shellExec(
+ // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
+ // );
+ shellExec(`sudo kubectl apply -f ./manifests/kubeadm-calico-config.yaml`);
+ shellExec(`sudo systemctl restart containerd`);
+ } else {
+ shellExec(`sudo systemctl restart containerd`);
+ shellExec(
+ `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
+ options?.dev === true ? '-dev' : ''
+ }.yaml`,
+ );
+ shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+ }
  } else logger.warn('Cluster already initialized');

  if (options.full === true || options.valkey === true) {
+ if (options.pullImage === true) {
+ shellExec(`docker pull valkey/valkey`);
+ shellExec(`sudo kind load docker-image valkey/valkey:latest`);
+ }
  shellExec(`kubectl delete statefulset service-valkey`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
  }
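
The new istio branch bootstraps a kubeadm control plane and installs the Calico Tigera operator, and the surrounding condition treats the presence of a calico-kube-controllers pod as the "cluster already initialized" signal. A hedged sketch of a post-init verification that fits this flow (not part of the published code):

    // Hypothetical readiness check after `kubeadm init` + Calico install:
    shellExec(`kubectl wait --for=condition=Ready nodes --all --timeout=300s`);
    shellExec(`kubectl get pods -n calico-system -o wide`);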
@@ -99,7 +137,21 @@ class UnderpostCluster {
  shellExec(`kubectl delete statefulset mariadb-statefulset`);
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
  }
+ if (options.full === true || options.postgresql === true) {
+ if (options.pullImage === true) {
+ shellExec(`docker pull postgres:latest`);
+ shellExec(`sudo kind load docker-image postgres:latest`);
+ }
+ shellExec(
+ `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
+ );
+ shellExec(`kubectl apply -k ./manifests/postgresql`);
+ }
  if (options.mongodb4 === true) {
+ if (options.pullImage === true) {
+ shellExec(`docker pull mongo:4.4`);
+ shellExec(`sudo kind load docker-image mongo:4.4`);
+ }
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);

  const deploymentName = 'mongodb-deployment';
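
postgres-secret is created from a plain password file, with the key name `password` coming from the `--from-file=password=...` flag. A quick, hypothetical way to confirm the secret and the workload after `kubectl apply -k ./manifests/postgresql` (the statefulset name is assumed; the real name lives in manifests/postgresql/statefulset.yaml, which this diff does not show inline):

    // Hypothetical verification; 'postgresql' as the statefulset name is an assumption.
    shellExec(`kubectl get secret postgres-secret -o jsonpath='{.data.password}' | base64 -d`);
    shellExec(`kubectl rollout status statefulset/postgresql --timeout=120s`);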
@@ -171,35 +223,128 @@ class UnderpostCluster {
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
  }
  },
+ // This function performs a comprehensive reset of Kubernetes and container environments
+ // on the host machine. Its primary goal is to clean up cluster components, temporary files,
+ // and container data, ensuring a clean state for re-initialization or fresh deployments,
+ // while also preventing the loss of the host machine's internet connectivity.
+
  reset() {
+ // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
+ // 'kind get clusters' lists all Kind clusters.
+ // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
+ // and executes 'kind delete cluster --name <cluster_name>' to remove them.
  shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
+
+ // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
+ // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
+ // configuration files, and associated network rules (like iptables entries created by kubeadm).
+ // The '-f' flag bypasses confirmation prompts.
  shellExec(`sudo kubeadm reset -f`);
+
+ // Step 3: Remove specific CNI (Container Network Interface) configuration files.
+ // This command targets and removes the configuration file for Flannel,
+ // a common CNI plugin, which might be left behind after a reset.
  shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
- shellExec('sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X');
+
+ // Note: the aggressive 'sudo iptables -F ...' command present in previous versions was intentionally removed.
+ // That command would flush all iptables rules, including those crucial for the host's general
+ // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
+ // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
+ // default network configuration.
+
+ // Step 4: Remove the kubectl configuration file from the current user's home directory.
+ // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
+ // providing a clean slate for connecting to a new or re-initialized cluster.
  shellExec('sudo rm -f $HOME/.kube/config');
+
+ // Step 5: Clear trash files from the root user's trash directory.
+ // This is a general cleanup step to remove temporary or deleted files.
  shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
+
+ // Step 6: Prune all unused Docker data.
+ // 'docker system prune -a -f' removes:
+ // - All stopped containers
+ // - All unused networks
+ // - All dangling images
+ // - All build cache
+ // - All unused volumes
+ // This aggressively frees up disk space and removes temporary Docker artifacts.
  shellExec('sudo docker system prune -a -f');
+
+ // Step 7: Stop the Docker daemon service.
+ // This step is often necessary to ensure that Docker's files and directories
+ // can be safely manipulated or moved in subsequent steps without conflicts.
  shellExec('sudo service docker stop');
+
+ // Step 8: Aggressively remove container storage data for containerd and Docker.
+ // These commands target the default storage locations for containerd and Docker,
+ // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
+ // This ensures a complete wipe of all container images, layers, and volumes.
  shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
  shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
- shellExec(`sudo rm -rf /var/lib/docker~/*`);
- shellExec(`sudo rm -rf /home/containers/storage/*`);
- shellExec(`sudo rm -rf /home/docker/*`);
- shellExec('sudo mv /var/lib/docker /var/lib/docker~');
- shellExec('sudo mkdir /home/docker');
- shellExec('sudo chmod 0711 /home/docker');
- shellExec('sudo ln -s /home/docker /var/lib/docker');
+ shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
+ shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
+ shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
+
+ // Step 9: Re-configure Docker's default storage location (if desired).
+ // These commands effectively move Docker's data directory from its default `/var/lib/docker`
+ // to a new location (`/home/docker`) and create a symbolic link.
+ // This is a specific customization to relocate Docker's storage.
+ shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
+ shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
+ shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
+ shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
+
+ // Step 10: Prune all unused Podman data.
+ // Similar to Docker pruning, these commands remove:
+ // - All stopped containers
+ // - All unused networks
+ // - All unused images
+ // - All unused volumes ('--volumes')
+ // - The '--force' flag bypasses confirmation.
+ // '--external' prunes external content not managed by Podman's default storage backend.
  shellExec(`sudo podman system prune -a -f`);
  shellExec(`sudo podman system prune --all --volumes --force`);
  shellExec(`sudo podman system prune --external --force`);
- shellExec(`sudo podman system prune --all --volumes --force`);
+ shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
+
+ // Step 11: Create and set permissions for Podman's custom storage directory.
+ // This ensures the custom path `/home/containers/storage` exists and has correct permissions
+ // before Podman attempts to use it.
  shellExec(`sudo mkdir -p /home/containers/storage`);
  shellExec('sudo chmod 0711 /home/containers/storage');
+
+ // Step 12: Update Podman's storage configuration file.
+ // This command uses 'sed' to modify `/etc/containers/storage.conf`,
+ // changing the default storage path from `/var/lib/containers/storage`
+ // to the customized `/home/containers/storage`.
  shellExec(
  `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
  );
+
+ // Step 13: Reset Podman system settings.
+ // This command resets Podman's system-wide configuration to its default state.
  shellExec(`sudo podman system reset -f`);
+
+ // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
+ // were previously removed. These sysctl settings (bridge-nf-call-iptables,
+ // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
+ // network traffic through Linux bridges to be processed by iptables.
+ // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
+ // Re-initializing Kubernetes will typically set these as needed, and leaving them
+ // at their system default (or '1' if already configured) is safer for host
+ // connectivity during a reset operation.
+
+ // https://github.com/kubernetes-sigs/kind/issues/2886
+ // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
+ // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
+ // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+
+ // Step 14: Remove the 'kind' Docker network.
+ // This cleans up any network bridges or configurations specifically created by Kind.
+ shellExec(`docker network rm kind`);
  },
+
  getResourcesCapacity() {
  const resources = {};
  const info = false
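
Since the whole point of the reworked reset() is to avoid flushing host iptables rules, a simple connectivity check after a reset is a reasonable complement. A hypothetical sketch, not part of the published code:

    // Confirm the host kept its default route and name resolution after cleanup.
    shellExec(`ping -c 1 -W 2 8.8.8.8`);
    shellExec(`curl -4 -sS -o /dev/null -w "%{http_code}\n" https://google.com`);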
package/src/cli/deploy.js CHANGED
@@ -22,11 +22,11 @@ class UnderpostDeploy {
  static NETWORK = {};
  static API = {
  sync(deployList, { versions, replicas }) {
- const deployGroupId = 'dd.tmp';
+ const deployGroupId = 'dd.router';
  fs.writeFileSync(`./engine-private/deploy/${deployGroupId}`, deployList, 'utf8');
  const totalPods = deployList.split(',').length * versions.split(',').length * parseInt(replicas);
- const limitFactor = 0.95;
- const reserveFactor = 0.35;
+ const limitFactor = 0.8;
+ const reserveFactor = 0.05;
  const resources = UnderpostCluster.API.getResourcesCapacity();
  const memory = parseInt(resources.memory.value / totalPods);
  const cpu = parseInt(resources.cpu.value / totalPods);
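
For context, sync() splits cluster capacity evenly across the pods implied by the deploy list, and this release tightens both factors (limit 0.95 → 0.8, reserve 0.35 → 0.05). A worked example of the arithmetic shown above, with invented numbers (the hunk does not show where the factors are applied, so the last line is an assumption):

    const deployList = 'dd-core,dd-cron'; // hypothetical: 2 deployments
    const versions = 'blue,green'; // 2 versions
    const replicas = 1; // the new default (was 2)
    const totalPods = 2 * 2 * 1; // 4, per the formula above
    const memory = parseInt(8000 / totalPods); // 2000, assuming 8000 Mi capacity
    // Presumably: limit ≈ 2000 * 0.8 = 1600 Mi, reservation ≈ 2000 * 0.05 = 100 Mi per pod.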
@@ -220,6 +220,14 @@ spec:
  }
  }
  },
+ getCurrentTraffic(deployId) {
+ // kubectl get deploy,sts,svc,configmap,secret -n default -o yaml --export > default.yaml
+ const hostTest = Object.keys(
+ JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8')),
+ )[0];
+ const info = shellExec(`sudo kubectl get HTTPProxy/${hostTest} -o yaml`, { silent: true, stdout: true });
+ return info.match('blue') ? 'blue' : info.match('green') ? 'green' : null;
+ },
  async callback(
  deployList = 'default',
  env = 'development',
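
getCurrentTraffic() infers which blue/green version is live by reading the first host from the deployment's conf.server.json and grepping the YAML of its HTTPProxy (a Contour resource) for either version name. A hypothetical usage sketch:

    // 'dd-core' is an invented deployId.
    const active = UnderpostDeploy.API.getCurrentTraffic('dd-core');
    // Returns 'blue', 'green', or null (e.g. before the first blue-green rollout exists).
    const next = active === 'blue' ? 'green' : 'blue';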
@@ -235,6 +243,9 @@ spec:
  traffic: '',
  dashboardUpdate: false,
  replicas: '',
+ disableUpdateDeployment: false,
+ infoTraffic: false,
+ rebuildClientsBundle: false,
  },
  ) {
  if (options.infoUtil === true)
@@ -242,11 +253,39 @@ spec:
  kubectl rollout restart deployment/deployment-name
  kubectl rollout undo deployment/deployment-name
  kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
- `);
+ kubectl get pods -w
+ kubectl patch statefulset service-valkey --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
+ kubectl patch statefulset service-valkey -p '{"spec":{"template":{"spec":{"containers":[{"name":"service-valkey","imagePullPolicy":"Never"}]}}}}'
+ kubectl logs -f <pod-name>
+ kubectl describe pod <pod-name>
+ kubectl exec -it <pod-name> -- bash
+ kubectl exec -it <pod-name> -- sh
+ docker exec -it kind-control-plane bash
+ curl -4 -v google.com
+ kubectl taint nodes <node-name> node-role.kubernetes.io/control-plane:NoSchedule-
+ kubectl run test-pod --image=busybox:latest --restart=Never -- /bin/sh -c "while true; do sleep 30; done;"
+ kubectl run test-pod --image=alpine/curl:latest --restart=Never -- sh -c "sleep infinity"
+ kubectl get ippools -o yaml
+ kubectl get node <node-name> -o jsonpath='{.spec.podCIDR}'
+ kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "10.244.0.0/16"}]'
+ kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "192.168.0.0/24"}]'
+ `);
  if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd.router`))
  deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
+ if (options.infoTraffic === true) {
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
+ logger.info('', {
+ deployId,
+ env,
+ traffic: UnderpostDeploy.API.getCurrentTraffic(deployId),
+ });
+ }
+ return;
+ }
+ if (options.rebuildClientsBundle === true) await UnderpostDeploy.API.rebuildClientsBundle(deployList);
  if (!(options.versions && typeof options.versions === 'string')) options.versions = 'blue,green';
- if (!options.replicas) options.replicas = 2;
+ if (!options.replicas) options.replicas = 1;
  if (options.sync) UnderpostDeploy.API.sync(deployList, options);
  if (options.buildManifest === true) await UnderpostDeploy.API.buildManifest(deployList, env, options);
  if (options.infoRouter === true) logger.info('router', await UnderpostDeploy.API.routerFactory(deployList, env));
@@ -275,8 +314,12 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
  shellExec(`sudo kubectl port-forward -n default svc/${svc.NAME} ${port}:${port}`, { async: true });
  continue;
  }
- shellExec(`sudo kubectl delete svc ${deployId}-${env}-service`);
- shellExec(`sudo kubectl delete deployment ${deployId}-${env}`);
+
+ if (!options.disableUpdateDeployment)
+ for (const version of options.versions.split(',')) {
+ shellExec(`sudo kubectl delete svc ${deployId}-${env}-${version}-service`);
+ shellExec(`sudo kubectl delete deployment ${deployId}-${env}-${version}`);
+ }

  const confServer = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'));
  for (const host of Object.keys(confServer)) {
@@ -291,7 +334,7 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
  : `manifests/deployment/${deployId}-${env}`;

  if (!options.remove === true) {
- shellExec(`sudo kubectl apply -f ./${manifestsPath}/deployment.yaml`);
+ if (!options.disableUpdateDeployment) shellExec(`sudo kubectl apply -f ./${manifestsPath}/deployment.yaml`);
  shellExec(`sudo kubectl apply -f ./${manifestsPath}/proxy.yaml`);
  if (env === 'production' && options.cert === true)
  shellExec(`sudo kubectl apply -f ./${manifestsPath}/secret.yaml`);
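
Together with the version-aware delete loop above, disableUpdateDeployment enables proxy-only updates: deployments and services stay untouched while proxy.yaml is re-applied, which is what a blue-green traffic switch needs. A hypothetical invocation (the deployId is invented; traffic and disableUpdateDeployment come from the options shown in this diff):

    await UnderpostDeploy.API.callback('dd-core', 'production', {
      traffic: 'green', // route to the green version
      disableUpdateDeployment: true, // skip deployment.yaml apply/delete
    });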
@@ -354,6 +397,23 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>

  return result;
  },
+ rebuildClientsBundle(deployList) {
+ for (const _deployId of deployList.split(',')) {
+ const deployId = _deployId.trim();
+ const repoName = `engine-${deployId.split('-')[1]}`;
+
+ shellExec(`underpost script set ${deployId}-client-build '
+ cd /home/dd/engine &&
+ git checkout . &&
+ underpost pull . underpostnet/${repoName} &&
+ underpost pull ./engine-private underpostnet/${repoName}-private &&
+ underpost env ${deployId} production &&
+ node bin/deploy build-full-client ${deployId}
+ '`);
+
+ shellExec(`node bin script run ${deployId}-client-build --itc --pod-name ${deployId}`);
+ }
+ },
  resourcesFactory() {
  return {
  requests: {
package/src/cli/fs.js CHANGED
@@ -24,7 +24,7 @@ class UnderpostFileStorage {
  getStorageConf(options) {
  let storage, storageConf;
  if (options.deployId && typeof options.deployId === 'string') {
- storageConf = `./engine-private/conf/${options.deployId}/storage.json`;
+ storageConf = options.storageFilePath ?? `./engine-private/conf/${options.deployId}/storage.json`;
  if (!fs.existsSync(storageConf)) fs.writeFileSync(storageConf, JSON.stringify({}), 'utf8');
  storage = JSON.parse(fs.readFileSync(storageConf, 'utf8'));
  }
@@ -35,7 +35,15 @@ class UnderpostFileStorage {
  },
  async recursiveCallback(
  path,
- options = { rm: false, recursive: false, deployId: '', force: false, pull: false, git: false },
+ options = {
+ rm: false,
+ recursive: false,
+ deployId: '',
+ force: false,
+ pull: false,
+ git: false,
+ storageFilePath: '',
+ },
  ) {
  const { storage, storageConf } = UnderpostFileStorage.API.getStorageConf(options);
  const deleteFiles = options.pull === true ? [] : UnderpostRepository.API.getDeleteFiles(path);
@@ -85,7 +93,10 @@ class UnderpostFileStorage {
  if (options.rm === true) return await UnderpostFileStorage.API.delete(path, options);
  return await UnderpostFileStorage.API.upload(path, options);
  },
- async upload(path, options = { rm: false, recursive: false, deployId: '', force: false, pull: false }) {
+ async upload(
+ path,
+ options = { rm: false, recursive: false, deployId: '', force: false, pull: false, storageFilePath: '' },
+ ) {
  UnderpostFileStorage.API.cloudinaryConfig();
  const { storage, storageConf } = UnderpostFileStorage.API.getStorageConf(options);
  // path = UnderpostFileStorage.API.file2Zip(path);
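
The new storageFilePath option overrides where the storage index JSON is read and written; note from getStorageConf above that it is only honored when deployId is also set. A hypothetical usage sketch (paths and deployId are invented):

    await UnderpostFileStorage.API.recursiveCallback('./public/uploads', {
      recursive: true,
      deployId: 'dd-core', // still required for storageFilePath to take effect
      storageFilePath: './engine-private/conf/dd-core/storage-alt.json',
    });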
package/src/cli/image.js CHANGED
@@ -54,7 +54,7 @@ class UnderpostImage {
  shellExec(
  `cd ${path}${secretsInput}&& sudo podman build -f ./${
  dockerfileName && typeof dockerfileName === 'string' ? dockerfileName : 'Dockerfile'
- } -t ${imageName} --pull=never --cap-add=CAP_AUDIT_WRITE${cache}${secretDockerInput}`,
+ } -t ${imageName} --pull=never --cap-add=CAP_AUDIT_WRITE${cache}${secretDockerInput} --network host`,
  );

  if (podmanSave === true) shellExec(`podman save -o ${tarFile} ${podManImg}`);
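
Adding --network host makes the podman build share the host network namespace, which pairs with --pull=never: base images must already be present locally, and any network access during the build (package mirrors, registries reachable only from the host) goes through host networking. The rendered command now has roughly this shape (values are illustrative):

    // cd /home/dd/engine && sudo podman build -f ./Dockerfile -t my-image \
    //   --pull=never --cap-add=CAP_AUDIT_WRITE --network host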