underpost 2.8.821 → 2.8.832

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.vscode/extensions.json CHANGED
@@ -6,7 +6,6 @@
  "eamodio.gitlens",
  "esbenp.prettier-vscode",
  "formulahendry.code-runner",
- "foxundermoon.shell-format",
  "github.codespaces",
  "github.vscode-github-actions",
  "golang.go",
@@ -31,6 +30,7 @@
  "scala-lang.scala",
  "scala-lang.scala-snippets",
  "scalameta.metals",
+ "shakram02.bash-beautify",
  "shardulm94.trailing-spaces",
  "streetsidesoftware.code-spell-checker",
  "tamasfe.even-better-toml",
package/.vscode/settings.json CHANGED
@@ -81,50 +81,7 @@
  "[xml]": {
  "editor.defaultFormatter": "redhat.vscode-xml"
  },
- "docwriter.style": "Auto-detect",
- "docwriter.progress.trackFunctions": false,
- "docwriter.progress.trackMethods": false,
- "files.exclude": {
- "**/.git": true, // this is a default value
- "**/.DS_Store": true, // this is a default value

- "**/node_modules": true, // this excludes all folders
- // named "node_modules" from
- // the explore tree
-
- // alternative version
- "node_modules": true, // this excludes the folder
- // only from the root of
- // your workspace
- "public": true,
- "engine-private": true,
- "conf": true,
- "tmp": true,
- "bkt": true,
- "logs": true,
- "build": true,
- "coverage": true,
- "prometheus_data": true,
- "grafana_data": true,
- ".nyc_output": true
- },
- "[solidity]": {
- "editor.defaultFormatter": "JuanBlanco.solidity"
- },
- "[dockerfile]": {
- "editor.defaultFormatter": "foxundermoon.shell-format"
- },
- "[ignore]": {
- "editor.defaultFormatter": "foxundermoon.shell-format"
- },
- "docwriter.progress.trackTypes": true,
- "docwriter.hotkey.mac": "⌘ + .",
- "[dotenv]": {
- "editor.defaultFormatter": "foxundermoon.shell-format"
- },
- "[shellscript]": {
- "editor.defaultFormatter": "foxundermoon.shell-format"
- },
  "files.watcherExclude": {
  "**/target": true
  }
package/README.md CHANGED
@@ -68,7 +68,7 @@ Run dev client server
  npm run dev
  ```
  <!-- -->
- ## underpost ci/cd cli v2.8.821
+ ## underpost ci/cd cli v2.8.832

  ### Usage: `underpost [options] [command]`
  ```
@@ -98,6 +98,7 @@ Commands:
  fs [options] [path] Manages file storage, defaulting to file upload operations.
  test [options] [deploy-list] Manages and runs tests, defaulting to the current Underpost default test suite.
  monitor [options] <deploy-id> [env] Manages health server monitoring for specified deployments.
+ run [options] [path] Runs a script from the specified path.
  lxd [options] Manages LXD containers and virtual machines.
  baremetal [options] [workflow-id] [hostname] [ip-address] Manages baremetal server operations, including installation, database setup, commissioning, and user management.
  help [command] display help for command
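
The `run` line added above introduces a new top-level command in this release; its full help text is added to `cli.md` further down in this diff. A minimal, hedged sketch of discovering it, assuming the CLI is installed globally:

```
underpost run --help
```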
package/bin/deploy.js CHANGED
@@ -1014,6 +1014,29 @@ EOF`);
  break;
  }

+ case 'maas-db': {
+ // DROP, ALTER, CREATE, WITH ENCRYPTED
+ // sudo -u <user> -h <host> psql <db-name>
+ shellExec(`DB_PG_MAAS_NAME=${process.env.DB_PG_MAAS_NAME}`);
+ shellExec(`DB_PG_MAAS_PASS=${process.env.DB_PG_MAAS_PASS}`);
+ shellExec(`DB_PG_MAAS_USER=${process.env.DB_PG_MAAS_USER}`);
+ shellExec(`DB_PG_MAAS_HOST=${process.env.DB_PG_MAAS_HOST}`);
+ shellExec(
+ `sudo -i -u postgres psql -c "CREATE USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"`,
+ );
+ shellExec(
+ `sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"`,
+ );
+ const actions = ['LOGIN', 'SUPERUSER', 'INHERIT', 'CREATEDB', 'CREATEROLE', 'REPLICATION'];
+ shellExec(`sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH ${actions.join(' ')}"`);
+ shellExec(`sudo -i -u postgres psql -c "\\du"`);
+
+ shellExec(`sudo -i -u postgres createdb -O "$DB_PG_MAAS_USER" "$DB_PG_MAAS_NAME"`);
+
+ shellExec(`sudo -i -u postgres psql -c "\\l"`);
+ break;
+ }
+
  case 'valkey': {
  if (!process.argv.includes('server')) {
  if (process.argv.includes('rocky')) {
@@ -1692,14 +1715,6 @@ nvidia/gpu-operator \
  // sudo yum install sbt
  break;
  }
-
- case 'chrony': {
- shellExec(`sudo dnf install chrony -y`);
- // debian chroot: sudo apt install chrony
- for (const cmd of chronySetUp(`/etc/chrony.conf`)) shellExec(cmd);
-
- break;
- }
  }
  } catch (error) {
  logger.error(error, error.stack);
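
The new `maas-db` case provisions the MAAS PostgreSQL role and database from the `DB_PG_MAAS_*` environment variables; `src/cli/baremetal.js` later in this diff invokes it as `node ${underpostRoot}/bin/deploy maas-db`. A rough shell equivalent of what the case executes, assuming those variables are exported in the calling environment:

```
sudo -i -u postgres psql -c "CREATE USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"
sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH LOGIN SUPERUSER INHERIT CREATEDB CREATEROLE REPLICATION"
sudo -i -u postgres createdb -O "$DB_PG_MAAS_USER" "$DB_PG_MAAS_NAME"
sudo -i -u postgres psql -c "\l"   # verify the new database is listed
```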
package/bin/vs.js CHANGED
@@ -4,7 +4,11 @@ import { loggerFactory } from '../src/server/logger.js';

  const logger = loggerFactory(import.meta);

- const vsCodeRootPath = '/root/.vscode-root';
+ // const vsCodeRootPath = '/root/.vscode-root';
+ // const vsProgram = 'code';
+
+ const vsCodeRootPath = '/root/.windsurf';
+ const vsProgram = 'windsurf';

  switch (process.argv[2]) {
  case 'info': {
@@ -31,13 +35,17 @@ switch (process.argv[2]) {
  const extensions = JSON.parse(fs.readFileSync(`./.vscode/extensions.json`, 'utf8'));
  extensions.recommendations.map((extension) => {
  if (extension)
- shellExec(`sudo code --user-data-dir="${vsCodeRootPath}" --no-sandbox --install-extension ${extension}`);
+ shellExec(
+ `sudo ${vsProgram} --user-data-dir="${vsCodeRootPath}" --no-sandbox --install-extension ${extension}`,
+ );
  });
  }
  break;
  case 'export':
  {
- shellExec(`sudo code --user-data-dir="${vsCodeRootPath}" --no-sandbox --list-extensions > vs-extensions.txt`);
+ shellExec(
+ `sudo ${vsProgram} --user-data-dir="${vsCodeRootPath}" --no-sandbox --list-extensions > vs-extensions.txt`,
+ );
  fs.writeFileSync(
  `./.vscode/extensions.json`,
  JSON.stringify(
@@ -61,6 +69,6 @@ switch (process.argv[2]) {
  break;
  }
  default:
- shellExec(`sudo code ${process.argv[2]} --user-data-dir="${vsCodeRootPath}" --no-sandbox`);
+ shellExec(`sudo ${vsProgram} ${process.argv[2]} --user-data-dir="${vsCodeRootPath}" --no-sandbox`);
  break;
  }
package/cli.md CHANGED
@@ -1,4 +1,4 @@
- ## underpost ci/cd cli v2.8.821
+ ## underpost ci/cd cli v2.8.832

  ### Usage: `underpost [options] [command]`
  ```
@@ -28,6 +28,7 @@ Commands:
  fs [options] [path] Manages file storage, defaulting to file upload operations.
  test [options] [deploy-list] Manages and runs tests, defaulting to the current Underpost default test suite.
  monitor [options] <deploy-id> [env] Manages health server monitoring for specified deployments.
+ run [options] [path] Runs a script from the specified path.
  lxd [options] Manages LXD containers and virtual machines.
  baremetal [options] [workflow-id] [hostname] [ip-address] Manages baremetal server operations, including installation, database setup, commissioning, and user management.
  help [command] display help for command
@@ -240,6 +241,7 @@ Options:
  initialization.
  --init-host Installs necessary Kubernetes node CLI tools (e.g.,
  kind, kubeadm, docker, podman, helm).
+ --uninstall-host Uninstalls all host components installed by init-host.
  --config Sets the base Kubernetes node configuration.
  --worker Sets the context for a worker node.
  --chown Sets the appropriate ownership for Kubernetes kubeconfig
@@ -545,6 +547,23 @@ Options:
  ```


+ ### `run` :
+ ```
+ Usage: underpost run [options] [path]
+
+ Runs a script from the specified path.
+
+ Arguments:
+ path The absolute or relative directory path where the script is
+ located.
+
+ Options:
+ --dev Sets the development context environment for the script.
+ -h, --help display help for command
+
+ ```
+
+
  ### `lxd` :
  ```
  Usage: underpost lxd [options]
@@ -58,7 +58,7 @@ services:
  cpus: '0.25'
  memory: 20M
  labels: # labels in Compose file instead of Dockerfile
- engine.version: '2.8.821'
+ engine.version: '2.8.832'
  networks:
  - load-balancer

@@ -17,7 +17,7 @@ spec:
  spec:
  containers:
  - name: dd-template-development-blue
- image: localhost/rockylinux9-underpost:v2.8.821
+ image: localhost/rockylinux9-underpost:v2.8.832
  # resources:
  # requests:
  # memory: "124Ki"
@@ -100,7 +100,7 @@ spec:
  spec:
  containers:
  - name: dd-template-development-green
- image: localhost/rockylinux9-underpost:v2.8.821
+ image: localhost/rockylinux9-underpost:v2.8.832
  # resources:
  # requests:
  # memory: "124Ki"
package/manifests/deployment/tensorflow/tf-gpu-test.yaml ADDED
@@ -0,0 +1,65 @@
+ ---
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: tf-gpu-test-script
+ namespace: default
+ data:
+ main_tf_gpu_test.py: |
+ import os
+ import tensorflow as tf
+
+ print("--- Starting GPU and Library Check ---")
+
+ gpus = tf.config.list_physical_devices("GPU")
+ if gpus:
+ try:
+ tf.config.set_visible_devices(gpus[0], "GPU")
+ logical_gpus = tf.config.list_logical_devices("GPU")
+ print(
+ f"TensorFlow detected {len(gpus)} Physical GPUs, {len(logical_gpus)} Logical GPUs. Using: {gpus[0].name}"
+ )
+ except RuntimeError as e:
+ print(f"RuntimeError during GPU configuration: {e}")
+ else:
+ print("TensorFlow did not detect any GPU devices. Running on CPU.")
+
+ print(f"XLA_FLAGS environment variable: {os.environ.get('XLA_FLAGS')}")
+ print(f"TF_XLA_FLAGS environment variable: {os.environ.get('TF_XLA_FLAGS')}")
+
+ print(f"TensorFlow version: {tf.__version__}")
+ print(f"Built with CUDA: {tf.test.is_built_with_cuda()}")
+ print(f"Is GPU available: {tf.config.list_physical_devices('GPU') != []}")
+
+ print("--- GPU and Library Check Complete ---")
+ ---
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: tf-gpu-test-pod
+ namespace: default
+ spec:
+ restartPolicy: Never
+ runtimeClassName: nvidia
+ containers:
+ - name: tensorflow-gpu-tester
+ image: nvcr.io/nvidia/tensorflow:24.04-tf2-py3
+ imagePullPolicy: IfNotPresent
+ command: ['python']
+ args: ['/app/main_tf_gpu_test.py']
+ resources:
+ limits:
+ nvidia.com/gpu: '1'
+ env:
+ - name: NVIDIA_VISIBLE_DEVICES
+ value: all
+ volumeMounts:
+ - name: tf-script-volume
+ mountPath: /app
+ volumes:
+ - name: tf-script-volume
+ configMap:
+ name: tf-gpu-test-script
+ items:
+ - key: main_tf_gpu_test.py
+ path: main_tf_gpu_test.py
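
This new manifest pairs a ConfigMap-mounted test script with a one-shot GPU pod. A hedged sketch of exercising it by hand; the first three commands mirror the `tf` case in the new `src/cli/run.js`, and the final log check is an assumption about how the result would be inspected:

```
kubectl delete configmap tf-gpu-test-script
kubectl delete pod tf-gpu-test-pod
kubectl apply -f manifests/deployment/tensorflow/tf-gpu-test.yaml
kubectl logs -f tf-gpu-test-pod   # should print the "GPU and Library Check" output
```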
package/manifests/maas/maas-setup.sh CHANGED
@@ -58,14 +58,14 @@ maas "$MAAS_ADMIN_USERNAME" maas set-config name=upstream_dns value=8.8.8.8
  # os="ubuntu" release="noble" arches="amd64" \
  # subarches="ga-24.04" labels="*"

- echo "Downloading Ubuntu Noble arm64/ga-24.04 image..."
- maas $MAAS_ADMIN_USERNAME boot-source-selections create 1 \
- os="ubuntu" release="noble" arches="arm64" \
- subarches="ga-24.04" labels="*"
+ # echo "Downloading Ubuntu Noble arm64/ga-24.04 image..."
+ # maas $MAAS_ADMIN_USERNAME boot-source-selections create 1 \
+ # os="ubuntu" release="noble" arches="arm64" \
+ # subarches="ga-24.04" labels="*"

  # Import the newly selected boot images
- echo "Importing boot images (this may take some time)..."
- maas "$MAAS_ADMIN_USERNAME" boot-resources import
+ # echo "Importing boot images (this may take some time)..."
+ # maas "$MAAS_ADMIN_USERNAME" boot-resources import

  # Disable the MAAS HTTP proxy
  echo "Disabling MAAS HTTP proxy..."
@@ -108,13 +108,13 @@ if [ -z "$FABRIC_ID" ]; then
  exit 1
  fi

- # Enable DHCP on the untagged VLAN (VLAN tag 0)
- echo "Enabling DHCP on VLAN 0 for fabric-1 (ID: $FABRIC_ID)..."
- maas "$MAAS_ADMIN_USERNAME" vlan update "$FABRIC_ID" 0 dhcp_on=true primary_rack="$RACK_CONTROLLER_ID"
-
  # Create a Dynamic IP Range for enlistment, commissioning, and deployment
  echo "Creating dynamic IP range from $START_IP to $END_IP..."
  maas "$MAAS_ADMIN_USERNAME" ipranges create type=dynamic start_ip="$START_IP" end_ip="$END_IP"

+ # Enable DHCP on the untagged VLAN (VLAN tag 0)
+ echo "Enabling DHCP on VLAN 0 for fabric-1 (ID: $FABRIC_ID)..."
+ maas "$MAAS_ADMIN_USERNAME" vlan update "$FABRIC_ID" 0 dhcp_on=true primary_rack="$RACK_CONTROLLER_ID"
+
  echo "Setting gateway IP for subnet $SUBNET_CIDR (ID: $SUBNET_ID) to $IP_ADDRESS..."
  maas "$MAAS_ADMIN_USERNAME" subnet update $SUBNET_ID gateway_ip=$IP_ADDRESS
@@ -0,0 +1,26 @@
+ #!/usr/bin/env bash
+ # cleanup-snap.sh
+ # Remove all disabled snap revisions to free up disk space.
+
+ set -euo pipefail
+
+ # Ensure we’re running as root
+ if [[ $EUID -ne 0 ]]; then
+ echo "Please run this script with sudo or as root."
+ exit 1
+ fi
+
+ echo "Gathering list of snaps with disabled revisions..."
+ snap list --all \
+ | awk '/disabled/ {print $1, $3}' \
+ | while read -r pkg rev; do
+ echo " -> Removing $pkg (revision $rev)..."
+ snap remove "$pkg" --revision="$rev"
+ done
+
+ echo "Cleanup complete."
+ echo
+ echo "Tip: Limit how many revisions Snap retains by setting:"
+ echo " sudo snap set system refresh.retain=2"
+ echo "Then apply with:"
+ echo " sudo snap refresh"
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "type": "module",
  "main": "src/index.js",
  "name": "underpost",
- "version": "2.8.821",
+ "version": "2.8.832",
  "description": "pwa api rest template",
  "scripts": {
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
package/src/cli/baremetal.js CHANGED
@@ -91,7 +91,7 @@ class UnderpostBaremetal {
  const callbackMetaData = {
  args: { hostname, ipAddress, workflowId },
  options,
- runnerHost: { architecture: UnderpostBaremetal.API.getHostArch(), ip: getLocalIPv4Address() },
+ runnerHost: { architecture: UnderpostBaremetal.API.getHostArch().alias, ip: getLocalIPv4Address() },
  nfsHostPath,
  tftpRootPath,
  };
@@ -149,6 +149,7 @@ class UnderpostBaremetal {
  shellExec(`chmod +x ${underpostRoot}/manifests/maas/nat-iptables.sh`);
  shellExec(`${underpostRoot}/manifests/maas/maas-setup.sh`);
  shellExec(`${underpostRoot}/manifests/maas/nat-iptables.sh`);
+ return;
  }

  // Handle control server uninstallation.
@@ -166,6 +167,7 @@ class UnderpostBaremetal {
  shellExec(`sudo rm -rf /etc/maas`);
  shellExec(`sudo rm -rf /var/lib/maas`);
  shellExec(`sudo rm -rf /var/log/maas`);
+ return;
  }

  // Handle control server database installation.
@@ -175,12 +177,14 @@ class UnderpostBaremetal {
  shellExec(
  `node ${underpostRoot}/bin/deploy pg-drop-db ${process.env.DB_PG_MAAS_NAME} ${process.env.DB_PG_MAAS_USER}`,
  );
- shellExec(`node ${underpostRoot}/bin/deploy maas db`);
+ shellExec(`node ${underpostRoot}/bin/deploy maas-db`);
+ return;
  }

  // Handle control server database uninstallation.
  if (options.controlServerDbUninstall === true) {
  shellExec(`node ${underpostRoot}/bin/deploy ${dbProviderId} uninstall`);
+ return;
  }

  // Set debootstrap architecture.
@@ -941,8 +945,8 @@ EOF`);
  getHostArch() {
  // `uname -m` returns e.g. 'x86_64' or 'aarch64'
  const machine = shellExec('uname -m', { stdout: true }).trim();
- if (machine === 'x86_64') return 'amd64';
- if (machine === 'aarch64') return 'arm64';
+ if (machine === 'x86_64') return { alias: 'amd64', name: 'x86_64' };
+ if (machine === 'aarch64') return { alias: 'arm64', name: 'aarch64' };
  throw new Error(`Unsupported host architecture: ${machine}`);
  },

package/src/cli/cluster.js CHANGED
@@ -1,6 +1,7 @@
  import { getNpmRootPath } from '../server/conf.js';
  import { loggerFactory } from '../server/logger.js';
  import { shellExec } from '../server/process.js';
+ import UnderpostBaremetal from './baremetal.js';
  import UnderpostDeploy from './deploy.js';
  import UnderpostTest from './test.js';
  import os from 'os';
@@ -37,6 +38,7 @@ class UnderpostCluster {
  * @param {boolean} [options.kubeadm=false] - Initialize the cluster using Kubeadm.
  * @param {boolean} [options.k3s=false] - Initialize the cluster using K3s.
  * @param {boolean} [options.initHost=false] - Perform initial host setup (install Docker, Podman, Kind, Kubeadm, Helm).
+ * @param {boolean} [options.uninstallHost=false] - Uninstall all host components.
  * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
  * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm or K3s join).
  * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
@@ -65,6 +67,7 @@ class UnderpostCluster {
  kubeadm: false,
  k3s: false,
  initHost: false,
+ uninstallHost: false,
  config: false,
  worker: false,
  chown: false,
@@ -73,6 +76,9 @@ class UnderpostCluster {
  // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
  if (options.initHost === true) return UnderpostCluster.API.initHost();

+ // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
+ if (options.uninstallHost === true) return UnderpostCluster.API.uninstallHost();
+
  // Applies general host configuration (SELinux, containerd, sysctl)
  if (options.config === true) return UnderpostCluster.API.config();

@@ -126,7 +132,7 @@ class UnderpostCluster {
  }

  // Reset Kubernetes cluster components (Kind/Kubeadm/K3s) and container runtimes
- if (options.reset === true) return await UnderpostCluster.API.reset();
+ if (options.reset === true) return await UnderpostCluster.API.safeReset({ underpostRoot });

  // Check if a cluster (Kind, Kubeadm, or K3s) is already initialized
  const alreadyKubeadmCluster = UnderpostDeploy.API.get('calico-kube-controllers')[0];
@@ -138,6 +144,7 @@ class UnderpostCluster {
  // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
  // It prevents re-initialization if a cluster is already detected.
  if (!options.worker && !alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
+ UnderpostCluster.API.config();
  if (options.k3s === true) {
  logger.info('Initializing K3s control plane...');
  // Install K3s
@@ -415,8 +422,10 @@ class UnderpostCluster {
  * This method ensures proper SELinux, Docker, Containerd, and Sysctl settings
  * are applied for a healthy Kubernetes environment. It explicitly avoids
  * iptables flushing commands to prevent conflicts with Kubernetes' own network management.
+ * @param {string} underpostRoot - The root directory of the underpost project.
  */
- config() {
+ config(options = { underpostRoot: '.' }) {
+ const underpostRoot = options;
  console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
  // Disable SELinux (permissive mode)
  shellExec(`sudo setenforce 0`);
@@ -426,10 +435,14 @@ class UnderpostCluster {
  shellExec(`sudo systemctl enable --now docker || true`); // Docker might not be needed for K3s
  shellExec(`sudo systemctl enable --now kubelet || true`); // Kubelet might not be needed for K3s (K3s uses its own agent)

- // Configure containerd for SystemdCgroup
+ // Configure containerd for SystemdCgroup and explicitly disable SELinux
  // This is crucial for kubelet/k3s to interact correctly with containerd
  shellExec(`containerd config default | sudo tee /etc/containerd/config.toml > /dev/null`);
  shellExec(`sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
+ // Add a new line to disable SELinux for the runc runtime
+ // shellExec(
+ // `sudo sed -i '/SystemdCgroup = true/a selinux_disabled = true' /etc/containerd/config.toml || true`,
+ // );
  shellExec(`sudo service docker restart || true`); // Restart docker after containerd config changes
  shellExec(`sudo systemctl enable --now containerd.service`);
  shellExec(`sudo systemctl restart containerd`); // Restart containerd to apply changes
@@ -451,7 +464,9 @@ class UnderpostCluster {
  net.bridge.bridge-nf-call-ip6tables = 1
  net.bridge.bridge-nf-call-arptables = 1
  net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
- shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
+ // shellExec(`sudo sysctl --system`); // Apply sysctl changes immediately
+ // Apply NAT iptables rules.
+ shellExec(`${underpostRoot}/manifests/maas/nat-iptables.sh`, { silent: true });

  // Disable firewalld (common cause of network issues in Kubernetes)
  shellExec(`sudo systemctl stop firewalld || true`); // Stop if running
@@ -492,22 +507,40 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
  },

  /**
- * @method reset
- * @description Performs a comprehensive reset of Kubernetes and container environments.
- * This function is for cleaning up a node, reverting changes made by 'kubeadm init', 'kubeadm join', or 'k3s install'.
- * It includes deleting Kind clusters, resetting kubeadm, removing CNI configs,
- * cleaning Docker and Podman data, persistent volumes, and resetting kubelet components.
- * It avoids aggressive iptables flushing that would break host connectivity, relying on kube-proxy's
- * control loop to eventually clean up rules if the cluster is not re-initialized.
+ * @method safeReset
+ * @description Performs a complete reset of the Kubernetes cluster and its container environments.
+ * This version focuses on correcting persistent permission errors (such as 'permission denied'
+ * in coredns) by restoring SELinux security contexts and safely cleaning up cluster artifacts.
+ * @param {object} [options] - Configuration options for the reset.
+ * @param {string} [options.underpostRoot] - The root path of the underpost project.
  */
- async reset() {
- logger.info('Starting comprehensive reset of Kubernetes and container environments...');
+ async safeReset(options = { underpostRoot: '.' }) {
+ logger.info('Starting a safe and comprehensive reset of Kubernetes and container environments...');

  try {
- // Phase 1: Pre-reset Kubernetes Cleanup (while API server is still up)
- logger.info('Phase 1/6: Cleaning up Kubernetes resources (PVCs, PVs) while API server is accessible...');
+ // Phase 0: Truncate large logs under /var/log to free up immediate space
+ logger.info('Phase 0/6: Truncating large log files under /var/log...');
+ try {
+ const cleanPath = `/var/log/`;
+ const largeLogsFiles = shellExec(
+ `sudo du -sh ${cleanPath}* | awk '{if ($1 ~ /G$/ && ($1+0) > 1) print}' | sort -rh`,
+ {
+ stdout: true,
+ },
+ );
+ for (const pathLog of largeLogsFiles
+ .split(`\n`)
+ .map((p) => p.split(cleanPath)[1])
+ .filter((p) => p)) {
+ shellExec(`sudo rm -rf ${cleanPath}${pathLog}`);
+ }
+ } catch (err) {
+ logger.warn(` -> Error truncating log files: ${err.message}. Continuing with reset.`);
+ }

- // Get all Persistent Volumes and identify their host paths for data deletion.
+ // Phase 1: Clean up Persistent Volumes with hostPath
+ // This targets data created by Kubernetes Persistent Volumes that use hostPath.
+ logger.info('Phase 1/6: Cleaning Kubernetes hostPath volumes...');
  try {
  const pvListJson = shellExec(`kubectl get pv -o json || echo '{"items":[]}'`, { stdout: true, silent: true });
  const pvList = JSON.parse(pvListJson);
@@ -527,60 +560,60 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
  } catch (error) {
  logger.error('Failed to clean up Persistent Volumes:', error);
  }
-
- // Phase 2: Stop Kubelet/K3s agent and remove CNI configuration
- logger.info('Phase 2/6: Stopping Kubelet/K3s agent and removing CNI configurations...');
- shellExec(`sudo systemctl stop kubelet || true`); // Stop kubelet if it's running (kubeadm)
- shellExec(`sudo /usr/local/bin/k3s-uninstall.sh || true`); // Run K3s uninstall script if it exists
-
- // CNI plugins use /etc/cni/net.d to store their configuration.
+ // Phase 2: Restore SELinux and stop services
+ // This is critical for fixing the 'permission denied' error you experienced.
+ // Enable SELinux permissive mode and restore file contexts.
+ logger.info('Phase 2/6: Stopping services and fixing SELinux...');
+ logger.info(' -> Ensuring SELinux is in permissive mode...');
+ shellExec(`sudo setenforce 0 || true`);
+ shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config || true`);
+ logger.info(' -> Restoring SELinux contexts for container data directories...');
+ // The 'restorecon' command corrects file system security contexts.
+ shellExec(`sudo restorecon -Rv /var/lib/containerd || true`);
+ shellExec(`sudo restorecon -Rv /var/lib/kubelet || true`);
+
+ logger.info(' -> Stopping kubelet, docker, and podman services...');
+ shellExec('sudo systemctl stop kubelet || true');
+ shellExec('sudo systemctl stop docker || true');
+ shellExec('sudo systemctl stop podman || true');
+ // Safely unmount pod filesystems to avoid errors.
+ shellExec('sudo umount -f /var/lib/kubelet/pods/*/* || true');
+
+ // Phase 3: Execute official uninstallation commands
+ logger.info('Phase 3/6: Executing official reset and uninstallation commands...');
+ logger.info(' -> Executing kubeadm reset...');
+ shellExec('sudo kubeadm reset --force || true');
+ logger.info(' -> Executing K3s uninstallation script if it exists...');
+ shellExec('sudo /usr/local/bin/k3s-uninstall.sh || true');
+ logger.info(' -> Deleting Kind clusters...');
+ shellExec('kind get clusters | xargs -r -t -n1 kind delete cluster || true');
+
+ // Phase 4: File system cleanup
+ logger.info('Phase 4/6: Cleaning up remaining file system artifacts...');
+ // Remove any leftover configurations and data.
+ shellExec('sudo rm -rf /etc/kubernetes/* || true');
  shellExec('sudo rm -rf /etc/cni/net.d/* || true');
-
- // Phase 3: Kind Cluster Cleanup
- logger.info('Phase 3/6: Cleaning up Kind clusters...');
- shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster || true`);
-
- // Phase 4: Kubeadm Reset (if applicable)
- logger.info('Phase 4/6: Performing kubeadm reset (if applicable)...');
- shellExec(`sudo kubeadm reset --force || true`); // Use || true to prevent script from failing if kubeadm is not installed
-
- // Phase 5: Post-reset File System Cleanup (Local Storage, Kubeconfig)
- logger.info('Phase 5/6: Cleaning up local storage provisioner data and kubeconfig...');
+ shellExec('sudo rm -rf /var/lib/kubelet/* || true');
+ shellExec('sudo rm -rf /var/lib/cni/* || true');
+ shellExec('sudo rm -rf /var/lib/docker/* || true');
+ shellExec('sudo rm -rf /var/lib/containerd/* || true');
+ shellExec('sudo rm -rf /var/lib/containers/storage/* || true');
+ // Clean up the current user's kubeconfig.
  shellExec('rm -rf $HOME/.kube || true');
- shellExec(`sudo rm -rf /opt/local-path-provisioner/* || true`);
-
- // Phase 6: Container Runtime Cleanup (Docker and Podman)
- logger.info('Phase 6/6: Cleaning up Docker and Podman data...');
- shellExec('sudo docker system prune -a -f || true');
- shellExec('sudo service docker stop || true');
- shellExec(`sudo rm -rf /var/lib/containers/storage/* || true`);
- shellExec(`sudo rm -rf /var/lib/docker/volumes/* || true`);
- shellExec(`sudo rm -rf /var/lib/docker~/* || true`);
- shellExec(`sudo rm -rf /home/containers/storage/* || true`);
- shellExec(`sudo rm -rf /home/docker/* || true`);
- shellExec('sudo mkdir -p /home/docker || true');
- shellExec('sudo chmod 777 /home/docker || true');
- shellExec('sudo ln -sf /home/docker /var/lib/docker || true');
-
- shellExec(`sudo podman system prune -a -f || true`);
- shellExec(`sudo podman system prune --all --volumes --force || true`);
- shellExec(`sudo podman system prune --external --force || true`);
- shellExec(`sudo mkdir -p /home/containers/storage || true`);
- shellExec('sudo chmod 0711 /home/containers/storage || true');
- shellExec(
- `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf || true`,
- );
- shellExec(`sudo podman system reset -f || true`);

- // Final Kubelet and System Cleanup (after all other operations)
- logger.info('Finalizing Kubelet and system file cleanup...');
- shellExec(`sudo rm -rf /etc/kubernetes/* || true`);
- shellExec(`sudo rm -rf /var/lib/kubelet/* || true`);
- shellExec(`sudo rm -rf /root/.local/share/Trash/files/* || true`);
- shellExec(`sudo systemctl daemon-reload`);
- shellExec(`sudo systemctl start kubelet || true`); // Attempt to start kubelet; might fail if fully reset
-
- logger.info('Comprehensive reset completed successfully.');
+ // Phase 5: Host network cleanup
+ logger.info('Phase 5/6: Cleaning up host network configurations...');
+ // Remove iptables rules and CNI network interfaces.
+ shellExec('sudo iptables -F || true');
+ shellExec('sudo iptables -t nat -F || true');
+ shellExec('sudo ip link del cni0 || true');
+ shellExec('sudo ip link del flannel.1 || true');
+
+ // Phase 6: Reload daemon and finalize
+ logger.info('Phase 6/6: Reloading the system daemon and finalizing...');
+ // shellExec('sudo systemctl daemon-reload');
+ UnderpostCluster.API.config();
+ logger.info('Safe and complete reset finished. The system is ready for a new cluster initialization.');
  } catch (error) {
  logger.error(`Error during reset: ${error.message}`);
  console.error(error);
@@ -623,51 +656,24 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
  },
  /**
  * @method initHost
- * @description Installs essential host-level prerequisites for Kubernetes,
- * including Docker, Podman, Kind, Kubeadm, and Helm.
- *
- * Quick-Start Guide for K3s Installation:
- * This guide will help you quickly launch a cluster with default options. Make sure your nodes meet the requirements before proceeding.
- * Consult the Installation page for greater detail on installing and configuring K3s.
- * For information on how K3s components work together, refer to the Architecture page.
- * If you are new to Kubernetes, the official Kubernetes docs have great tutorials covering basics that all cluster administrators should be familiar with.
- *
- * Install Script:
- * K3s provides an installation script that is a convenient way to install it as a service on systemd or openrc based systems. This script is available at https://get.k3s.io. To install K3s using this method, just run:
- * curl -sfL https://get.k3s.io | sh -
- *
- * After running this installation:
- * - The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed
- * - Additional utilities will be installed, including kubectl, crictl, ctr, k3s-killall.sh, and k3s-uninstall.sh
- * - A kubeconfig file will be written to /etc/rancher/k3s/k3s.yaml and the kubectl installed by K3s will automatically use it
- *
- * A single-node server installation is a fully-functional Kubernetes cluster, including all the datastore, control-plane, kubelet, and container runtime components necessary to host workload pods. It is not necessary to add additional server or agents nodes, but you may want to do so to add additional capacity or redundancy to your cluster.
- *
- * To install additional agent nodes and add them to the cluster, run the installation script with the K3S_URL and K3S_TOKEN environment variables. Here is an example showing how to join an agent:
- * curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh -
- *
- * Setting the K3S_URL parameter causes the installer to configure K3s as an agent, instead of a server. The K3s agent will register with the K3s server listening at the supplied URL. The value to use for K3S_TOKEN is stored at /var/lib/rancher/k3s/server/node-token on your server node.
- *
- * Note: Each machine must have a unique hostname. If your machines do not have unique hostnames, pass the K3S_NODE_NAME environment variable and provide a value with a valid and unique hostname for each node.
- * If you are interested in having more server nodes, see the High Availability Embedded etcd and High Availability External DB pages for more information.
+ * @description Installs essential host-level prerequisites for Kubernetes (Docker, Podman, Kind, Kubeadm, Helm).
  */
  initHost() {
- console.log(
- 'Installing essential host-level prerequisites for Kubernetes (Docker, Podman, Kind, Kubeadm, Helm) and providing K3s Quick-Start Guide information...',
- );
- // Install docker
+ const archData = UnderpostBaremetal.API.getHostArch();
+ logger.info('Installing essential host-level prerequisites for Kubernetes...', archData);
+ // Install Docker and its dependencies
  shellExec(`sudo dnf -y install dnf-plugins-core`);
  shellExec(`sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
  shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);

- // Install podman
+ // Install Podman
  shellExec(`sudo dnf -y install podman`);

- // Install kind
- shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
+ // Install Kind (Kubernetes in Docker)
+ shellExec(`[ $(uname -m) = ${archData.name} ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-${archData.alias}
  chmod +x ./kind
  sudo mv ./kind /bin/kind`);
- // Install kubeadm, kubelet, kubectl (these are also useful for K3s for kubectl command)
+ // Install Kubernetes tools: Kubeadm, Kubelet, and Kubectl
  shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
  [kubernetes]
  name=Kubernetes
@@ -679,14 +685,78 @@ exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
  EOF`);
  shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);

- // Install helm
+ // Install Helm
  shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3`);
  shellExec(`chmod 700 get_helm.sh`);
  shellExec(`./get_helm.sh`);
  shellExec(`chmod +x /usr/local/bin/helm`);
  shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
+ shellExec(`sudo rm -rf get_helm.sh`);
  console.log('Host prerequisites installed successfully.');
  },
+ /**
+ * @method uninstallHost
+ * @description Uninstalls all host components installed by initHost.
+ * This includes Docker, Podman, Kind, Kubeadm, Kubelet, Kubectl, and Helm.
+ */
+ uninstallHost() {
+ console.log('Uninstalling host components: Docker, Podman, Kind, Kubeadm, Kubelet, Kubectl, Helm.');
+
+ // Remove Kind
+ console.log('Removing Kind...');
+ shellExec(`sudo rm -f /bin/kind || true`);
+
+ // Remove Helm
+ console.log('Removing Helm...');
+ shellExec(`sudo rm -f /usr/local/bin/helm || true`);
+ shellExec(`sudo rm -f /usr/local/bin/helm.sh || true`); // clean up the install script if it exists
+
+ // Remove Docker and its dependencies
+ console.log('Removing Docker, containerd, and related packages...');
+ shellExec(
+ `sudo dnf -y remove docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin || true`,
+ );
+
+ // Remove Podman
+ console.log('Removing Podman...');
+ shellExec(`sudo dnf -y remove podman || true`);
+
+ // Remove Kubeadm, Kubelet, and Kubectl
+ console.log('Removing Kubernetes tools...');
+ shellExec(`sudo yum remove -y kubelet kubeadm kubectl || true`);
+
+ // Remove Kubernetes repo file
+ console.log('Removing Kubernetes repository configuration...');
+ shellExec(`sudo rm -f /etc/yum.repos.d/kubernetes.repo || true`);
+
+ // Clean up Kubeadm config and data directories
+ console.log('Cleaning up Kubernetes configuration directories...');
+ shellExec(`sudo rm -rf /etc/kubernetes/pki || true`);
+ shellExec(`sudo rm -rf ~/.kube || true`);
+
+ // Stop and disable services
+ console.log('Stopping and disabling services...');
+ shellExec(`sudo systemctl stop docker.service || true`);
+ shellExec(`sudo systemctl disable docker.service || true`);
+ shellExec(`sudo systemctl stop containerd.service || true`);
+ shellExec(`sudo systemctl disable containerd.service || true`);
+ shellExec(`sudo systemctl stop kubelet.service || true`);
+ shellExec(`sudo systemctl disable kubelet.service || true`);
+
+ // Clean up config files
+ console.log('Removing host configuration files...');
+ shellExec(`sudo rm -f /etc/containerd/config.toml || true`);
+ shellExec(`sudo rm -f /etc/sysctl.d/k8s.conf || true`);
+ shellExec(`sudo rm -f /etc/sysctl.d/99-k8s-ipforward.conf || true`);
+ shellExec(`sudo rm -f /etc/sysctl.d/99-k8s.conf || true`);
+
+ // Restore SELinux to enforcing
+ console.log('Restoring SELinux to enforcing mode...');
+ // shellExec(`sudo setenforce 1`);
+ // shellExec(`sudo sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config`);
+
+ console.log('Uninstall process completed.');
+ },
  };
  }
  export default UnderpostCluster;
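
A hedged sketch of how the reworked cluster lifecycle might be driven from the CLI: `node bin cluster --dev --reset` appears verbatim in the new `src/cli/run.js` below, while the `underpost cluster` flag spellings are taken from the option list earlier in this diff and may differ in a given install:

```
underpost cluster --init-host        # install docker, podman, kind, kubeadm, helm
node bin cluster --dev --reset       # dev checkout: now routed to the new safeReset() flow
underpost cluster --uninstall-host   # new in v2.8.832: removes everything --init-host installed
```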
package/src/cli/index.js CHANGED
@@ -128,6 +128,7 @@ program
  .option('--info-capacity-pod', 'Displays the current machine capacity information per pod.')
  .option('--pull-image', 'Sets an optional associated image to pull during initialization.')
  .option('--init-host', 'Installs necessary Kubernetes node CLI tools (e.g., kind, kubeadm, docker, podman, helm).')
+ .option('--uninstall-host', 'Uninstalls all host components installed by init-host.')
  .option('--config', 'Sets the base Kubernetes node configuration.')
  .option('--worker', 'Sets the context for a worker node.')
  .option('--chown', 'Sets the appropriate ownership for Kubernetes kubeconfig files.')
@@ -314,6 +315,14 @@ program
  .description('Manages health server monitoring for specified deployments.')
  .action(Underpost.monitor.callback);

+ // 'run' command: Run a script
+ program
+ .command('run')
+ .argument('[path]', 'The absolute or relative directory path where the script is located.')
+ .option('--dev', 'Sets the development context environment for the script.')
+ .description('Runs a script from the specified path.')
+ .action(Underpost.run.callback);
+
  // 'lxd' command: LXD management
  program
  .command('lxd')
package/src/cli/repository.js CHANGED
@@ -60,7 +60,7 @@ class UnderpostRepository {
  },

  push(repoPath = './', gitUri = 'underpostnet/pwa-microservices-template', options = { f: false, g8: false }) {
- const gExtension = options.g8 === true ? '.g8' : '.git';
+ const gExtension = options.g8 === true || options.G8 === true ? '.g8' : '.git';
  shellExec(
  `cd ${repoPath} && git push https://${process.env.GITHUB_TOKEN}@github.com/${gitUri}${gExtension}${
  options?.f === true ? ' --force' : ''
@@ -71,9 +71,12 @@ class UnderpostRepository {
  );
  logger.info(
  'commit url',
- `http://github.com/${gitUri}/commit/${shellExec(`cd ${repoPath} && git rev-parse --verify HEAD`, {
- stdout: true,
- }).trim()}`,
+ `http://github.com/${gitUri}${gExtension === '.g8' ? '.g8' : ''}/commit/${shellExec(
+ `cd ${repoPath} && git rev-parse --verify HEAD`,
+ {
+ stdout: true,
+ },
+ ).trim()}`,
  );
  },

package/src/cli/run.js ADDED
@@ -0,0 +1,50 @@
+ import { pbcopy, shellCd, shellExec } from '../server/process.js';
+ import read from 'read';
+ import { getNpmRootPath } from '../server/conf.js';
+
+ class UnderpostRun {
+ static API = {
+ async callback(path, options = { dev: false }) {
+ const fileName = path.split('/').pop();
+ const npmRoot = getNpmRootPath();
+ const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+
+ switch (fileName) {
+ case 'spark-template': {
+ const path = '/home/dd/spark-template';
+ shellExec(`sudo rm -rf ${path}`);
+ shellCd('/home/dd');
+
+ // pbcopy(`cd /home/dd && sbt new underpostnet/spark-template.g8`);
+ // await read({ prompt: 'Command copy to clipboard, press enter to continue.\n' });
+ shellExec(`cd /home/dd && sbt new underpostnet/spark-template.g8 '--name=spark-template'`);
+
+ shellCd(path);
+
+ shellExec(`git init && git add . && git commit -m "Base implementation"`);
+ shellExec(`chmod +x ./replace_params.sh`);
+ shellExec(`chmod +x ./build.sh`);
+
+ shellExec(`./replace_params.sh`);
+ shellExec(`./build.sh`);
+
+ shellCd('/home/dd/engine');
+ break;
+ }
+ case 'gpu': {
+ shellExec(
+ `node bin cluster --dev --reset && node bin cluster --dev --dedicated-gpu --kubeadm && kubectl get pods --all-namespaces -o wide -w`,
+ );
+ break;
+ }
+ case 'tf':
+ shellExec(`kubectl delete configmap tf-gpu-test-script`);
+ shellExec(`kubectl delete pod tf-gpu-test-pod`);
+ shellExec(`kubectl apply -f ${underpostRoot}/manifests/deployment/tensorflow/tf-gpu-test.yaml`);
+ break;
+ }
+ },
+ };
+ }
+
+ export default UnderpostRun;
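
For reference, a hedged usage sketch of this new module once it is registered as the `run` command in `src/cli/index.js`; the script names come from the switch cases above, and `--dev` makes `underpostRoot` resolve to the current checkout instead of the npm root:

```
underpost run spark-template   # scaffold and build the sbt spark-template.g8 project
underpost run gpu --dev        # reset the local cluster and re-create it with GPU support
underpost run tf               # re-apply the TensorFlow GPU test ConfigMap and pod
```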
package/src/index.js CHANGED
@@ -16,6 +16,7 @@ import UnderpostImage from './cli/image.js';
  import UnderpostLxd from './cli/lxd.js';
  import UnderpostMonitor from './cli/monitor.js';
  import UnderpostRepository from './cli/repository.js';
+ import UnderpostRun from './cli/run.js';
  import UnderpostScript from './cli/script.js';
  import UnderpostSecret from './cli/secrets.js';
  import UnderpostTest from './cli/test.js';
@@ -33,102 +34,102 @@ class Underpost {
  * @type {String}
  * @memberof Underpost
  */
- static version = 'v2.8.821';
+ static version = 'v2.8.832';
  /**
  * Repository cli API
  * @static
- * @type {UnderpostRepository}
+ * @type {UnderpostRepository.API}
  * @memberof Underpost
  */
  static repo = UnderpostRepository.API;
  /**
  * Root Env cli API
  * @static
- * @type {UnderpostRootEnv}
+ * @type {UnderpostRootEnv.API}
  * @memberof Underpost
  */
  static env = UnderpostRootEnv.API;
  /**
  * Test cli API
  * @static
- * @type {UnderpostTest}
+ * @type {UnderpostTest.API}
  * @memberof Underpost
  */
  static test = UnderpostTest.API;
  /**
  * Underpost Start Up cli API
  * @static
- * @type {UnderpostStartUp}
+ * @type {UnderpostStartUp.API}
  * @memberof Underpost
  */
  static start = UnderpostStartUp.API;
  /**
  * Cluster cli API
  * @static
- * @type {UnderpostCluster}
+ * @type {UnderpostCluster.API}
  * @memberof Underpost
  */
  static cluster = UnderpostCluster.API;
  /**
  * Image cli API
  * @static
- * @type {UnderpostImage}
+ * @type {UnderpostImage.API}
  * @memberof Underpost
  */
  static image = UnderpostImage.API;
  /**
  * Secrets cli API
  * @static
- * @type {UnderpostSecret}
+ * @type {UnderpostSecret.API}
  * @memberof Underpost
  */
  static secret = UnderpostSecret.API;
  /**
  * Scripts cli API
  * @static
- * @type {UnderpostScript}
+ * @type {UnderpostScript.API}
  * @memberof Underpost
  */
  static script = UnderpostScript.API;
  /**
  * Database cli API
  * @static
- * @type {UnderpostDB}
+ * @type {UnderpostDB.API}
  * @memberof Underpost
  */
  static db = UnderpostDB.API;
  /**
  * Deployment cli API
  * @static
- * @type {UnderpostDeploy}
+ * @type {UnderpostDeploy.API}
  * @memberof Underpost
  */
  static deploy = UnderpostDeploy.API;
  /**
  * Cron cli API
  * @static
- * @type {UnderpostCron}
+ * @type {UnderpostCron.API}
  * @memberof Underpost
  */
  static cron = UnderpostCron.API;
  /**
  * File Storage cli API
  * @static
- * @type {UnderpostFileStorage}
+ * @type {UnderpostFileStorage.API}
  * @memberof Underpost
  */
  static fs = UnderpostFileStorage.API;
  /**
  * Monitor cli API
  * @static
- * @type {UnderpostMonitor}
+ * @type {UnderpostMonitor.API}
  * @memberof Underpost
  */
  static monitor = UnderpostMonitor.API;
  /**
  * LXD cli API
  * @static
- * @type {UnderpostLxd}
+ * @type {UnderpostLxd.API}
  * @memberof Underpost
  */
  static lxd = UnderpostLxd.API;
@@ -136,15 +137,23 @@ class Underpost {
  /**
  * Cloud Init cli API
  * @static
- * @type {UnderpostCloudInit}
+ * @type {UnderpostCloudInit.API}
  * @memberof Underpost
  */
  static cloudInit = UnderpostCloudInit.API;

+ /**
+ * Run cli API
+ * @static
+ * @type {UnderpostRun.API}
+ * @memberof Underpost
+ */
+ static run = UnderpostRun.API;
+
  /**
  * Baremetal cli API
  * @static
- * @type {UnderpostBaremetal}
+ * @type {UnderpostBaremetal.API}
  * @memberof Underpost
  */
  static baremetal = UnderpostBaremetal.API;