underpost 2.8.788 → 2.8.792
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/bin/db.js +1 -0
- package/cli.md +23 -14
- package/docker-compose.yml +1 -1
- package/manifests/lxd/lxd-admin-profile.yaml +1 -0
- package/manifests/lxd/underpost-setup.sh +52 -2
- package/package.json +1 -1
- package/src/cli/cluster.js +12 -3
- package/src/cli/index.js +10 -0
- package/src/cli/lxd.js +199 -0
- package/src/index.js +1 -1
package/README.md
CHANGED
package/bin/db.js
CHANGED
package/cli.md
CHANGED
@@ -1,4 +1,4 @@
-## underpost ci/cd cli v2.8.788
+## underpost ci/cd cli v2.8.792
 
 ### Usage: `underpost [options] [command]`
 ```
@@ -475,19 +475,28 @@ Options:
 Lxd management
 
 Options:
-  --init
-  --reset
-  --install
-  --dev
-  --create-virtual-network
-  --create-admin-profile
-  --control
-  --worker
-  --create-vm <vm-id>
-  --init-vm <vm-id>
-  --info-vm <vm-id>
-  --
-
+  --init                           Init lxd
+  --reset                          Reset lxd on current machine
+  --install                        Install lxd on current machine
+  --dev                            Set dev context env
+  --create-virtual-network         Create lxd virtual network bridge
+  --create-admin-profile           Create admin profile for lxd management
+  --control                        set control node vm context
+  --worker                         set worker node context
+  --create-vm <vm-id>              Create default virtual machines
+  --init-vm <vm-id>                Get init vm underpost script
+  --info-vm <vm-id>                Get all info vm
+  --test <vm-id>                   Test health, status and network connectivity
+                                   for a VM
+  --root-size <gb-size>            Set root size vm
+  --join-node <nodes>              Comma separated worker and control node e.
+                                   g. k8s-worker-1,k8s-control
+  --expose <vm-name-ports>         Vm name and : separated with Comma separated
+                                   vm port to expose e. g. k8s-control:80,443
+  --delete-expose <vm-name-ports>  Vm name and : separated with Comma separated
+                                   vm port to remove expose e. g.
+                                   k8s-control:80,443
+  -h, --help                       display help for command
 
 ```
 
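The new flags compose with the existing `underpost lxd` subcommand; a hypothetical sequence, using the VM names from the help-text examples (`k8s-control`, `k8s-worker-1`) as placeholders, could look like this sketch:

```sh
# Sketch only: VM names and ports are the illustrative values from the help text.
underpost lxd --join-node k8s-worker-1,k8s-control   # join a worker to the control plane
underpost lxd --expose k8s-control:80,443            # proxy host ports 80 and 443 into the VM
underpost lxd --test k8s-control                     # health, status and connectivity checks
underpost lxd --delete-expose k8s-control:80,443     # remove the exposed ports again
```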
package/docker-compose.yml
CHANGED
package/manifests/lxd/underpost-setup.sh
CHANGED
@@ -27,6 +27,11 @@ resize2fs /dev/sda2
 echo "Disk and filesystem resized successfully."
 sudo dnf install -y tar
 sudo dnf install -y bzip2
+sudo dnf install -y git
+sudo dnf -y update
+sudo dnf -y install epel-release
+sudo dnf install -y ufw
+sudo systemctl enable --now ufw
 curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
 NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
 [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
@@ -77,17 +82,62 @@ echo "USE_WORKER = $USE_WORKER"
 underpost cluster --kubeadm
 underpost cluster --reset
 
+PORTS=(
+  22    # SSH
+  80    # HTTP
+  443   # HTTPS
+  53    # DNS (TCP/UDP)
+  66    # TFTP
+  67    # DHCP
+  69    # TFTP
+  111   # rpcbind
+  179   # Calico BGP
+  2049  # NFS
+  20048 # NFS mountd
+  4011  # PXE boot
+  5240  # snapd API
+  5248  # Juju controller
+  6443  # Kubernetes API
+  9153  # CoreDNS metrics
+  10250 # Kubelet API
+  10251 # kube-scheduler
+  10252 # kube-controller-manager
+  10255 # Kubelet read-only (deprecated)
+  10257 # controller-manager (v1.23+)
+  10259 # scheduler (v1.23+)
+)
+
+PORT_RANGES=(
+  2379:2380   # etcd
+  # 30000:32767 # NodePort range
+  # 3000:3100   # App node ports
+  32765:32766 # Ephemeral ports
+  6783:6784   # Weave Net
+)
+
+# Open individual ports
+for PORT in "${PORTS[@]}"; do
+  ufw allow ${PORT}/tcp
+  ufw allow ${PORT}/udp
+done
+
+# Open port ranges
+for RANGE in "${PORT_RANGES[@]}"; do
+  ufw allow ${RANGE}/tcp
+  ufw allow ${RANGE}/udp
+done
+
 # Behavior based on flags
 if $USE_KUBEADM; then
   echo "Running control node with kubeadm..."
   underpost cluster --kubeadm
-  kubectl get pods --all-namespaces -o wide -w
+  # kubectl get pods --all-namespaces -o wide -w
 fi
 
 if $USE_KIND; then
   echo "Running control node with kind..."
   underpost cluster
-  kubectl get pods --all-namespaces -o wide -w
+  # kubectl get pods --all-namespaces -o wide -w
 fi
 
 if $USE_WORKER; then
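With ufw now enabled and the Kubernetes, etcd, Calico, and NFS related ports opened by the loops above, the firewall state inside the VM can be sanity-checked with standard ufw commands (not part of this package):

```sh
# Illustrative check that the firewall is active and the API-server rule exists
sudo ufw status verbose
sudo ufw status numbered | grep 6443   # Kubernetes API server
```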
package/package.json
CHANGED
package/src/cli/cluster.js
CHANGED
@@ -293,6 +293,18 @@ class UnderpostCluster {
       shellExec(`sudo systemctl daemon-reload`);
       shellExec(`sudo systemctl restart containerd`);
       shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+      // Clean ip tables
+      shellExec(`sudo iptables -F`);
+      shellExec(`sudo iptables -X`);
+      shellExec(`sudo iptables -t nat -F`);
+      shellExec(`sudo iptables -t nat -X`);
+      shellExec(`sudo iptables -t raw -F`);
+      shellExec(`sudo iptables -t raw -X`);
+      shellExec(`sudo iptables -t mangle -F`);
+      shellExec(`sudo iptables -t mangle -X`);
+      shellExec(`sudo iptables -P INPUT ACCEPT`);
+      shellExec(`sudo iptables -P FORWARD ACCEPT`);
+      shellExec(`sudo iptables -P OUTPUT ACCEPT`);
     },
     chown() {
       shellExec(`mkdir -p ~/.kube`);
@@ -475,9 +487,6 @@ Allocatable:
       return resources;
     },
     initHost() {
-      // Base
-      shellExec(`sudo dnf -y update`);
-      shellExec(`sudo dnf -y install epel-release`);
       // Install docker
       shellExec(`sudo dnf -y install dnf-plugins-core
 sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
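The added block flushes and deletes the rules in the filter, nat, raw, and mangle tables and resets the default policies to ACCEPT, clearing any stale kube-proxy or CNI rules left from a previous cluster run. A minimal way to confirm the resulting clean state, using plain iptables commands rather than anything from this package:

```sh
# Immediately after the cleanup, only the built-in chains with ACCEPT policies should remain
sudo iptables -S          # expect: -P INPUT ACCEPT, -P FORWARD ACCEPT, -P OUTPUT ACCEPT
sudo iptables -t nat -S   # likewise for the nat table
```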
package/src/cli/index.js
CHANGED
@@ -283,7 +283,17 @@ program
   .option('--create-vm <vm-id>', 'Create default virtual machines')
   .option('--init-vm <vm-id>', 'Get init vm underpost script')
   .option('--info-vm <vm-id>', 'Get all info vm')
+  .option('--test <vm-id>', 'Test health, status and network connectivity for a VM')
   .option('--root-size <gb-size>', 'Set root size vm')
+  .option('--join-node <nodes>', 'Comma separated worker and control node e. g. k8s-worker-1,k8s-control')
+  .option(
+    '--expose <vm-name-ports>',
+    'Vm name and : separated with Comma separated vm port to expose e. g. k8s-control:80,443',
+  )
+  .option(
+    '--delete-expose <vm-name-ports>',
+    'Vm name and : separated with Comma separated vm port to remove expose e. g. k8s-control:80,443',
+  )
   .description('Lxd management')
   .action(UnderpostLxd.API.callback);
 
package/src/cli/lxd.js
CHANGED
@@ -3,8 +3,34 @@ import { getLocalIPv4Address } from '../server/dns.js';
 import { pbcopy, shellExec } from '../server/process.js';
 import fs from 'fs-extra';
 
+/**
+ * @class UnderpostLxd
+ * @description Provides a set of static methods to interact with LXD,
+ * encapsulating common LXD operations for VM management and network testing.
+ */
 class UnderpostLxd {
   static API = {
+    /**
+     * @method callback
+     * @description Main entry point for LXD operations based on provided options.
+     * @param {object} options - Configuration options for LXD operations.
+     * @param {boolean} [options.init=false] - Initialize LXD.
+     * @param {boolean} [options.reset=false] - Reset LXD installation.
+     * @param {boolean} [options.dev=false] - Run in development mode (adjusts paths).
+     * @param {boolean} [options.install=false] - Install LXD snap.
+     * @param {boolean} [options.createVirtualNetwork=false] - Create default LXD bridge network (lxdbr0).
+     * @param {boolean} [options.createAdminProfile=false] - Create admin-profile for VMs.
+     * @param {boolean} [options.control=false] - Flag for control plane VM initialization.
+     * @param {boolean} [options.worker=false] - Flag for worker node VM initialization.
+     * @param {string} [options.initVm=''] - Initialize a specific VM.
+     * @param {string} [options.createVm=''] - Create a new VM with the given name.
+     * @param {string} [options.infoVm=''] - Display information about a specific VM.
+     * @param {string} [options.rootSize=''] - Root disk size for new VMs (e.g., '32GiB').
+     * @param {string} [options.joinNode=''] - Join a worker node to a control plane (format: 'workerName,controlName').
+     * @param {string} [options.expose=''] - Expose ports from a VM to the host (format: 'vmName:port1,port2').
+     * @param {string} [options.deleteExpose=''] - Delete exposed ports from a VM (format: 'vmName:port1,port2').
+     * @param {string} [options.test=''] - Test health, status and network connectivity for a VM.
+     */
     async callback(
       options = {
         init: false,
@@ -12,12 +38,17 @@ class UnderpostLxd {
         dev: false,
         install: false,
         createVirtualNetwork: false,
+        createAdminProfile: false,
         control: false,
         worker: false,
         initVm: '',
         createVm: '',
         infoVm: '',
         rootSize: '',
+        joinNode: '',
+        expose: '',
+        deleteExpose: '',
+        test: '',
       },
     ) {
       const npmRoot = getNpmRootPath();
@@ -62,17 +93,185 @@ ipv6.address=none`);
         let flag = '';
         if (options.control === true) {
           flag = ' -s -- --kubeadm';
+          shellExec(`lxc exec ${options.initVm} -- bash -c 'mkdir -p /home/dd/engine'`);
+          shellExec(`lxc file push /home/dd/engine/engine-private ${options.initVm}/home/dd/engine --recursive`);
+          shellExec(`lxc file push /home/dd/engine/manifests ${options.initVm}/home/dd/engine --recursive`);
         } else if (options.worker == true) {
           flag = ' -s -- --worker';
         }
         pbcopy(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
       }
+      if (options.joinNode && typeof options.joinNode === 'string') {
+        const [workerNode, controlNode] = options.joinNode.split(',');
+        const token = shellExec(
+          `echo "$(lxc exec ${controlNode} -- bash -c 'sudo kubeadm token create --print-join-command')"`,
+          { stdout: true },
+        );
+        shellExec(`lxc exec ${workerNode} -- bash -c '${token}'`);
+      }
       if (options.infoVm && typeof options.infoVm === 'string') {
         shellExec(`lxc config show ${options.infoVm}`);
         shellExec(`lxc info --show-log ${options.infoVm}`);
         shellExec(`lxc info ${options.infoVm}`);
         shellExec(`lxc list ${options.infoVm}`);
       }
+      if (options.expose && typeof options.expose === 'string') {
+        const [controlNode, ports] = options.expose.split(':');
+        console.log({ controlNode, ports });
+        const protocols = ['tcp']; // udp
+        const hostIp = getLocalIPv4Address();
+        const vmIp = shellExec(
+          `lxc list ${controlNode} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+          { stdout: true },
+        ).trim();
+        for (const port of ports.split(',')) {
+          for (const protocol of protocols) {
+            shellExec(`lxc config device remove ${controlNode} ${controlNode}-${protocol}-port-${port}`);
+            shellExec(
+              `lxc config device add ${controlNode} ${controlNode}-${protocol}-port-${port} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
+            );
+          }
+        }
+      }
+      if (options.deleteExpose && typeof options.deleteExpose === 'string') {
+        const [controlNode, ports] = options.deleteExpose.split(':');
+        console.log({ controlNode, ports });
+        const protocols = ['tcp']; // udp
+        for (const port of ports.split(',')) {
+          for (const protocol of protocols) {
+            shellExec(`lxc config device remove ${controlNode} ${controlNode}-${protocol}-port-${port}`);
+          }
+        }
+      }
+
+      // New 'test' option implementation
+      if (options.test && typeof options.test === 'string') {
+        const vmName = options.test;
+        console.log(`Starting comprehensive test for VM: ${vmName}`);
+
+        // 1. Monitor for IPv4 address
+        let vmIp = '';
+        let retries = 0;
+        const maxRetries = 10;
+        const delayMs = 5000; // 5 seconds
+
+        while (!vmIp && retries < maxRetries) {
+          try {
+            console.log(`Attempting to get IPv4 address for ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
+            vmIp = shellExec(
+              `lxc list ${vmName} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+              { stdout: true },
+            ).trim();
+            if (vmIp) {
+              console.log(`IPv4 address found for ${vmName}: ${vmIp}`);
+            } else {
+              console.log(`IPv4 address not yet available for ${vmName}. Retrying in ${delayMs / 1000} seconds...`);
+              await new Promise((resolve) => setTimeout(resolve, delayMs));
+            }
+          } catch (error) {
+            console.error(`Error getting IPv4 address: ${error.message}`);
+            console.log(`Retrying in ${delayMs / 1000} seconds...`);
+            await new Promise((resolve) => setTimeout(resolve, delayMs));
+          }
+          retries++;
+        }
+
+        if (!vmIp) {
+          console.error(`Failed to get IPv4 address for ${vmName} after ${maxRetries} attempts. Aborting tests.`);
+          return;
+        }
+
+        // 2. Iteratively check connection to google.cl
+        let connectedToGoogle = false;
+        retries = 0;
+        while (!connectedToGoogle && retries < maxRetries) {
+          try {
+            console.log(`Checking connectivity to google.cl from ${vmName} (Attempt ${retries + 1}/${maxRetries})...`);
+            const curlOutput = shellExec(
+              `lxc exec ${vmName} -- curl -s -o /dev/null -w "%{http_code}" http://google.cl`,
+              { stdout: true },
+            );
+            if (curlOutput.startsWith('2') || curlOutput.startsWith('3')) {
+              console.log(`Successfully connected to google.cl from ${vmName}.`);
+              connectedToGoogle = true;
+            } else {
+              console.log(`Connectivity to google.cl not yet verified. Retrying in ${delayMs / 1000} seconds...`);
+              await new Promise((resolve) => setTimeout(resolve, delayMs));
+            }
+          } catch (error) {
+            console.error(`Error checking connectivity to google.cl: ${error.message}`);
+            console.log(`Retrying in ${delayMs / 1000} seconds...`);
+            await new Promise((resolve) => setTimeout(resolve, delayMs));
+          }
+          retries++;
+        }
+
+        if (!connectedToGoogle) {
+          console.error(
+            `Failed to connect to google.cl from ${vmName} after ${maxRetries} attempts. Aborting further tests.`,
+          );
+          return;
+        }
+
+        // 3. Check other connectivity, network, and VM health parameters
+        console.log(`\n--- Comprehensive Health Report for ${vmName} ---`);
+
+        // VM Status
+        console.log('\n--- VM Status ---');
+        try {
+          const vmStatus = shellExec(`lxc list ${vmName} --format json`, { stdout: true, silent: true });
+          console.log(JSON.stringify(JSON.parse(vmStatus), null, 2));
+        } catch (error) {
+          console.error(`Error getting VM status: ${error.message}`);
+        }
+
+        // CPU Usage
+        console.log('\n--- CPU Usage ---');
+        try {
+          const cpuUsage = shellExec(`lxc exec ${vmName} -- bash -c 'top -bn1 | grep "Cpu(s)"'`, { stdout: true });
+          console.log(cpuUsage.trim());
+        } catch (error) {
+          console.error(`Error getting CPU usage: ${error.message}`);
+        }
+
+        // Memory Usage
+        console.log('\n--- Memory Usage ---');
+        try {
+          const memoryUsage = shellExec(`lxc exec ${vmName} -- bash -c 'free -m'`, { stdout: true });
+          console.log(memoryUsage.trim());
+        } catch (error) {
+          console.error(`Error getting memory usage: ${error.message}`);
+        }
+
+        // Disk Usage
+        console.log('\n--- Disk Usage (Root Partition) ---');
+        try {
+          const diskUsage = shellExec(`lxc exec ${vmName} -- bash -c 'df -h /'`, { stdout: true });
+          console.log(diskUsage.trim());
+        } catch (error) {
+          console.error(`Error getting disk usage: ${error.message}`);
+        }
+
+        // Network Interface Status
+        console.log('\n--- Network Interface Status (ip a) ---');
+        try {
+          const ipA = shellExec(`lxc exec ${vmName} -- bash -c 'ip a'`, { stdout: true });
+          console.log(ipA.trim());
+        } catch (error) {
+          console.error(`Error getting network interface status: ${error.message}`);
+        }
+
+        // DNS Resolution (resolv.conf)
+        console.log('\n--- DNS Configuration (/etc/resolv.conf) ---');
+        try {
+          const resolvConf = shellExec(`lxc exec ${vmName} -- bash -c 'cat /etc/resolv.conf'`, { stdout: true });
+          console.log(resolvConf.trim());
+        } catch (error) {
+          console.error(`Error getting DNS configuration: ${error.message}`);
+        }
+
+        console.log(`\nComprehensive test for VM: ${vmName} completed.`);
+      }
     },
   };
 }