underpost 2.8.77 → 2.8.79
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -3
- package/bin/deploy.js +3 -0
- package/cli.md +44 -6
- package/docker-compose.yml +1 -1
- package/manifests/envoy-service-nodeport.yaml +23 -0
- package/manifests/lxd/lxd-admin-profile.yaml +16 -0
- package/manifests/lxd/lxd-preseed.yaml +58 -0
- package/manifests/lxd/underpost-setup.sh +146 -0
- package/manifests/mongodb/kustomization.yaml +1 -1
- package/manifests/mongodb/statefulset.yaml +12 -11
- package/manifests/mongodb/storage-class.yaml +9 -0
- package/manifests/mysql/kustomization.yaml +7 -0
- package/manifests/mysql/pv-pvc.yaml +27 -0
- package/manifests/mysql/statefulset.yaml +55 -0
- package/manifests/valkey/statefulset.yaml +2 -3
- package/package.json +1 -1
- package/src/cli/cluster.js +103 -26
- package/src/cli/deploy.js +17 -10
- package/src/cli/image.js +19 -4
- package/src/cli/index.js +35 -3
- package/src/cli/lxd.js +162 -0
- package/src/index.js +9 -1
- package/src/runtime/lampp/Dockerfile +1 -1
package/README.md
CHANGED
@@ -68,7 +68,7 @@ Run dev client server
 npm run dev
 ```
 <!-- -->
-## underpost ci/cd cli v2.8.
+## underpost ci/cd cli v2.8.79

 ### Usage: `underpost [options] [command]`
 ```
@@ -87,10 +87,10 @@ Commands:
   config <operator> [key] [value]                            Manage configuration, operators
   root                                                       Get npm root path
   cluster [options] [pod-name]                               Manage cluster, for default initialization base kind cluster
-  deploy [options]
+  deploy [options] [deploy-list] [env]                       Manage deployment, for default deploy development pods
   secret [options] <platform>                                Manage secrets
   dockerfile-image-build [options]                           Build image from Dockerfile
-  dockerfile-pull-base-images
+  dockerfile-pull-base-images [options]                      Pull underpost dockerfile images requirements
   install                                                    Fast import underpost npm dependencies
   db [options] <deploy-list>                                 Manage databases
   script [options] <operator> <script-name> [script-value]   Supports a number of built-in underpost global scripts and their preset life cycle events as well as arbitrary scripts
@@ -98,6 +98,7 @@ Commands:
   fs [options] [path]                                        File storage management, for default upload file
   test [options] [deploy-list]                               Manage Test, for default run current underpost default test
   monitor [options] <deploy-id> [env]                        Monitor health server management
+  lxd [options]                                              Lxd management
   help [command]                                             display help for command

 ```
package/bin/deploy.js
CHANGED
@@ -822,6 +822,9 @@ try {
       }

       case 'version-deploy': {
+        shellExec(
+          `underpost secret underpost --create-from-file /home/dd/engine/engine-private/conf/dd-cron/.env.production`,
+        );
         shellExec(`node bin/build dd conf`);
         shellExec(`git add . && cd ./engine-private && git add .`);
         shellExec(`node bin cmt . ci package-pwa-microservices-template`);
package/cli.md
CHANGED
@@ -1,4 +1,4 @@
-## underpost ci/cd cli v2.8.
+## underpost ci/cd cli v2.8.79

 ### Usage: `underpost [options] [command]`
 ```
@@ -17,10 +17,10 @@ Commands:
   config <operator> [key] [value]                            Manage configuration, operators
   root                                                       Get npm root path
   cluster [options] [pod-name]                               Manage cluster, for default initialization base kind cluster
-  deploy [options]
+  deploy [options] [deploy-list] [env]                       Manage deployment, for default deploy development pods
   secret [options] <platform>                                Manage secrets
   dockerfile-image-build [options]                           Build image from Dockerfile
-  dockerfile-pull-base-images
+  dockerfile-pull-base-images [options]                      Pull underpost dockerfile images requirements
   install                                                    Fast import underpost npm dependencies
   db [options] <deploy-list>                                 Manage databases
   script [options] <operator> <script-name> [script-value]   Supports a number of built-in underpost global scripts and their preset life cycle events as well as arbitrary scripts
@@ -28,6 +28,7 @@ Commands:
   fs [options] [path]                                        File storage management, for default upload file
   test [options] [deploy-list]                               Manage Test, for default run current underpost default test
   monitor [options] <deploy-id> [env]                        Monitor health server management
+  lxd [options]                                              Lxd management
   help [command]                                             display help for command

 ```
@@ -199,10 +200,10 @@ Arguments:
 Options:
   --reset               Delete all clusters and prune all data and caches
   --mariadb             Init with mariadb statefulset
+  --mysql               Init with mysql statefulset
   --mongodb             Init with mongodb statefulset
   --postgresql          Init with postgresql statefulset
   --mongodb4            Init with mongodb 4.4 service
-  --istio               Init base istio cluster
   --valkey              Init with valkey service
   --contour             Init with project contour base HTTPProxy and envoy
   --cert-manager        Init with letsencrypt-prod ClusterIssuer
@@ -216,6 +217,11 @@ Options:
   --info-capacity       display current total machine capacity info
   --info-capacity-pod   display current machine capacity pod info
   --pull-image          Set optional pull associated image
+  --init-host           Install k8s node necessary cli env: kind, kubeadm,
+                        docker, podman, helm
+  --config              Set k8s base node config
+  --worker              Set worker node context
+  --chown               Set k8s kube chown
   -h, --help            display help for command

 ```
@@ -223,7 +229,7 @@ Options:

 ### `deploy` :
 ```
-Usage: underpost deploy [options]
+Usage: underpost deploy [options] [deploy-list] [env]

 Manage deployment, for default deploy development pods

@@ -250,6 +256,7 @@ Options:
   --info-traffic              get traffic conf form current resources
                               deployments
   --kubeadm                   Enable kubeadm context
+  --restore-hosts             Restore defautl etc hosts
   --rebuild-clients-bundle    Inside container, rebuild clients bundle,
                               only static public or storage client files
   -h, --help                  display help for command
@@ -304,7 +311,11 @@ Options:
 Pull underpost dockerfile images requirements

 Options:
-
+  --path [path]    Dockerfile path
+  --kind-load      Import tar image to Kind cluster
+  --kubeadm-load   Import tar image to Kubeadm cluster
+  --version        Set custom version
+  -h, --help       display help for command

 ```

@@ -455,4 +466,31 @@ Options:
   -h, --help                 display help for command

 ```
+
+
+### `lxd` :
+```
+Usage: underpost lxd [options]
+
+Lxd management
+
+Options:
+  --init                     Init lxd
+  --reset                    Reset lxd on current machine
+  --install                  Install lxd on current machine
+  --dev                      Set dev context env
+  --control                  set control node vm context
+  --worker                   set worker node context
+  --create-vm <vm-id>        Create default virtual machines
+  --init-vm <vm-id>          Get init vm underpost script
+  --info-vm <vm-id>          Get all info vm
+  --start-vm <vm-id>         Start vm with networkt config
+  --root-size <gb-size>      Set root size vm
+  --join-node <nodes>        Comma separated worker and control node e. g.
+                             k8s-worker-1,k8s-control
+  --expose <vm-name-ports>   Vm name and : separated with Comma separated vm
+                             port to expose e. g. k8s-control:80,443
+  -h, --help                 display help for command
+
+```

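Based on the help text above (a usage sketch, not part of the published diff), the commands and flags added in this release can be combined roughly as follows; VM names, ports, and sizes are placeholders:

```
# host tooling for a k8s node (kind, kubeadm, docker, podman, helm)
underpost cluster --init-host

# LXD lifecycle and a control-plane VM
underpost lxd --install
underpost lxd --init
underpost lxd --create-vm k8s-control --root-size 64
underpost lxd --start-vm k8s-control

# forward host ports 80/443 to the VM, then restore the default /etc/hosts entries
underpost lxd --expose k8s-control:80,443
underpost deploy --restore-hosts
```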
package/docker-compose.yml
CHANGED

package/manifests/envoy-service-nodeport.yaml
ADDED
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: envoy
+  name: envoy
+  namespace: projectcontour
+spec:
+  externalTrafficPolicy: Cluster
+  ports:
+    - name: http
+      nodePort: 30080
+      port: 80
+      protocol: TCP
+      targetPort: 8080
+    - name: https
+      nodePort: 30443
+      port: 443
+      protocol: TCP
+      targetPort: 8443
+  selector:
+    app: envoy
+  type: NodePort
package/manifests/lxd/lxd-admin-profile.yaml
ADDED
@@ -0,0 +1,16 @@
+config:
+  limits.cpu: "2"
+  limits.memory: 4GB
+description: vm nat network
+devices:
+  eth0:
+    name: eth0
+    network: lxdbr0
+    type: nic
+  root:
+    path: /
+    pool: local # lxc storage list
+    size: 100GB
+    type: disk
+name: admin-profile
+used_by: []
package/manifests/lxd/lxd-preseed.yaml
ADDED
@@ -0,0 +1,58 @@
+config:
+  core.https_address: 127.0.0.1:8443
+
+networks:
+  - name: lxdbr0
+    type: bridge
+    config:
+      ipv4.address: 10.250.250.1/24
+      ipv4.nat: "true"
+      ipv4.dhcp: "true"
+      ipv4.dhcp.ranges: 10.250.250.2-10.250.250.254
+      ipv4.firewall: "false"
+      ipv6.address: none
+
+storage_pools:
+  - name: local
+    driver: zfs
+    config:
+      size: 100GiB
+
+profiles:
+  - name: default
+    config: {}
+    description: "default profile"
+    devices:
+      root:
+        path: /
+        pool: local
+        type: disk
+
+  - name: admin-profile
+    description: "vm nat network admin profile"
+    config:
+      limits.cpu: "2"
+      limits.memory: 4GB
+    devices:
+      eth0:
+        name: eth0
+        network: lxdbr0
+        type: nic
+      root:
+        path: /
+        pool: local
+        size: 100GB
+        type: disk
+
+projects: []
+
+cluster:
+  server_name: lxd-node1
+  enabled: true
+  member_config: []
+  cluster_address: ""
+  cluster_certificate: ""
+  server_address: ""
+  cluster_password: ""
+  cluster_token: ""
+  cluster_certificate_path: ""
package/manifests/lxd/underpost-setup.sh
ADDED
@@ -0,0 +1,146 @@
+#!/bin/bash
+
+set -e
+
+# Expand /dev/sda2 partition and resize filesystem automatically
+
+# Check if parted is installed
+if ! command -v parted &>/dev/null; then
+  echo "parted not found, installing..."
+  dnf install -y parted
+fi
+
+# Get start sector of /dev/sda2
+START_SECTOR=$(parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
+
+# Resize the partition
+parted /dev/sda ---pretend-input-tty <<EOF
+unit s
+resizepart 2 100%
+Yes
+quit
+EOF
+
+# Resize the filesystem
+resize2fs /dev/sda2
+
+echo "Disk and filesystem resized successfully."
+sudo dnf install -y tar
+sudo dnf install -y bzip2
+sudo dnf install -y git
+sudo dnf -y update
+sudo dnf -y install epel-release
+sudo dnf install -y ufw
+sudo systemctl enable --now ufw
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
+NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
+[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
+nvm install 23.8.0
+nvm use 23.8.0
+echo "
+██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
+██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
+██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
+██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔═══╝░██║░░██║░╚═══██╗░░░██║░░░
+╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
+░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
+
+Installing underpost k8s node ...
+
+"
+npm install -g underpost
+chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost
+sudo modprobe br_netfilter
+mkdir -p /home/dd
+cd $(underpost root)/underpost
+underpost cluster --init-host
+
+# Default flags
+USE_KUBEADM=false
+USE_KIND=false
+USE_WORKER=false
+
+# Loop through arguments
+for arg in "$@"; do
+  case "$arg" in
+  --kubeadm)
+    USE_KUBEADM=true
+    ;;
+  --kind)
+    USE_KIND=true
+    ;;
+  --worker)
+    USE_WORKER=true
+    ;;
+  esac
+done
+
+echo "USE_KUBEADM = $USE_KUBEADM"
+echo "USE_KIND = $USE_KIND"
+echo "USE_WORKER = $USE_WORKER"
+
+underpost cluster --kubeadm
+underpost cluster --reset
+
+PORTS=(
+  22    # SSH
+  80    # HTTP
+  443   # HTTPS
+  53    # DNS (TCP/UDP)
+  66    # TFTP
+  67    # DHCP
+  69    # TFTP
+  111   # rpcbind
+  179   # Calico BGP
+  2049  # NFS
+  20048 # NFS mountd
+  4011  # PXE boot
+  5240  # snapd API
+  5248  # Juju controller
+  6443  # Kubernetes API
+  9153  # CoreDNS metrics
+  10250 # Kubelet API
+  10251 # kube-scheduler
+  10252 # kube-controller-manager
+  10255 # Kubelet read-only (deprecated)
+  10257 # controller-manager (v1.23+)
+  10259 # scheduler (v1.23+)
+)
+
+PORT_RANGES=(
+  2379:2380   # etcd
+  # 30000:32767 # NodePort range
+  # 3000:3100   # App node ports
+  32765:32766 # Ephemeral ports
+  6783:6784   # Weave Net
+)
+
+# Open individual ports
+for PORT in "${PORTS[@]}"; do
+  ufw allow ${PORT}/tcp
+  ufw allow ${PORT}/udp
+done
+
+# Open port ranges
+for RANGE in "${PORT_RANGES[@]}"; do
+  ufw allow ${RANGE}/tcp
+  ufw allow ${RANGE}/udp
+done
+
+# Behavior based on flags
+if $USE_KUBEADM; then
+  echo "Running control node with kubeadm..."
+  underpost cluster --kubeadm
+  # kubectl get pods --all-namespaces -o wide -w
+fi
+
+if $USE_KIND; then
+  echo "Running control node with kind..."
+  underpost cluster
+  # kubectl get pods --all-namespaces -o wide -w
+fi
+
+if $USE_WORKER; then
+  echo "Running worker..."
+  underpost cluster --worker --config
+fi
package/manifests/mongodb/statefulset.yaml
CHANGED
@@ -3,7 +3,7 @@ kind: StatefulSet
 metadata:
   name: mongodb # Specifies the name of the statefulset
 spec:
-  serviceName:
+  serviceName: "mongodb-service" # Specifies the service to use
   replicas: 2
   selector:
     matchLabels:
@@ -18,8 +18,8 @@ spec:
           image: docker.io/library/mongo:latest
           command:
             - mongod
-            -
-            -
+            - "--replSet"
+            - "rs0"
             # - '--config'
             # - '-f'
             # - '/etc/mongod.conf'
@@ -35,9 +35,9 @@ spec:
             # - '--setParameter'
             # - 'authenticationMechanisms=SCRAM-SHA-1'
             # - '--fork'
-            -
-            -
-            -
+            - "--logpath"
+            - "/var/log/mongodb/mongod.log"
+            - "--bind_ip_all"
           # command: ['sh', '-c']
           # args:
           #   - |
@@ -99,11 +99,11 @@ spec:
                   key: password
           resources:
             requests:
-              cpu:
-              memory:
+              cpu: "100m"
+              memory: "256Mi"
             limits:
-              cpu:
-              memory:
+              cpu: "500m"
+              memory: "512Mi"
       volumes:
         - name: keyfile
           secret:
@@ -119,7 +119,8 @@ spec:
     - metadata:
         name: mongodb-storage
       spec:
-        accessModes: [
+        accessModes: ["ReadWriteOnce"]
+        storageClassName: mongodb-storage-class
        resources:
          requests:
            storage: 5Gi
package/manifests/mongodb/storage-class.yaml
ADDED
@@ -0,0 +1,9 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: mongodb-storage-class
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "false"
+provisioner: rancher.io/local-path
+reclaimPolicy: Retain
+volumeBindingMode: WaitForFirstConsumer
package/manifests/mysql/pv-pvc.yaml
ADDED
@@ -0,0 +1,27 @@
+# pv-pvc.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: mysql-pv
+  labels:
+    type: local
+spec:
+  storageClassName: manual
+  capacity:
+    storage: 20Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/data"
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mysql-pv-claim
+spec:
+  storageClassName: manual
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 20Gi
package/manifests/mysql/statefulset.yaml
ADDED
@@ -0,0 +1,55 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: mysql
+  labels:
+    app: mysql
+spec:
+  ports:
+    - port: 3306
+      name: mysql
+  selector:
+    app: mysql
+  clusterIP: None
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: mysql
+spec:
+  serviceName: "mysql"
+  selector:
+    matchLabels:
+      app: mysql
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: mysql
+    spec:
+      containers:
+        - image: mysql:9
+          name: mysql
+          env:
+            - name: MYSQL_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mysql-secret
+                  key: password
+          ports:
+            - containerPort: 3306
+              name: mysql
+          volumeMounts:
+            - name: mysql-persistent-storage
+              mountPath: /var/lib/mysql
+              subPath: mysql
+  volumeClaimTemplates:
+    - metadata:
+        name: mysql-persistent-storage
+      spec:
+        storageClassName: manual
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 20Gi
package/manifests/valkey/statefulset.yaml
CHANGED
@@ -19,9 +19,8 @@ spec:

       containers:
         - name: service-valkey
-          image: valkey/valkey:latest
-
-          imagePullPolicy: Never
+          image: docker.io/valkey/valkey:latest
+          imagePullPolicy: IfNotPresent
           env:
             - name: TZ
               value: Europe/Zurich
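A sketch (not part of the published diff) of how the new manifests and the VM bootstrap script above are consumed, following the references in src/cli/cluster.js and src/cli/lxd.js further down; the VM name is a placeholder:

```
# bootstrap a control-plane VM with underpost-setup.sh (as piped by the lxd --init-vm handler)
cat manifests/lxd/underpost-setup.sh | lxc exec k8s-control -- bash -s -- --kubeadm

# apply the new database and ingress manifests (as done by cluster init in kubeadm context)
kubectl apply -f manifests/mongodb/storage-class.yaml
kubectl apply -k manifests/mysql
sudo kubectl apply -f manifests/envoy-service-nodeport.yaml
```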
package/package.json
CHANGED
package/src/cli/cluster.js
CHANGED
@@ -15,6 +15,7 @@ class UnderpostCluster {
       mongodb: false,
       mongodb4: false,
       mariadb: false,
+      mysql: false,
       postgresql: false,
       valkey: false,
       full: false,
@@ -30,6 +31,10 @@ class UnderpostCluster {
       pullImage: false,
       dedicatedGpu: false,
       kubeadm: false,
+      initHost: false,
+      config: false,
+      worker: false,
+      chown: false,
     },
   ) {
     // sudo dnf update
@@ -38,6 +43,9 @@ class UnderpostCluster {
     // 3) Install Nvidia drivers from Rocky Linux docs
     // 4) Install LXD with MAAS from Rocky Linux docs
     // 5) Install MAAS src from snap
+    if (options.initHost === true) return UnderpostCluster.API.initHost();
+    if (options.config === true) UnderpostCluster.API.config();
+    if (options.chown === true) UnderpostCluster.API.chown();
     const npmRoot = getNpmRootPath();
     const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
     if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
@@ -83,27 +91,22 @@ class UnderpostCluster {
       shellExec(`sudo kubectl api-resources`);
       return;
     }
+    const alrreadyCluster =
+      UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0] ||
+      UnderpostDeploy.API.get('calico-kube-controllers')[0];

     if (
-
-
+      !options.worker &&
+      !alrreadyCluster &&
+      ((!options.kubeadm && !UnderpostDeploy.API.get('kube-apiserver-kind-control-plane')[0]) ||
+        (options.kubeadm === true && !UnderpostDeploy.API.get('calico-kube-controllers')[0]))
     ) {
-
-
-
-
-
-
-      // shellExec(`cp /etc/kubernetes/admin.conf ~/.kube/config`);
-      // shellExec(`sudo systemctl restart kubelet`);
-      shellExec(`sudo service docker restart`);
-      shellExec(`sudo systemctl enable --now containerd.service`);
-      shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
-      if (options.istio === true) {
-        shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
-        shellExec(`sudo kubeadm init --pod-network-cidr=192.168.0.0/16`);
-        shellExec(`sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config`);
-        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
+      UnderpostCluster.API.config();
+      if (options.kubeadm === true) {
+        shellExec(
+          `sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --control-plane-endpoint="${os.hostname()}:6443"`,
+        );
+        UnderpostCluster.API.chown();
         // https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart
         shellExec(
           `sudo kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml`,
@@ -112,14 +115,16 @@ class UnderpostCluster {
         //   `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
         // );
         shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
-        shellExec(`sudo systemctl restart containerd`);
         const nodeName = os.hostname();
         shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
+        shellExec(
+          `kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml`,
+        );
       } else {
-        shellExec(`sudo systemctl restart containerd`);
         if (options.full === true || options.dedicatedGpu === true) {
           // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
           shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
+          UnderpostCluster.API.chown();
         } else {
           shellExec(
             `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
@@ -127,7 +132,6 @@ class UnderpostCluster {
             }.yaml`,
           );
         }
-        shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
       }
     } else logger.warn('Cluster already initialized');

@@ -142,7 +146,8 @@ class UnderpostCluster {

     if (options.full === true || options.valkey === true) {
       if (options.pullImage === true) {
-        shellExec(`docker pull valkey/valkey`);
+        shellExec(`docker pull valkey/valkey:latest`);
+        shellExec(`sudo podman pull valkey/valkey:latest`);
       if (!options.kubeadm)
         shellExec(
           `sudo ${
@@ -157,12 +162,18 @@ class UnderpostCluster {
       shellExec(
         `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password`,
       );
-      shellExec(
-        `sudo kubectl create secret generic github-secret --from-literal=GITHUB_TOKEN=${process.env.GITHUB_TOKEN}`,
-      );
       shellExec(`kubectl delete statefulset mariadb-statefulset`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
     }
+    if (options.full === true || options.mysql === true) {
+      shellExec(
+        `sudo kubectl create secret generic mysql-secret --from-file=username=/home/dd/engine/engine-private/mysql-username --from-file=password=/home/dd/engine/engine-private/mysql-password`,
+      );
+      shellExec(`sudo mkdir -p /mnt/data`);
+      shellExec(`sudo chmod 777 /mnt/data`);
+      shellExec(`sudo chown -R root:root /mnt/data`);
+      shellExec(`kubectl apply -k ${underpostRoot}/manifests/mysql`);
+    }
     if (options.full === true || options.postgresql === true) {
       if (options.pullImage === true) {
         shellExec(`docker pull postgres:latest`);
@@ -210,6 +221,9 @@ class UnderpostCluster {

       // await UnderpostTest.API.statusMonitor('mongodb-1');
     } else if (options.full === true || options.mongodb === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull mongo:latest`);
+      }
       shellExec(
         `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile`,
       );
@@ -217,6 +231,8 @@ class UnderpostCluster {
         `sudo kubectl create secret generic mongodb-secret --from-file=username=/home/dd/engine/engine-private/mongodb-username --from-file=password=/home/dd/engine/engine-private/mongodb-password`,
       );
       shellExec(`kubectl delete statefulset mongodb`);
+      if (options.kubeadm === true)
+        shellExec(`kubectl apply -f ${underpostRoot}/manifests/mongodb/storage-class.yaml`);
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb`);

       const successInstance = await UnderpostTest.API.statusMonitor('mongodb-1');
@@ -239,8 +255,12 @@ class UnderpostCluster {
       }
     }

-    if (options.full === true || options.contour === true)
+    if (options.full === true || options.contour === true) {
       shellExec(`kubectl apply -f https://projectcontour.io/quickstart/contour.yaml`);
+      if (options.kubeadm === true) {
+        shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/envoy-service-nodeport.yaml`);
+      }
+    }

     if (options.full === true || options.certManager === true) {
       if (!UnderpostDeploy.API.get('cert-manager').find((p) => p.STATUS === 'Running')) {
@@ -259,6 +279,26 @@ class UnderpostCluster {
       shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
     }
   },
+
+  config() {
+    shellExec(`sudo setenforce 0`);
+    shellExec(`sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config`);
+    shellExec(`sudo systemctl enable --now docker`);
+    shellExec(`sudo systemctl enable --now kubelet`);
+    shellExec(`containerd config default > /etc/containerd/config.toml`);
+    shellExec(`sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml`);
+    shellExec(`sudo service docker restart`);
+    shellExec(`sudo systemctl enable --now containerd.service`);
+    shellExec(`sudo swapoff -a; sudo sed -i '/swap/d' /etc/fstab`);
+    shellExec(`sudo systemctl daemon-reload`);
+    shellExec(`sudo systemctl restart containerd`);
+    shellExec(`sysctl net.bridge.bridge-nf-call-iptables=1`);
+  },
+  chown() {
+    shellExec(`mkdir -p ~/.kube`);
+    shellExec(`sudo -E cp -i /etc/kubernetes/admin.conf ~/.kube/config`);
+    shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
+  },
   // This function performs a comprehensive reset of Kubernetes and container environments
   // on the host machine. Its primary goal is to clean up cluster components, temporary files,
   // and container data, ensuring a clean state for re-initialization or fresh deployments,
@@ -379,6 +419,14 @@ class UnderpostCluster {
     // Step 14: Remove the 'kind' Docker network.
     // This cleans up any network bridges or configurations specifically created by Kind.
     // shellExec(`docker network rm kind`);
+
+    // Reset kubelet
+    shellExec(`sudo systemctl stop kubelet`);
+    shellExec(`sudo rm -rf /etc/kubernetes/*`);
+    shellExec(`sudo rm -rf /var/lib/kubelet/*`);
+    shellExec(`sudo rm -rf /etc/cni/net.d/*`);
+    shellExec(`sudo systemctl daemon-reload`);
+    shellExec(`sudo systemctl start kubelet`);
   },

   getResourcesCapacity(kubeadm = false) {
@@ -426,6 +474,35 @@ Allocatable:

     return resources;
   },
+  initHost() {
+    // Install docker
+    shellExec(`sudo dnf -y install dnf-plugins-core
+sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo`);
+    shellExec(`sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin`);
+    // Install podman
+    shellExec(`sudo dnf -y install podman`);
+    // Install kind
+    shellExec(`[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.29.0/kind-linux-arm64
+chmod +x ./kind
+sudo mv ./kind /bin/kind`);
+    // Install kubeadm
+    shellExec(`cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/
+enabled=1
+gpgcheck=1
+gpgkey=https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key
+exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
+EOF`);
+    shellExec(`sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes`);
+    // Install helm
+    shellExec(`curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+chmod 700 get_helm.sh
+./get_helm.sh
+chmod +x /usr/local/bin/helm
+sudo mv /usr/local/bin/helm /bin/helm`);
+  },
 };
 }
 export default UnderpostCluster;
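The new --init-host, --config, --chown, and --worker handlers above roughly map onto the following host-preparation flow (a sketch, not part of the published diff; the kubeadm path also calls config/chown internally):

```
underpost cluster --init-host      # install kind, kubeadm, docker, podman, helm
underpost cluster --config         # SELinux, containerd, swap, sysctl node config
underpost cluster --kubeadm        # kubeadm init + Calico + local-path provisioner
underpost cluster --chown          # copy admin.conf to ~/.kube/config

# worker node: apply node config only, then join via kubeadm
underpost cluster --worker --config
```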
package/src/cli/deploy.js
CHANGED
@@ -15,6 +15,7 @@ import dotenv from 'dotenv';
 import { DataBaseProvider } from '../db/DataBaseProvider.js';
 import UnderpostRootEnv from './env.js';
 import UnderpostCluster from './cluster.js';
+import Underpost from '../index.js';

 const logger = loggerFactory(import.meta);

@@ -80,14 +81,14 @@ spec:
     spec:
       containers:
         - name: ${deployId}-${env}-${suffix}
-          image: localhost/debian
-          resources:
-            requests:
-              memory: "${resources.requests.memory}"
-              cpu: "${resources.requests.cpu}"
-            limits:
-              memory: "${resources.limits.memory}"
-              cpu: "${resources.limits.cpu}"
+          image: localhost/debian-underpost:${Underpost.version}
+          # resources:
+          #   requests:
+          #     memory: "${resources.requests.memory}"
+          #     cpu: "${resources.requests.cpu}"
+          #   limits:
+          #     memory: "${resources.limits.memory}"
+          #     cpu: "${resources.limits.cpu}"
           command:
             - /bin/sh
             - -c
@@ -243,6 +244,7 @@ spec:
       traffic: '',
       dashboardUpdate: false,
       replicas: '',
+      restoreHosts: false,
      disableUpdateDeployment: false,
      infoTraffic: false,
      rebuildClientsBundle: false,
@@ -297,11 +299,17 @@ kubectl get configmap kubelet-config -n kube-system -o yaml > kubelet-config.yam
     shellExec(
       `kubectl create configmap underpost-config --from-file=/home/dd/engine/engine-private/conf/dd-cron/.env.${env}`,
     );
+    let renderHosts = '';
+    let concatHots = '';
     const etcHost = (
       concat,
     ) => `127.0.0.1 ${concat} localhost localhost.localdomain localhost4 localhost4.localdomain4
 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6`;
-
+    if (options.restoreHosts === true) {
+      renderHosts = etcHost(concatHots);
+      fs.writeFileSync(`/etc/hosts`, renderHosts, 'utf8');
+      return;
+    }

     for (const _deployId of deployList.split(',')) {
       const deployId = _deployId.trim();
@@ -342,7 +350,6 @@ kubectl get configmap kubelet-config -n kube-system -o yaml > kubelet-config.yam
         shellExec(`sudo kubectl apply -f ./${manifestsPath}/secret.yaml`);
       }
     }
-    let renderHosts;
     switch (process.platform) {
       case 'linux':
         {
package/src/cli/image.js
CHANGED
@@ -1,9 +1,9 @@
 import fs from 'fs-extra';
-import { shellCd, shellExec } from '../server/process.js';
 import dotenv from 'dotenv';
-import { awaitDeployMonitor, getNpmRootPath } from '../server/conf.js';
 import { loggerFactory } from '../server/logger.js';
-import
+import Underpost from '../index.js';
+import { getUnderpostRootPath } from '../server/conf.js';
+import { shellExec } from '../server/process.js';

 dotenv.config();

@@ -12,8 +12,23 @@ const logger = loggerFactory(import.meta);
 class UnderpostImage {
   static API = {
     dockerfile: {
-      pullBaseImages(
+      pullBaseImages(
+        options = {
+          kindLoad: false,
+          kubeadmLoad: false,
+          path: false,
+          version: '',
+        },
+      ) {
         shellExec(`sudo podman pull docker.io/library/debian:buster`);
+        const IMAGE_NAME = `debian-underpost`;
+        const IMAGE_NAME_FULL = `${IMAGE_NAME}:${options.version ?? Underpost.version}`;
+        const LOAD_TYPE = options.kindLoad === true ? `--kin-load` : `--kubeadm-load`;
+        shellExec(
+          `underpost dockerfile-image-build --podman-save --no-cache --image-path=. --path ${
+            options.path ?? getUnderpostRootPath()
+          } --image-name=${IMAGE_NAME_FULL} ${LOAD_TYPE}`,
+        );
       },
       build(
        options = {
package/src/cli/index.js
CHANGED
@@ -1,10 +1,11 @@
 import dotenv from 'dotenv';
 import { Command } from 'commander';
 import Underpost from '../index.js';
-import { getUnderpostRootPath, loadConf } from '../server/conf.js';
+import { getNpmRootPath, getUnderpostRootPath, loadConf } from '../server/conf.js';
 import fs from 'fs-extra';
 import { commitData } from '../client/components/core/CommonJs.js';
 import { shellExec } from '../server/process.js';
+import UnderpostLxd from './lxd.js';

 const underpostRootPath = getUnderpostRootPath();
 fs.existsSync(`${underpostRootPath}/.env`)
@@ -92,10 +93,11 @@ program
   .argument('[pod-name]', 'Optional pod name filter')
   .option('--reset', `Delete all clusters and prune all data and caches`)
   .option('--mariadb', 'Init with mariadb statefulset')
+  .option('--mysql', 'Init with mysql statefulset')
   .option('--mongodb', 'Init with mongodb statefulset')
   .option('--postgresql', 'Init with postgresql statefulset')
   .option('--mongodb4', 'Init with mongodb 4.4 service')
-  .option('--istio', 'Init base istio
+  // .option('--istio', 'Init base istio service mesh')
   .option('--valkey', 'Init with valkey service')
   .option('--contour', 'Init with project contour base HTTPProxy and envoy')
   .option('--cert-manager', 'Init with letsencrypt-prod ClusterIssuer')
@@ -109,12 +111,16 @@ program
   .option('--info-capacity', 'display current total machine capacity info')
   .option('--info-capacity-pod', 'display current machine capacity pod info')
   .option('--pull-image', 'Set optional pull associated image')
+  .option('--init-host', 'Install k8s node necessary cli env: kind, kubeadm, docker, podman, helm')
+  .option('--config', 'Set k8s base node config')
+  .option('--worker', 'Set worker node context')
+  .option('--chown', 'Set k8s kube chown')
   .action(Underpost.cluster.init)
   .description('Manage cluster, for default initialization base kind cluster');

 program
   .command('deploy')
-  .argument('
+  .argument('[deploy-list]', 'Deploy id list, e.g. default-a,default-b')
   .argument('[env]', 'Optional environment, for default is development')
   .option('--remove', 'Delete deployments and services')
   .option('--sync', 'Sync deployments env, ports, and replicas')
@@ -130,6 +136,7 @@ program
   .option('--disable-update-deployment', 'Disable update deployments')
   .option('--info-traffic', 'get traffic conf form current resources deployments')
   .option('--kubeadm', 'Enable kubeadm context')
+  .option('--restore-hosts', 'Restore defautl etc hosts')
   .option(
     '--rebuild-clients-bundle',
     'Inside container, rebuild clients bundle, only static public or storage client files',
@@ -169,6 +176,10 @@ program

 program
   .command('dockerfile-pull-base-images')
+  .option('--path [path]', 'Dockerfile path')
+  .option('--kind-load', 'Import tar image to Kind cluster')
+  .option('--kubeadm-load', 'Import tar image to Kubeadm cluster')
+  .option('--version', 'Set custom version')
   .description('Pull underpost dockerfile images requirements')
   .action(Underpost.image.dockerfile.pullBaseImages);

@@ -259,6 +270,27 @@ program
   .description('Monitor health server management')
   .action(Underpost.monitor.callback);

+program
+  .command('lxd')
+  .option('--init', 'Init lxd')
+  .option('--reset', 'Reset lxd on current machine')
+  .option('--install', 'Install lxd on current machine')
+  .option('--dev', 'Set dev context env')
+  .option('--control', 'set control node vm context')
+  .option('--worker', 'set worker node context')
+  .option('--create-vm <vm-id>', 'Create default virtual machines')
+  .option('--init-vm <vm-id>', 'Get init vm underpost script')
+  .option('--info-vm <vm-id>', 'Get all info vm')
+  .option('--start-vm <vm-id>', 'Start vm with networkt config')
+  .option('--root-size <gb-size>', 'Set root size vm')
+  .option('--join-node <nodes>', 'Comma separated worker and control node e. g. k8s-worker-1,k8s-control')
+  .option(
+    '--expose <vm-name-ports>',
+    'Vm name and : separated with Comma separated vm port to expose e. g. k8s-control:80,443',
+  )
+  .description('Lxd management')
+  .action(UnderpostLxd.API.callback);
+
 const buildCliDoc = () => {
   let md = shellExec(`node bin help`, { silent: true, stdout: true }).split('Options:');
   const baseOptions =
package/src/cli/lxd.js
ADDED
@@ -0,0 +1,162 @@
+import { getNpmRootPath } from '../server/conf.js';
+import { getLocalIPv4Address } from '../server/dns.js';
+import { pbcopy, shellExec } from '../server/process.js';
+import fs from 'fs-extra';
+
+class UnderpostLxd {
+  static API = {
+    async callback(
+      options = {
+        init: false,
+        reset: false,
+        dev: false,
+        install: false,
+        createVirtualNetwork: false,
+        control: false,
+        worker: false,
+        startVm: '',
+        initVm: '',
+        createVm: '',
+        infoVm: '',
+        rootSize: '',
+        joinNode: '',
+        expose: '',
+      },
+    ) {
+      const npmRoot = getNpmRootPath();
+      const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
+      if (options.reset === true) {
+        shellExec(`sudo systemctl stop snap.lxd.daemon`);
+        shellExec(`sudo snap remove lxd --purge`);
+      }
+      if (options.install === true) shellExec(`sudo snap install lxd`);
+      if (options.init === true) {
+        shellExec(`sudo systemctl start snap.lxd.daemon`);
+        shellExec(`sudo systemctl status snap.lxd.daemon`);
+        const lxdPressedContent = fs
+          .readFileSync(`${underpostRoot}/manifests/lxd/lxd-preseed.yaml`, 'utf8')
+          .replaceAll(`127.0.0.1`, getLocalIPv4Address());
+        // shellExec(`lxc profile show admin-profile`);
+        // shellExec(`lxc network show lxdbr0`);
+        // shellExec(`lxd init --preseed < ${underpostRoot}/manifests/lxd/lxd-preseed.yaml`);
+        shellExec(`echo "${lxdPressedContent}" | lxd init --preseed`);
+        shellExec(`lxc cluster list`);
+      }
+      if (options.createVm && typeof options.createVm === 'string') {
+        // lxc launch
+        const createVmCommand = `lxc init images:rockylinux/9/cloud ${
+          options.createVm
+        } --vm --target lxd-node1 -c limits.cpu=2 -c limits.memory=4GB --profile admin-profile -d root,size=${
+          options.rootSize && typeof options.rootSize === 'string' ? options.rootSize + 'GiB' : '32GiB'
+        }`;
+        pbcopy(createVmCommand); // Copy the command to clipboard for user
+      }
+      if (options.startVm && typeof options.startVm === 'string') {
+        const vmIp = UnderpostLxd.API.getNextAvailableIp();
+        shellExec(`lxc stop ${options.startVm}`);
+        shellExec(
+          `lxc config set ${options.startVm} user.network-config="${UnderpostLxd.API.generateCloudInitNetworkConfig(
+            vmIp,
+          )}"`,
+        );
+        shellExec(`lxc config device override ${options.startVm} eth0`);
+        shellExec(`lxc config device set ${options.startVm} eth0 ipv4.address ${vmIp}`);
+        shellExec(
+          `lxc config set ${options.startVm} user.user-data="#cloud-config
+runcmd:
+  - [touch, /var/log/userdata-ok]"`,
+        );
+        shellExec(`lxc start ${options.startVm}`);
+      }
+      if (options.initVm && typeof options.initVm === 'string') {
+        let flag = '';
+        if (options.control === true) {
+          flag = ' -s -- --kubeadm';
+          shellExec(`lxc exec ${options.initVm} -- bash -c 'mkdir -p /home/dd/engine'`);
+          shellExec(`lxc file push /home/dd/engine/engine-private ${options.initVm}/home/dd/engine --recursive`);
+        } else if (options.worker == true) {
+          flag = ' -s -- --worker';
+        }
+        pbcopy(`cat ${underpostRoot}/manifests/lxd/underpost-setup.sh | lxc exec ${options.initVm} -- bash${flag}`);
+      }
+      if (options.joinNode && typeof options.joinNode === 'string') {
+        const [workerNode, controlNode] = options.joinNode.split(',');
+        const token = shellExec(
+          `echo "$(lxc exec ${controlNode} -- bash -c 'sudo kubeadm token create --print-join-command')"`,
+          { stdout: true },
+        );
+        shellExec(`lxc exec ${workerNode} -- bash -c '${token}'`);
+      }
+      if (options.infoVm && typeof options.infoVm === 'string') {
+        shellExec(`lxc config show ${options.infoVm}`);
+        shellExec(`lxc info --show-log ${options.infoVm}`);
+        shellExec(`lxc info ${options.infoVm}`);
+        shellExec(`lxc list ${options.infoVm}`);
+      }
+      if (options.expose && typeof options.expose === 'string') {
+        const [controlNode, ports] = options.expose.split(':');
+        console.log({ controlNode, ports });
+        const protocols = ['tcp', 'udp'];
+        const hostIp = getLocalIPv4Address();
+        const vmIp = shellExec(
+          `lxc list ${controlNode} --format json | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family=="inet") | .address'`,
+          { stdout: true },
+        ).trim();
+        for (const port of ports.split(',')) {
+          for (const protocol of protocols) {
+            shellExec(`lxc config device remove ${controlNode} ${controlNode}-port-${port}`);
+            shellExec(
+              `lxc config device add ${controlNode} ${controlNode}-port-${port} proxy listen=${protocol}:${hostIp}:${port} connect=${protocol}:${vmIp}:${port} nat=true`,
+            );
+            shellExec(`lxc config show ${controlNode} --expanded | grep proxy`);
+          }
+        }
+      }
+    },
+    generateCloudInitNetworkConfig(ip) {
+      return `version: 2
+ethernets:
+  enp5s0:
+    dhcp4: false
+    addresses:
+      - ${ip}/24
+    gateway4: 10.250.250.1
+    nameservers:
+      addresses: [1.1.1.1, 8.8.8.8]`;
+    },
+    getUsedIpsFromLxd() {
+      const json = shellExec('lxc list --format json', { stdout: true, silent: true });
+      const vms = JSON.parse(json);
+
+      const usedIps = [];
+
+      for (const vm of vms) {
+        if (vm.state && vm.state.network) {
+          for (const iface of Object.values(vm.state.network)) {
+            if (iface.addresses) {
+              for (const addr of iface.addresses) {
+                if (addr.family === 'inet' && addr.address.startsWith('10.250.250.')) {
+                  usedIps.push(addr.address);
+                }
+              }
+            }
+          }
+        }
+      }
+
+      return usedIps;
+    },
+    getNextAvailableIp(base = '10.250.250.', start = 100, end = 254) {
+      const usedIps = UnderpostLxd.API.getUsedIpsFromLxd();
+      for (let i = start; i <= end; i++) {
+        const candidate = `${base}${i}`;
+        if (!usedIps.includes(candidate)) {
+          return candidate;
+        }
+      }
+      throw new Error('No IPs available in the static range');
+    },
+  };
+}
+
+export default UnderpostLxd;
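A sketch (not part of the published diff) of the node-join and port-expose flow implemented by the --join-node and --expose handlers above; VM names are placeholders:

```
# create a join token on the control VM and run it on the worker VM
underpost lxd --join-node k8s-worker-1,k8s-control

# add NAT proxy devices forwarding host ports 80/443 to the control VM
underpost lxd --expose k8s-control:80,443

# inspect the resulting VM configuration
underpost lxd --info-vm k8s-control
```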
package/src/index.js
CHANGED
@@ -11,6 +11,7 @@ import UnderpostDeploy from './cli/deploy.js';
 import UnderpostRootEnv from './cli/env.js';
 import UnderpostFileStorage from './cli/fs.js';
 import UnderpostImage from './cli/image.js';
+import UnderpostLxd from './cli/lxd.js';
 import UnderpostMonitor from './cli/monitor.js';
 import UnderpostRepository from './cli/repository.js';
 import UnderpostScript from './cli/script.js';
@@ -30,7 +31,7 @@ class Underpost {
    * @type {String}
    * @memberof Underpost
    */
-  static version = 'v2.8.
+  static version = 'v2.8.79';
   /**
    * Repository cli API
    * @static
@@ -122,6 +123,13 @@ class Underpost {
    * @memberof Underpost
    */
   static monitor = UnderpostMonitor.API;
+  /**
+   * LXD cli API
+   * @static
+   * @type {UnderpostLxd.API}
+   * @memberof Underpost
+   */
+  static lxd = UnderpostLxd.API;
 }

 const up = Underpost;