underpost 2.8.794 → 2.8.796
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/bin/deploy.js +19 -2
- package/cli.md +2 -2
- package/docker-compose.yml +1 -1
- package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
- package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
- package/manifests/valkey/service.yaml +2 -2
- package/manifests/valkey/statefulset.yaml +5 -5
- package/package.json +1 -1
- package/src/cli/cluster.js +175 -63
- package/src/cli/deploy.js +5 -5
- package/src/cli/image.js +5 -5
- package/src/cli/index.js +1 -1
- package/src/cli/lxd.js +4 -0
- package/src/index.js +1 -1
- package/src/server/runtime.js +1 -1
- package/src/server/valkey.js +3 -3
package/README.md
CHANGED
package/bin/deploy.js
CHANGED
@@ -797,6 +797,13 @@ try {
         .replaceAll(`engine.version: '${version}'`, `engine.version: '${newVersion}'`),
       'utf8',
     );
+    fs.writeFileSync(
+      `./manifests/deployment/dd-template-development/deployment.yaml`,
+      fs
+        .readFileSync(`./manifests/deployment/dd-template-development/deployment.yaml`, 'utf8')
+        .replaceAll(`underpost:v${version}`, `underpost:v${newVersion}`),
+      'utf8',
+    );
 
     if (fs.existsSync(`./.github/workflows/docker-image.yml`))
       fs.writeFileSync(
@@ -921,6 +928,16 @@ ${shellExec(`git log | grep Author: | sort -u`, { stdout: true }).split(`\n`).jo
       };
       DefaultConf.server[host][path].apiBaseProxyPath = '/';
       DefaultConf.server[host][path].apiBaseHost = 'www.nexodev.org';
+    } else if (confName === 'template') {
+      const host = 'default.net';
+      const path = '/';
+      DefaultConf.server[host][path].valkey = {
+        port: 6379,
+        host: 'valkey-service.default.svc.cluster.local',
+      };
+      // mongodb-0.mongodb-service
+      DefaultConf.server[host][path].db.host = 'mongodb://mongodb-service:27017';
+      confName = '';
     } else if (confName) {
       DefaultConf.client = JSON.parse(fs.readFileSync(`./engine-private/conf/${confName}/conf.client.json`, 'utf8'));
       DefaultConf.server = JSON.parse(fs.readFileSync(`./engine-private/conf/${confName}/conf.server.json`, 'utf8'));
@@ -2223,7 +2240,7 @@ EOF`);
     const args = [
       `node bin dockerfile-image-build --path ${path}/backend/`,
      `--image-name=${imageName} --image-path=${path}`,
-      `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --
+      `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --reset`,
     ];
     shellExec(args.join(' '));
   }
@@ -2235,7 +2252,7 @@ EOF`);
     const args = [
       `node bin dockerfile-image-build --path ${path}/frontend/`,
      `--image-name=${imageName} --image-path=${path}`,
-      `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --
+      `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --reset`,
     ];
     shellExec(args.join(' '));
   }
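The new `template` branch of `update-default-conf` is what the container command `node bin/deploy update-default-conf template` in the manifests below relies on: it points the default host at in-cluster service DNS names. A minimal sketch of the resulting `default.net` entry, with only the fields named in this diff (everything else in `DefaultConf` is omitted):

```js
// Sketch only: shape of DefaultConf.server['default.net']['/'] after the
// 'template' branch runs; all other configuration fields are omitted.
const templateServerConf = {
  'default.net': {
    '/': {
      valkey: {
        port: 6379,
        host: 'valkey-service.default.svc.cluster.local', // in-cluster Valkey Service DNS name
      },
      db: {
        host: 'mongodb://mongodb-service:27017', // in-cluster MongoDB Service
      },
    },
  },
};
console.log(JSON.stringify(templateServerConf, null, 2));
```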
package/cli.md
CHANGED
@@ -1,4 +1,4 @@
-## underpost ci/cd cli v2.8.
+## underpost ci/cd cli v2.8.796
 
 ### Usage: `underpost [options] [command]`
 ```
@@ -298,7 +298,7 @@ Options:
   --kubeadm-load                 Import tar image to Kubeadm cluster
   --secrets                      Dockerfile env secrets
   --secrets-path [secrets-path]  Dockerfile custom path env secrets
-  --
+  --reset                        Build without using cache
   -h, --help                     display help for command
 
 ```
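A hypothetical invocation of the build command with the new `--reset` flag, following the same args pattern `bin/deploy.js` uses above (image name and paths are illustrative; `execSync` stands in for the package's `shellExec` helper):

```js
import { execSync } from 'node:child_process';

// Illustrative only: rebuild a backend image without the podman layer cache
// and load the saved tar into a kubeadm cluster.
const args = [
  'node bin dockerfile-image-build --path ./backend/',
  '--image-name=example-app --image-path=.',
  '--podman-save --kubeadm-load --reset',
];
execSync(args.join(' '), { stdio: 'inherit' });
```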
package/docker-compose.yml
CHANGED

package/manifests/deployment/dd-template-development/deployment.yaml
ADDED

@@ -0,0 +1,167 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dd-template-development-blue
+  labels:
+    app: dd-template-development-blue
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: dd-template-development-blue
+  template:
+    metadata:
+      labels:
+        app: dd-template-development-blue
+    spec:
+      containers:
+        - name: dd-template-development-blue
+          image: localhost/debian-underpost:v2.8.796
+          # resources:
+          #   requests:
+          #     memory: "124Ki"
+          #     cpu: "100m"
+          #   limits:
+          #     memory: "1992Ki"
+          #     cpu: "1600m"
+          command:
+            - /bin/sh
+            - -c
+            - >
+              npm install -g npm@11.2.0 &&
+              npm install -g underpost &&
+              cd $(underpost root)/underpost &&
+              node bin/deploy update-default-conf template &&
+              mkdir -p /home/dd &&
+              cd /home/dd &&
+              underpost new engine
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: dd-template-development-blue-service
+spec:
+  selector:
+    app: dd-template-development-blue
+  ports:
+    - name: "tcp-4001"
+      protocol: TCP
+      port: 4001
+      targetPort: 4001
+    - name: "udp-4001"
+      protocol: UDP
+      port: 4001
+      targetPort: 4001
+
+    - name: "tcp-4002"
+      protocol: TCP
+      port: 4002
+      targetPort: 4002
+    - name: "udp-4002"
+      protocol: UDP
+      port: 4002
+      targetPort: 4002
+
+    - name: "tcp-4003"
+      protocol: TCP
+      port: 4003
+      targetPort: 4003
+    - name: "udp-4003"
+      protocol: UDP
+      port: 4003
+      targetPort: 4003
+
+    - name: "tcp-4004"
+      protocol: TCP
+      port: 4004
+      targetPort: 4004
+    - name: "udp-4004"
+      protocol: UDP
+      port: 4004
+      targetPort: 4004
+  type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dd-template-development-green
+  labels:
+    app: dd-template-development-green
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: dd-template-development-green
+  template:
+    metadata:
+      labels:
+        app: dd-template-development-green
+    spec:
+      containers:
+        - name: dd-template-development-green
+          image: localhost/debian-underpost:v2.8.796
+          # resources:
+          #   requests:
+          #     memory: "124Ki"
+          #     cpu: "100m"
+          #   limits:
+          #     memory: "1992Ki"
+          #     cpu: "1600m"
+          command:
+            - /bin/sh
+            - -c
+            - >
+              npm install -g npm@11.2.0 &&
+              npm install -g underpost &&
+              cd $(underpost root)/underpost &&
+              node bin/deploy update-default-conf template &&
+              mkdir -p /home/dd &&
+              cd /home/dd &&
+              underpost new engine
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: dd-template-development-green-service
+spec:
+  selector:
+    app: dd-template-development-green
+  ports:
+    - name: "tcp-4001"
+      protocol: TCP
+      port: 4001
+      targetPort: 4001
+    - name: "udp-4001"
+      protocol: UDP
+      port: 4001
+      targetPort: 4001
+
+    - name: "tcp-4002"
+      protocol: TCP
+      port: 4002
+      targetPort: 4002
+    - name: "udp-4002"
+      protocol: UDP
+      port: 4002
+      targetPort: 4002
+
+    - name: "tcp-4003"
+      protocol: TCP
+      port: 4003
+      targetPort: 4003
+    - name: "udp-4003"
+      protocol: UDP
+      port: 4003
+      targetPort: 4003
+
+    - name: "tcp-4004"
+      protocol: TCP
+      port: 4004
+      targetPort: 4004
+    - name: "udp-4004"
+      protocol: UDP
+      port: 4004
+      targetPort: 4004
+  type: LoadBalancer
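Not part of the package, but a minimal sketch of how the blue/green manifests above might be applied and checked once the `debian-underpost` image has been loaded into the cluster (deployment and service names come from the manifest; `execSync` stands in for the package's `shellExec` helper):

```js
import { execSync } from 'node:child_process';

// Sketch: apply the blue/green deployments and wait for the blue rollout.
const manifest = './manifests/deployment/dd-template-development/deployment.yaml';
execSync(`kubectl apply -f ${manifest}`, { stdio: 'inherit' });
execSync('kubectl rollout status deployment/dd-template-development-blue', { stdio: 'inherit' });
execSync('kubectl get svc dd-template-development-blue-service', { stdio: 'inherit' });
```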
package/manifests/deployment/dd-template-development/proxy.yaml
ADDED

@@ -0,0 +1,46 @@
+# "http://default.net:4001/socket.io": "http://localhost:4001/socket.io",
+# "http://default.net:4002/peer": "http://localhost:4002/peer",
+# "http://default.net:4001/": "http://localhost:4001/",
+# "http://www.default.net:4003/": "http://localhost:4003/"
+
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+  name: default.net
+spec:
+  virtualhost:
+    fqdn: default.net
+  routes:
+    - conditions:
+        - prefix: /
+      enableWebsockets: true
+      services:
+        - name: dd-template-development-blue-service
+          port: 4001
+          weight: 100
+
+    - conditions:
+        - prefix: /peer
+      enableWebsockets: true
+      services:
+        - name: dd-template-development-blue-service
+          port: 4002
+          weight: 100
+
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+  name: www.default.net
+spec:
+  virtualhost:
+    fqdn: www.default.net
+  routes:
+    - conditions:
+        - prefix: /
+      enableWebsockets: true
+      services:
+        - name: dd-template-development-blue-service
+          port: 4003
+          weight: 100
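Both HTTPProxy routes above send all traffic to the blue service with `weight: 100`. Not part of the package, but one way to cut the root route of `default.net` over to green is to patch the route's backend in place (Contour's `httpproxy` resource accepts standard JSON patches); resource and service names follow the manifest above:

```js
import { execSync } from 'node:child_process';

// Sketch: switch the "/" route of default.net from the blue to the green service.
execSync(
  `kubectl patch httpproxy default.net --type=json -p='[{"op":"replace","path":"/spec/routes/0/services/0/name","value":"dd-template-development-green-service"}]'`,
  { stdio: 'inherit' },
);
```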
package/manifests/valkey/statefulset.yaml
CHANGED

@@ -1,22 +1,22 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: service
+  name: valkey-service
   namespace: default
 spec:
-  serviceName: service
+  serviceName: valkey-service
   replicas: 1
   selector:
     matchLabels:
-      app: service
+      app: valkey-service
   template:
     metadata:
       labels:
-        app: service
+        app: valkey-service
     spec:
       automountServiceAccountToken: false
       containers:
-        - name: service
+        - name: valkey-service
          image: docker.io/valkey/valkey:latest
          imagePullPolicy: IfNotPresent
          command: ["valkey-server"]
package/package.json
CHANGED
package/src/cli/cluster.js
CHANGED
@@ -9,6 +9,37 @@ const logger = loggerFactory(import.meta);
 
 class UnderpostCluster {
   static API = {
+    /**
+     * @method init
+     * @description Initializes and configures the Kubernetes cluster based on provided options.
+     * This method handles host prerequisites, cluster initialization (Kind or Kubeadm),
+     * and optional component deployments.
+     * @param {string} [podName] - Optional name of a pod for specific operations (e.g., listing).
+     * @param {object} [options] - Configuration options for cluster initialization.
+     * @param {boolean} [options.mongodb=false] - Deploy MongoDB.
+     * @param {boolean} [options.mongodb4=false] - Deploy MongoDB 4.4.
+     * @param {boolean} [options.mariadb=false] - Deploy MariaDB.
+     * @param {boolean} [options.mysql=false] - Deploy MySQL.
+     * @param {boolean} [options.postgresql=false] - Deploy PostgreSQL.
+     * @param {boolean} [options.valkey=false] - Deploy Valkey.
+     * @param {boolean} [options.full=false] - Deploy a full set of common components.
+     * @param {boolean} [options.info=false] - Display extensive Kubernetes cluster information.
+     * @param {boolean} [options.certManager=false] - Deploy Cert-Manager for certificate management.
+     * @param {boolean} [options.listPods=false] - List Kubernetes pods.
+     * @param {boolean} [options.reset=false] - Perform a comprehensive reset of Kubernetes and container environments.
+     * @param {boolean} [options.dev=false] - Run in development mode (adjusts paths).
+     * @param {string} [options.nsUse=''] - Set the current kubectl namespace.
+     * @param {boolean} [options.infoCapacity=false] - Display resource capacity information for the cluster.
+     * @param {boolean} [options.infoCapacityPod=false] - Display resource capacity information for pods.
+     * @param {boolean} [options.istio=false] - Deploy Istio service mesh.
+     * @param {boolean} [options.pullImage=false] - Pull necessary Docker images before deployment.
+     * @param {boolean} [options.dedicatedGpu=false] - Configure for dedicated GPU usage (e.g., NVIDIA GPU Operator).
+     * @param {boolean} [options.kubeadm=false] - Initialize the cluster using Kubeadm.
+     * @param {boolean} [options.initHost=false] - Perform initial host setup (install Docker, Podman, Kind, Kubeadm, Helm).
+     * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
+     * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm join).
+     * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
+     */
     async init(
       podName,
       options = {
@@ -113,6 +144,12 @@ class UnderpostCluster {
       );
       // Configure kubectl for the current user
       UnderpostCluster.API.chown();
+
+      // Apply kubelet-config.yaml explicitly
+      // Using 'kubectl replace --force' to ensure the ConfigMap is updated,
+      // even if it was modified by kubeadm or other processes, resolving conflicts.
+      // shellExec(`kubectl replace --force -f ${underpostRoot}/manifests/kubelet-config.yaml`);
+
       // Install Calico CNI
       logger.info('Installing Calico CNI...');
       shellExec(
@@ -172,7 +209,7 @@ class UnderpostCluster {
          } valkey/valkey:latest`,
        );
      }
-      shellExec(`kubectl delete statefulset service
+      shellExec(`kubectl delete statefulset valkey-service`);
      shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
    }
    if (options.full === true || options.mariadb === true) {
@@ -298,8 +335,9 @@ class UnderpostCluster {
     /**
      * @method config
      * @description Configures host-level settings required for Kubernetes.
-     *
-     *
+     * This method ensures proper SELinux, Docker, Containerd, and Sysctl settings
+     * are applied for a healthy Kubernetes environment. It explicitly avoids
+     * iptables flushing commands to prevent conflicts with Kubernetes' own network management.
      */
     config() {
       console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
@@ -346,7 +384,8 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
     /**
      * @method chown
      * @description Sets up kubectl configuration for the current user.
-     * This is typically run after kubeadm init on the control plane
+     * This is typically run after kubeadm init on the control plane
+     * to allow non-root users to interact with the cluster.
      */
     chown() {
       console.log('Setting up kubectl configuration...');
@@ -359,74 +398,142 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
     /**
      * @method reset
      * @description Performs a comprehensive reset of Kubernetes and container environments.
-     * This function is for cleaning up a node,
-     * It
+     * This function is for cleaning up a node, reverting changes made by 'kubeadm init' or 'kubeadm join'.
+     * It includes deleting Kind clusters, resetting kubeadm, removing CNI configs,
+     * cleaning Docker and Podman data, persistent volumes, and resetting kubelet components.
+     * It avoids aggressive iptables flushing that would break host connectivity, relying on kube-proxy's
+     * control loop to eventually clean up rules if the cluster is not re-initialized.
      */
-    reset() {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      shellExec(`sudo rm -rf /home/docker/*`);
-
-      // Re-configure Docker's default storage location (if desired).
-      shellExec('sudo mv /var/lib/docker /var/lib/docker~ || true'); // Use || true to prevent error if dir doesn't exist
-      shellExec('sudo mkdir -p /home/docker');
-      shellExec('sudo chmod 777 /home/docker');
-      shellExec('sudo ln -s /home/docker /var/lib/docker');
+    async reset() {
+      logger.info('Starting comprehensive reset of Kubernetes and container environments...');
+
+      try {
+        // Phase 1: Pre-reset Kubernetes Cleanup (while API server is still up)
+        logger.info('Phase 1/6: Cleaning up Kubernetes resources (PVCs, PVs) while API server is accessible...');
+
+        // Delete all Persistent Volume Claims (PVCs) to release the PVs.
+        // This must happen before deleting PVs or the host paths.
+        // shellExec(`kubectl delete pvc --all-namespaces --all --ignore-not-found || true`);
+
+        // Get all Persistent Volumes and identify their host paths for data deletion.
+        // This needs to be done *before* deleting the PVs themselves.
+        // The '|| echo '{"items":[]}'` handles cases where 'kubectl get pv' might return empty or error.
+        const pvListJson = shellExec(`kubectl get pv -o json || echo '{"items":[]}'`, { stdout: true, silent: true });
+        const pvList = JSON.parse(pvListJson);
+
+        if (pvList.items && pvList.items.length > 0) {
+          for (const pv of pvList.items) {
+            // Check if the PV uses hostPath and delete its contents
+            if (pv.spec.hostPath && pv.spec.hostPath.path) {
+              const hostPath = pv.spec.hostPath.path;
+              logger.info(`Removing data from host path for PV '${pv.metadata.name}': ${hostPath}`);
+              shellExec(`sudo rm -rf ${hostPath}/* || true`);
+            }
+          }
+        } else {
+          logger.info('No Persistent Volumes found with hostPath to clean up.');
+        }
 
-
-
-
-
+        // Then, delete all Persistent Volumes (PVs).
+        // shellExec(`kubectl delete pv --all --ignore-not-found || true`);
+
+        // Phase 2: Stop Kubelet and remove CNI configuration
+        logger.info('Phase 2/6: Stopping Kubelet and removing CNI configurations...');
+        // Stop kubelet service to prevent further activity and release resources.
+        shellExec(`sudo systemctl stop kubelet || true`);
+
+        // CNI plugins use /etc/cni/net.d to store their configuration.
+        // Removing this prevents conflicts and potential issues during kubeadm reset.
+        shellExec('sudo rm -rf /etc/cni/net.d/* || true');
+
+        // Phase 3: Kind Cluster Cleanup
+        logger.info('Phase 3/6: Cleaning up Kind clusters...');
+        // Delete all existing Kind (Kubernetes in Docker) clusters.
+        shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster || true`);
+
+        // Phase 4: Kubeadm Reset
+        logger.info('Phase 4/6: Performing kubeadm reset...');
+        // Reset the Kubernetes control-plane components installed by kubeadm.
+        // The --force flag skips confirmation prompts. This command will tear down the cluster.
+        shellExec(`sudo kubeadm reset --force`);
+
+        // Phase 5: Post-reset File System Cleanup (Local Storage, Kubeconfig)
+        logger.info('Phase 5/6: Cleaning up local storage provisioner data and kubeconfig...');
+        // Remove the kubectl configuration file for the current user.
+        // This is important to prevent stale credentials after the cluster is reset.
+        shellExec('rm -rf $HOME/.kube || true');
+
+        // Remove local path provisioner data, which stores data for dynamically provisioned PVCs.
+        shellExec(`sudo rm -rf /opt/local-path-provisioner/* || true`);
+
+        // Phase 6: Container Runtime Cleanup (Docker and Podman)
+        logger.info('Phase 6/6: Cleaning up Docker and Podman data...');
+        // Prune all unused Docker data (containers, images, volumes, networks).
+        shellExec('sudo docker system prune -a -f');
+
+        // Stop the Docker daemon service to ensure all files can be removed.
+        shellExec('sudo service docker stop || true');
+
+        // Aggressively remove container storage data for containerd and Docker.
+        // This targets the underlying storage directories.
+        shellExec(`sudo rm -rf /var/lib/containers/storage/* || true`);
+        shellExec(`sudo rm -rf /var/lib/docker/volumes/* || true`);
+        shellExec(`sudo rm -rf /var/lib/docker~/* || true`); // Cleanup any old Docker directories
+        shellExec(`sudo rm -rf /home/containers/storage/* || true`);
+        shellExec(`sudo rm -rf /home/docker/* || true`);
+
+        // Ensure Docker's default storage location is clean and re-linked if custom.
+        shellExec(`sudo rm -rf /var/lib/docker/* || true`);
+        shellExec('sudo mkdir -p /home/docker || true');
+        shellExec('sudo chmod 777 /home/docker || true');
+        shellExec('sudo ln -sf /home/docker /var/lib/docker || true'); // Use -sf for symbolic link, force and silent
+
+        // Prune all unused Podman data.
+        shellExec(`sudo podman system prune -a -f`);
+        shellExec(`sudo podman system prune --all --volumes --force`);
+        shellExec(`sudo podman system prune --external --force`);
+
+        // Create and set permissions for Podman's custom storage directory.
+        shellExec(`sudo mkdir -p /home/containers/storage || true`);
+        shellExec('sudo chmod 0711 /home/containers/storage || true');
+
+        // Update Podman's storage configuration file.
+        shellExec(
+          `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf || true`,
+        );
 
-
-
-      shellExec('sudo chmod 0711 /home/containers/storage');
+        // Reset Podman system settings.
+        shellExec(`sudo podman system reset -f`);
 
-
-
-
-
+        // Final Kubelet and System Cleanup (after all other operations)
+        logger.info('Finalizing Kubelet and system file cleanup...');
+        // Remove Kubernetes configuration and kubelet data directories.
+        shellExec(`sudo rm -rf /etc/kubernetes/* || true`);
+        shellExec(`sudo rm -rf /var/lib/kubelet/* || true`);
 
-
-
+        // Clear trash files from the root user's trash directory.
+        shellExec('sudo rm -rf /root/.local/share/Trash/files/* || true');
 
-
-
-
-
-      shellExec(`sudo rm -rf /etc/cni/net.d/*`);
-      shellExec(`sudo systemctl daemon-reload`);
-      shellExec(`sudo systemctl start kubelet`);
+        // Reload systemd daemon to pick up any service file changes.
+        shellExec(`sudo systemctl daemon-reload`);
+        // Attempt to start kubelet; it might fail if the cluster is fully reset, which is expected.
+        shellExec(`sudo systemctl start kubelet || true`);
 
-
+        logger.info('Comprehensive reset completed successfully.');
+      } catch (error) {
+        logger.error(`Error during reset: ${error.message}`);
+        console.error(error);
+      }
     },
 
+    /**
+     * @method getResourcesCapacity
+     * @description Retrieves and returns the allocatable CPU and memory resources
+     * of the Kubernetes node.
+     * @param {boolean} [kubeadm=false] - If true, assumes a kubeadm-managed node;
+     * otherwise, assumes a Kind worker node.
+     * @returns {object} An object containing CPU and memory resources with values and units.
+     */
     getResourcesCapacity(kubeadm = false) {
       const resources = {};
       const info = shellExec(
@@ -457,6 +564,11 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
 
       return resources;
     },
+    /**
+     * @method initHost
+     * @description Installs essential host-level prerequisites for Kubernetes,
+     * including Docker, Podman, Kind, Kubeadm, and Helm.
+     */
     initHost() {
       console.log('Installing Docker, Podman, Kind, Kubeadm, and Helm...');
       // Install docker
package/src/cli/deploy.js
CHANGED
@@ -256,8 +256,8 @@ kubectl rollout restart deployment/deployment-name
 kubectl rollout undo deployment/deployment-name
 kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
 kubectl get pods -w
-kubectl patch statefulset service
-kubectl patch statefulset service
+kubectl patch statefulset valkey-service --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
+kubectl patch statefulset valkey-service -p '{"spec":{"template":{"spec":{"containers":[{"name":"valkey-service","imagePullPolicy":"Never"}]}}}}'
 kubectl logs -f <pod-name>
 kubectl describe pod <pod-name>
 kubectl exec -it <pod-name> -- bash
@@ -274,15 +274,15 @@ kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "p
 sudo podman run --rm localhost/<image-name>:<image-version> <command>
 kubectl get configmap kubelet-config -n kube-system -o yaml > kubelet-config.yaml
 kubectl -n kube-system rollout restart daemonset kube-proxy
-
 kubectl get EndpointSlice -o wide --all-namespaces -w
+kubectl apply -k manifests/deployment/adminer/.
 
 kubectl run --rm -it test-dns --image=busybox:latest --restart=Never -- /bin/sh -c "
 nslookup kubernetes.default.svc.cluster.local;
 nslookup mongodb-service.default.svc.cluster.local;
-nslookup service
+nslookup valkey-service.default.svc.cluster.local;
 nc -vz mongodb-service 27017;
-nc -vz service
+nc -vz valkey-service 6379;
 echo exit code: \\\$?
 "
 
package/src/cli/image.js
CHANGED
@@ -23,9 +23,9 @@ class UnderpostImage {
       shellExec(`sudo podman pull docker.io/library/debian:buster`);
       const IMAGE_NAME = `debian-underpost`;
       const IMAGE_NAME_FULL = `${IMAGE_NAME}:${options.version ?? Underpost.version}`;
-      const LOAD_TYPE = options.kindLoad === true ? `--
+      const LOAD_TYPE = options.kindLoad === true ? `--kind-load` : `--kubeadm-load`;
       shellExec(
-        `underpost dockerfile-image-build --podman-save --
+        `underpost dockerfile-image-build --podman-save --reset --image-path=. --path ${
           options.path ?? getUnderpostRootPath()
         } --image-name=${IMAGE_NAME_FULL} ${LOAD_TYPE}`,
       );
@@ -41,7 +41,7 @@ class UnderpostImage {
       kubeadmLoad: false,
       secrets: false,
       secretsPath: '',
-
+      reset: false,
     },
   ) {
     const {
@@ -53,7 +53,7 @@ class UnderpostImage {
       secrets,
       secretsPath,
       kindLoad,
-
+      reset,
       kubeadmLoad,
     } = options;
     const podManImg = `localhost/${imageName}`;
@@ -75,7 +75,7 @@ class UnderpostImage {
         secretDockerInput += ` --secret id=${key},env=${key} \ `;
       }
     }
-    if (
+    if (reset === true) cache += ' --rm --no-cache';
    if (path && typeof path === 'string')
      shellExec(
        `cd ${path}${secretsInput}&& sudo podman build -f ./${
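`--rm` and `--no-cache` are standard `podman build` flags, so the new `reset` option simply forces a clean, cache-free rebuild. A hypothetical shape of the assembled build command when `reset === true` (Dockerfile path, tag, and build context are illustrative, not taken from the package):

```js
import { execSync } from 'node:child_process';

// Sketch: roughly what the podman build expands to with the reset flags appended.
execSync(
  'cd /home/dd/engine && sudo podman build -f ./Dockerfile --rm --no-cache -t debian-underpost:v2.8.796 .',
  { stdio: 'inherit' },
);
```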
package/src/cli/index.js
CHANGED
@@ -170,7 +170,7 @@ program
   .option('--kubeadm-load', 'Import tar image to Kubeadm cluster')
   .option('--secrets', 'Dockerfile env secrets')
   .option('--secrets-path [secrets-path]', 'Dockerfile custom path env secrets')
-  .option('--
+  .option('--reset', 'Build without using cache')
   .description('Build image from Dockerfile')
   .action(Underpost.image.dockerfile.build);
 
package/src/cli/lxd.js
CHANGED
@@ -75,6 +75,10 @@ ipv4.address=10.250.250.1/24 \
 ipv4.nat=true \
 ipv4.dhcp=true \
 ipv6.address=none`);
+      // Explicitly set DNS nameservers for lxdbr0 to public DNS (e.g., Google DNS)
+      // This makes VM DNS resolution independent of the host's resolv.conf
+      shellExec(`lxc network set lxdbr0 dns.nameservers 8.8.8.8,8.8.4.4`);
+      shellExec(`lxc network set lxdbr0 dns.mode managed`); // Ensure LXD manages DNS for the bridge
     }
     if (options.createAdminProfile === true) {
       pbcopy(`lxc profile create admin-profile`);
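Not part of the diff: the bridge DNS settings added above can be verified afterwards with LXD's own network commands, for example (`execSync` stands in for the package's `shellExec` helper):

```js
import { execSync } from 'node:child_process';

// Sketch: confirm the DNS configuration applied to the lxdbr0 bridge.
execSync('lxc network get lxdbr0 dns.nameservers', { stdio: 'inherit' });
execSync('lxc network get lxdbr0 dns.mode', { stdio: 'inherit' });
```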
package/src/index.js
CHANGED
package/src/server/runtime.js
CHANGED
@@ -366,7 +366,7 @@ const buildRuntime = async () => {
         if (db && apis) await DataBaseProvider.load({ apis, host, path, db });
 
         // valkey server
-        await createValkeyConnection({ host, path }, valkey);
+        if (valkey) await createValkeyConnection({ host, path }, valkey);
 
         if (mailer) {
           const mailerSsrConf = confSSR[getCapVariableName(client)];
package/src/server/valkey.js
CHANGED
@@ -34,14 +34,14 @@ const selectDtoFactory = (payload, select) => {
 const valkeyClientFactory = async (options) => {
   const valkey = new Valkey({
     // port: 6379,
-    // host: 'service
+    // host: 'valkey-service.default.svc.cluster.local',
     port: options?.port ? options.port : undefined,
-    host: options?.
+    host: options?.host ? options.host : undefined,
     retryStrategy: (attempt) => {
       if (attempt === 1) {
         valkey.disconnect();
         valkeyEnabled = false;
-        logger.warn('Valkey service not enabled', { valkeyEnabled });
+        logger.warn('Valkey service not enabled', { ...options, valkeyEnabled });
         return;
       }
       return 1000; // 1 second interval attempt