underpost 2.8.67 → 2.8.71
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/bin/deploy.js +171 -1
- package/cli.md +1 -1
- package/docker-compose.yml +1 -1
- package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
- package/manifests/deployment/fastapi/backend-service.yml +19 -0
- package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
- package/manifests/deployment/fastapi/frontend-service.yml +15 -0
- package/manifests/deployment/fastapi/initial_data.sh +56 -0
- package/manifests/deployment/kafka/deployment.yaml +69 -0
- package/manifests/kubeadm-calico-config.yaml +119 -0
- package/manifests/postgresql/statefulset.yaml +1 -1
- package/package.json +1 -1
- package/src/cli/cluster.js +107 -21
- package/src/cli/deploy.js +13 -0
- package/src/cli/image.js +1 -1
- package/src/index.js +1 -1
package/README.md
CHANGED
package/bin/deploy.js
CHANGED
@@ -1197,6 +1197,17 @@ EOF`);
         break;
       }

+      case 'pg-stop': {
+        shellExec(`sudo systemctl stop postgresql-14`);
+        shellExec(`sudo systemctl disable postgresql-14`);
+        break;
+      }
+      case 'pg-start': {
+        shellExec(`sudo systemctl enable postgresql-14`);
+        shellExec(`sudo systemctl restart postgresql-14`);
+        break;
+      }
+
       case 'pg-list-db': {
         shellExec(`sudo -i -u postgres psql -c "\\l"`);
         break;
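Note: the new pg-stop / pg-start cases wrap systemctl for the host's PostgreSQL 14 service. A minimal usage sketch, assuming the same `node bin/deploy <case>` invocation pattern the source comments use elsewhere:

# stop and disable the host PostgreSQL 14 service
node bin/deploy pg-stop
# re-enable and restart it
node bin/deploy pg-start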
@@ -1213,6 +1224,11 @@ EOF`);
         break;
       }

+      case 'maas-stop': {
+        shellExec(`sudo snap stop maas`);
+        break;
+      }
+
       case 'maas': {
         dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
         const IP_ADDRESS = getLocalIPv4Address();
@@ -1415,6 +1431,8 @@ EOF`);
       // Check firewall-cmd
       // firewall-cmd --permanent --add-service=rpc-bind
       // firewall-cmd --reload
+      // systemctl disable firewalld
+      // sudo firewall-cmd --permanent --add-port=10259/tcp --zone=public

       // Image extension transform (.img.xz to .tar.gz):
       // tar -cvzf image-name.tar.gz image-name.img.xz
@@ -2122,8 +2140,160 @@ EOF`);
         break;
       }

-
+      case 'fastapi-models': {
+        shellExec(`chmod +x ../full-stack-fastapi-template/backend/initial_data.sh`);
+        shellExec(`../full-stack-fastapi-template/backend/initial_data.sh`);
+        shellExec(`../full-stack-fastapi-template/backend/initial_data.sh`);
         break;
+      }
+
+      case 'fastapi': {
+        // node bin/deploy fastapi reset
+        // node bin/deploy fastapi reset build-back build-front secret run-back run-front
+        // https://github.com/NonsoEchendu/full-stack-fastapi-project
+        // https://github.com/fastapi/full-stack-fastapi-template
+        const path = `../full-stack-fastapi-template`;
+        const VITE_API_URL = `http://localhost:8000`;
+
+        if (process.argv.includes('reset')) shellExec(`sudo rm -rf ${path}`);
+
+        if (!fs.existsSync(path))
+          shellExec(`cd .. && git clone https://github.com/fastapi/full-stack-fastapi-template.git`);
+
+        shellExec(`cd ${path} && git checkout . && git clean -f -d`);
+        const password = fs.readFileSync(`/home/dd/engine/engine-private/postgresql-password`, 'utf8');
+
+        fs.writeFileSync(
+          `${path}/.env`,
+          fs
+            .readFileSync(`${path}/.env`, 'utf8')
+            .replace(`FIRST_SUPERUSER=admin@example.com`, `FIRST_SUPERUSER=development@underpost.net`)
+            .replace(`FIRST_SUPERUSER_PASSWORD=changethis`, `FIRST_SUPERUSER_PASSWORD=${password}`)
+            .replace(`SECRET_KEY=changethis`, `SECRET_KEY=${password}`)
+            .replace(`POSTGRES_DB=app`, `POSTGRES_DB=postgresdb`)
+            .replace(`POSTGRES_USER=postgres`, `POSTGRES_USER=admin`)
+            .replace(`POSTGRES_PASSWORD=changethis`, `POSTGRES_PASSWORD=${password}`),
+          'utf8',
+        );
+        fs.writeFileSync(
+          `${path}/backend/app/core/db.py`,
+          fs
+            .readFileSync(`${path}/backend/app/core/db.py`, 'utf8')
+            .replace(` # from sqlmodel import SQLModel`, ` from sqlmodel import SQLModel`)
+            .replace(` # SQLModel.metadata.create_all(engine)`, ` SQLModel.metadata.create_all(engine)`),

+          'utf8',
+        );
+
+        fs.copySync(`./manifests/deployment/fastapi/initial_data.sh`, `${path}/backend/initial_data.sh`);
+
+        fs.writeFileSync(
+          `${path}/frontend/Dockerfile`,
+          fs
+            .readFileSync(`${path}/frontend/Dockerfile`, 'utf8')
+            .replace('ARG VITE_API_URL=${VITE_API_URL}', `ARG VITE_API_URL='${VITE_API_URL}'`),
+          'utf8',
+        );
+
+        fs.writeFileSync(
+          `${path}/frontend/.env`,
+          fs
+            .readFileSync(`${path}/frontend/.env`, 'utf8')
+            .replace(`VITE_API_URL=http://localhost:8000`, `VITE_API_URL=${VITE_API_URL}`)
+            .replace(`MAILCATCHER_HOST=http://localhost:1080`, `MAILCATCHER_HOST=http://localhost:1081`),

+          'utf8',
+        );
+
+        if (process.argv.includes('models')) {
+          shellExec(`node bin/deploy fastapi-models`);
+          break;
+        }
+
+        if (process.argv.includes('build-back')) {
+          const imageName = `fastapi-backend:latest`;
+          shellExec(`sudo podman pull docker.io/library/python:3.10`);
+          shellExec(`sudo podman pull ghcr.io/astral-sh/uv:0.5.11`);
+          shellExec(`sudo rm -rf ${path}/${imageName.replace(':', '_')}.tar`);
+          const args = [
+            `node bin dockerfile-image-build --path ${path}/backend/`,
+            `--image-name=${imageName} --image-path=${path}`,
+            `--podman-save --kind-load --no-cache`,
+          ];
+          shellExec(args.join(' '));
+        }
+        if (process.argv.includes('build-front')) {
+          const imageName = `fastapi-frontend:latest`;
+          shellExec(`sudo podman pull docker.io/library/node:20`);
+          shellExec(`sudo podman pull docker.io/library/nginx:1`);
+          shellExec(`sudo rm -rf ${path}/${imageName.replace(':', '_')}.tar`);
+          const args = [
+            `node bin dockerfile-image-build --path ${path}/frontend/`,
+            `--image-name=${imageName} --image-path=${path}`,
+            `--podman-save --kind-load --no-cache`,
+          ];
+          shellExec(args.join(' '));
+        }
+        if (process.argv.includes('secret')) {
+          {
+            const secretSelector = `fastapi-postgres-credentials`;
+            shellExec(`sudo kubectl delete secret ${secretSelector}`);
+            shellExec(
+              `sudo kubectl create secret generic ${secretSelector}` +
+                ` --from-literal=POSTGRES_DB=postgresdb` +
+                ` --from-literal=POSTGRES_USER=admin` +
+                ` --from-file=POSTGRES_PASSWORD=/home/dd/engine/engine-private/postgresql-password`,
+            );
+          }
+          {
+            const secretSelector = `fastapi-backend-config-secret`;
+            shellExec(`sudo kubectl delete secret ${secretSelector}`);
+            shellExec(
+              `sudo kubectl create secret generic ${secretSelector}` +
+                ` --from-file=SECRET_KEY=/home/dd/engine/engine-private/postgresql-password` +
+                ` --from-literal=FIRST_SUPERUSER=development@underpost.net` +
+                ` --from-file=FIRST_SUPERUSER_PASSWORD=/home/dd/engine/engine-private/postgresql-password`,
+            );
+          }
+        }
+        if (process.argv.includes('run-back')) {
+          shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/backend-deployment.yml`);
+          shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/backend-service.yml`);
+        }
+        if (process.argv.includes('run-front')) {
+          shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/frontend-deployment.yml`);
+          shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/frontend-service.yml`);
+        }
+        break;
+      }
+
+      case 'conda': {
+        shellExec(
+          `export PATH="/root/miniconda3/bin:$PATH" && conda init && conda config --set auto_activate_base false`,
+        );
+        shellExec(`conda env list`);
+        break;
+      }
+
+      case 'kafka': {
+        // https://medium.com/@martin.hodges/deploying-kafka-on-a-kind-kubernetes-cluster-for-development-and-testing-purposes-ed7adefe03cb
+        const imageName = `doughgle/kafka-kraft`;
+        shellExec(`docker pull ${imageName}`);
+        shellExec(`kind load docker-image ${imageName}`);
+        shellExec(`kubectl create namespace kafka`);
+        shellExec(`kubectl apply -f ./manifests/deployment/kafka/deployment.yaml`);
+        // kubectl logs kafka-0 -n kafka | grep STARTED
+        // kubectl logs kafka-1 -n kafka | grep STARTED
+        // kubectl logs kafka-2 -n kafka | grep STARTED
+
+        // kafka-topics.sh --create --topic my-topic --bootstrap-server kafka-svc:9092
+        // kafka-topics.sh --list --topic my-topic --bootstrap-server kafka-svc:9092
+        // kafka-topics.sh --delete --topic my-topic --bootstrap-server kafka-svc:9092
+
+        // kafka-console-producer.sh --bootstrap-server kafka-svc:9092 --topic my-topic
+        // kafka-console-consumer.sh --bootstrap-server kafka-svc:9092 --topic my-topic
+        break;
+      }
     }
   } catch (error) {
     logger.error(error, error.stack);
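Note: the fastapi case reads its subcommands from process.argv with includes(), so flags combine in a single invocation. Usage sketch taken directly from the comments in the added code:

# clone/reset the template, build both images, create the secrets, and deploy
node bin/deploy fastapi reset build-back build-front secret run-back run-front
# re-run only the data seeding (delegates to the fastapi-models case)
node bin/deploy fastapi models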
package/cli.md
CHANGED
package/docker-compose.yml
CHANGED
package/manifests/deployment/fastapi/backend-deployment.yml
ADDED
@@ -0,0 +1,120 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: fastapi-backend
+  labels:
+    app: fastapi-backend
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: fastapi-backend
+  template:
+    metadata:
+      labels:
+        app: fastapi-backend
+    spec:
+      containers:
+        - name: fastapi-backend-container
+          image: localhost/fastapi-backend:latest
+          imagePullPolicy: IfNotPresent
+
+          ports:
+            - containerPort: 8000
+              name: http-api
+
+          env:
+            - name: POSTGRES_SERVER
+              value: postgres-service
+            - name: POSTGRES_PORT
+              value: '5432'
+            - name: POSTGRES_DB
+              valueFrom:
+                secretKeyRef:
+                  name: fastapi-postgres-credentials
+                  key: POSTGRES_DB
+            - name: POSTGRES_USER
+              valueFrom:
+                secretKeyRef:
+                  name: fastapi-postgres-credentials
+                  key: POSTGRES_USER
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: fastapi-postgres-credentials
+                  key: POSTGRES_PASSWORD
+
+            - name: PROJECT_NAME
+              value: 'Full Stack FastAPI Project'
+            - name: STACK_NAME
+              value: 'full-stack-fastapi-project'
+
+            - name: BACKEND_CORS_ORIGINS
+              value: 'http://localhost,http://localhost:5173,https://localhost,https://localhost:5173'
+            - name: SECRET_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: fastapi-backend-config-secret
+                  key: SECRET_KEY
+            - name: FIRST_SUPERUSER
+              valueFrom:
+                secretKeyRef:
+                  name: fastapi-backend-config-secret
+                  key: FIRST_SUPERUSER
+            - name: FIRST_SUPERUSER_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: fastapi-backend-config-secret
+                  key: FIRST_SUPERUSER_PASSWORD
+            - name: USERS_OPEN_REGISTRATION
+              value: 'True'
+
+            # - name: SMTP_HOST
+            #   valueFrom:
+            #     secretKeyRef:
+            #       name: fastapi-backend-config-secret
+            #       key: SMTP_HOST
+            # - name: SMTP_USER
+            #   valueFrom:
+            #     secretKeyRef:
+            #       name: fastapi-backend-config-secret
+            #       key: SMTP_USER
+            # - name: SMTP_PASSWORD
+            #   valueFrom:
+            #     secretKeyRef:
+            #       name: fastapi-backend-config-secret
+            #       key: SMTP_PASSWORD
+            - name: EMAILS_FROM_EMAIL
+              value: 'info@example.com'
+            - name: SMTP_TLS
+              value: 'True'
+            - name: SMTP_SSL
+              value: 'False'
+            - name: SMTP_PORT
+              value: '587'
+
+          livenessProbe:
+            httpGet:
+              path: /docs
+              port: 8000
+            initialDelaySeconds: 30
+            periodSeconds: 20
+            timeoutSeconds: 10
+            failureThreshold: 3
+
+          readinessProbe:
+            httpGet:
+              path: /docs
+              port: 8000
+            initialDelaySeconds: 30
+            periodSeconds: 20
+            timeoutSeconds: 10
+            failureThreshold: 3
+
+          resources:
+            requests:
+              cpu: 200m
+              memory: 256Mi
+            limits:
+              cpu: 1000m
+              memory: 1Gi
package/manifests/deployment/fastapi/backend-service.yml
ADDED
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: fastapi-backend-service
+  labels:
+    app: fastapi-backend
+spec:
+  selector:
+    app: fastapi-backend
+  ports:
+    - name: 'tcp-8000'
+      protocol: TCP
+      port: 8000
+      targetPort: 8000
+    - name: 'udp-8000'
+      protocol: UDP
+      port: 8000
+      targetPort: 8000
+  type: ClusterIP
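Note: a quick smoke test once both manifests are applied; a sketch assuming kubectl access to the target cluster, using the names defined above (/docs is the same endpoint the probes poll):

kubectl get pods -l app=fastapi-backend
kubectl port-forward service/fastapi-backend-service 8000:8000
curl http://localhost:8000/docs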
package/manifests/deployment/fastapi/frontend-deployment.yml
ADDED
@@ -0,0 +1,54 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: react-frontend
+  labels:
+    app: react-frontend
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: react-frontend
+  template:
+    metadata:
+      labels:
+        app: react-frontend
+    spec:
+      containers:
+        - name: react-frontend-container
+          image: localhost/fastapi-frontend:latest
+          imagePullPolicy: IfNotPresent
+
+          ports:
+            - containerPort: 80
+              name: http-web
+
+          env:
+            - name: VITE_FASTAPI_URL
+              value: '/api'
+
+          livenessProbe:
+            httpGet:
+              path: /
+              port: 80
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 3
+            failureThreshold: 3
+
+          readinessProbe:
+            httpGet:
+              path: /
+              port: 80
+            initialDelaySeconds: 3
+            periodSeconds: 5
+            timeoutSeconds: 3
+            failureThreshold: 3
+
+          resources:
+            requests:
+              cpu: 100m
+              memory: 128Mi
+            limits:
+              cpu: 500m
+              memory: 512Mi
package/manifests/deployment/fastapi/initial_data.sh
ADDED
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# IMPORTANT: For non-interactive scripts, 'conda activate' can be problematic
+# because it relies on the shell's initialization.
+# A more robust and recommended way to run commands within a Conda environment
+# from a script is to use 'conda run'. This command directly executes a process
+# in the specified environment without needing to manually source 'conda.sh'.
+
+# Navigate to the application's root directory for module discovery.
+# This is crucial for Python to correctly find your 'app' module using 'python -m'.
+#
+# Let's assume a common project structure:
+# full-stack-fastapi-template/
+# ├── backend/
+# │   ├── app/
+# │   │   └── initial_data.py (the Python script you want to run)
+# │   └── initial_data.sh (this shell script)
+# └── ...
+#
+# If `initial_data.sh` is located in `full-stack-fastapi-template/backend/`,
+# and `app` is a subdirectory of `backend/`, then the Python command
+# `python -m app.initial_data` needs to be executed from the `backend/` directory.
+#
+# If you are running this shell script from a different directory (e.g., `engine/`),
+# Python's module import system won't automatically find 'app' unless the parent
+# directory of 'app' is in the `PYTHONPATH` or you change the current working directory.
+#
+# The safest way is to change the current working directory to the script's location.
+
+# Store the current directory to return to it later if needed (good practice for multi-step scripts).
+CURRENT_DIR=$(pwd)
+
+# Get the absolute path of the directory where this script is located.
+# This is a robust way to ensure we always navigate to the correct 'backend' directory.
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
+cd "$SCRIPT_DIR"
+
+# Execute your Python script within the specified Conda environment using 'conda run'.
+# -n fastapi_env specifies the Conda environment to use.
+# This completely avoids the 'source conda.sh' issue and is generally more reliable.
+conda run -n fastapi_env python -m app.initial_data
+
+# Important Note: The 'ModuleNotFoundError: No module named 'sqlmodel'' indicates that
+# the 'sqlmodel' package is not installed in your 'fastapi_env' Conda environment.
+# After running this script, if you still get the 'sqlmodel' error,
+# you will need to activate your environment manually and install it:
+#
+# conda activate fastapi_env
+# pip install sqlmodel
+# # or if it's a conda package:
+# # conda install sqlmodel
+#
+# Then try running this script again.
+
+# Optional Good Practice: Return to the original directory if the script is part of a larger workflow.
+cd "$CURRENT_DIR"
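Note: the script assumes a Conda environment named fastapi_env already exists. A minimal sketch for creating it, assuming Python 3.10 (the version the backend image pulls) and installing the sqlmodel package the comments warn about; the backend's remaining requirements would need installing the same way:

conda create -n fastapi_env python=3.10 -y
conda run -n fastapi_env pip install sqlmodel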
package/manifests/deployment/kafka/deployment.yaml
ADDED
@@ -0,0 +1,69 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kafka
+  namespace: kafka
+  labels:
+    app: kafka-app
+spec:
+  serviceName: kafka-svc
+  replicas: 3
+  selector:
+    matchLabels:
+      app: kafka-app
+  template:
+    metadata:
+      labels:
+        app: kafka-app
+    spec:
+      containers:
+        - name: kafka-container
+          image: doughgle/kafka-kraft
+          ports:
+            - containerPort: 9092
+            - containerPort: 9093
+          env:
+            - name: REPLICAS
+              value: '3'
+            - name: SERVICE
+              value: kafka-svc
+            - name: NAMESPACE
+              value: kafka
+            - name: SHARE_DIR
+              value: /mnt/kafka
+            - name: CLUSTER_ID
+              value: bXktY2x1c3Rlci0xMjM0NQ==
+            - name: DEFAULT_REPLICATION_FACTOR
+              value: '3'
+            - name: DEFAULT_MIN_INSYNC_REPLICAS
+              value: '2'
+          volumeMounts:
+            - name: data
+              mountPath: /mnt/kafka
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes:
+          - 'ReadWriteOnce'
+        resources:
+          requests:
+            storage: '1Gi'
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafka-svc
+  namespace: kafka
+  labels:
+    app: kafka-app
+spec:
+  type: NodePort
+  ports:
+    - name: '9092'
+      port: 9092
+      protocol: TCP
+      targetPort: 9092
+      nodePort: 30092
+  selector:
+    app: kafka-app
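Note: the kafka-topics.sh / console commands quoted in the deploy.js comments run inside the brokers. A sketch assuming the doughgle/kafka-kraft image ships them on its PATH:

kubectl -n kafka exec -it kafka-0 -- kafka-topics.sh --create --topic my-topic --bootstrap-server kafka-svc:9092
kubectl -n kafka exec -it kafka-0 -- kafka-console-producer.sh --bootstrap-server kafka-svc:9092 --topic my-topic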
package/manifests/kubeadm-calico-config.yaml
ADDED
@@ -0,0 +1,119 @@
+# This consolidated YAML file contains configurations for:
+# 1. Calico Installation (Installation and APIServer resources)
+# 2. A permissive Egress NetworkPolicy for the 'default' namespace
+#
+# These are standard Kubernetes resources that can be applied directly using 'kubectl apply'.
+# The kubeadm-specific ClusterConfiguration and InitConfiguration have been removed
+# as they are only processed by the 'kubeadm init' command, not 'kubectl apply'.
+
+# --- Calico Installation: Base configuration for Calico ---
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+      - blockSize: 26
+        cidr: 192.168.0.0/16
+        encapsulation: VXLANCrossSubnet
+        natOutgoing: Enabled
+        nodeSelector: all()
+
+---
+# This section configures the Calico API server.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
+
+---
+# This consolidated NetworkPolicy file ensures that all pods in the specified namespaces
+# have unrestricted egress (outbound) access.
+# This is useful for troubleshooting or for environments where strict egress control
+# is not immediately required for these system/default namespaces.
+
+---
+# Policy for the 'default' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-default-namespace
+  namespace: default # This policy applies to the 'default' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'kube-system' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-kube-system-namespace
+  namespace: kube-system # This policy applies to the 'kube-system' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'kube-node-lease' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-kube-node-lease-namespace
+  namespace: kube-node-lease # This policy applies to the 'kube-node-lease' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'kube-public' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-kube-public-namespace
+  namespace: kube-public # This policy applies to the 'kube-public' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'tigera-operator' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-tigera-operator-namespace
+  namespace: tigera-operator # This policy applies to the 'tigera-operator' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
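Note: as the file's own comments say, these are plain Kubernetes resources. A verification sketch after cluster.js applies the manifest, assuming the Tigera operator CRDs are already installed:

sudo kubectl apply -f ./manifests/kubeadm-calico-config.yaml
kubectl get installation default -o yaml
kubectl get networkpolicies -A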
package/package.json
CHANGED
package/src/cli/cluster.js
CHANGED
@@ -29,6 +29,7 @@ class UnderpostCluster {
       pullImage: false,
     },
   ) {
+    // sudo dnf update
     // 1) Install kind, kubeadm, docker, podman
     // 2) Check kubectl, kubelet, containerd.io
     // 3) Install Nvidia drivers from Rocky Linux docs
@@ -105,7 +106,7 @@ class UnderpostCluster {
       // shellExec(
       //   `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
       // );
-      shellExec(`sudo kubectl apply -f ./manifests/calico-
+      shellExec(`sudo kubectl apply -f ./manifests/kubeadm-calico-config.yaml`);
       shellExec(`sudo systemctl restart containerd`);
     } else {
       shellExec(`sudo systemctl restart containerd`);
@@ -120,14 +121,7 @@ class UnderpostCluster {

     if (options.full === true || options.valkey === true) {
       if (options.pullImage === true) {
-        // kubectl patch statefulset service-valkey --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
-        // kubectl patch statefulset service-valkey -p '{"spec":{"template":{"spec":{"containers":[{"name":"service-valkey","imagePullPolicy":"Never"}]}}}}'
         shellExec(`docker pull valkey/valkey`);
-        // shellExec(`sudo kind load docker-image valkey/valkey`);
-        // shellExec(`sudo podman pull docker.io/valkey/valkey:latest`);
-        // shellExec(`podman save -o valkey.tar valkey/valkey`);
-        // shellExec(`sudo kind load image-archive valkey.tar`);
-        // shellExec(`sudo rm -rf ./valkey.tar`);
         shellExec(`sudo kind load docker-image valkey/valkey:latest`);
       }
       shellExec(`kubectl delete statefulset service-valkey`);
@@ -144,6 +138,10 @@ class UnderpostCluster {
       shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
     }
     if (options.full === true || options.postgresql === true) {
+      if (options.pullImage === true) {
+        shellExec(`docker pull postgres:latest`);
+        shellExec(`sudo kind load docker-image postgres:latest`);
+      }
       shellExec(
         `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
       );
@@ -225,43 +223,131 @@ class UnderpostCluster {
       shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
     }
   },
+  // This function performs a comprehensive reset of Kubernetes and container environments
+  // on the host machine. Its primary goal is to clean up cluster components, temporary files,
+  // and container data, ensuring a clean state for re-initialization or fresh deployments,
+  // while also preventing the loss of the host machine's internet connectivity.
+
   reset() {
+    // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
+    // 'kind get clusters' lists all Kind clusters.
+    // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
+    // and executes 'kind delete cluster --name <cluster_name>' to remove them.
     shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
+
+    // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
+    // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
+    // configuration files, and associated network rules (like iptables entries created by kubeadm).
+    // The '-f' flag bypasses confirmation prompts.
     shellExec(`sudo kubeadm reset -f`);
+
+    // Step 3: Remove specific CNI (Container Network Interface) configuration files.
+    // This command targets and removes the configuration file for Flannel,
+    // a common CNI plugin, which might be left behind after a reset.
     shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
-
+
+    // Note: The aggressive 'sudo iptables -F ...' command was intentionally removed from previous versions.
+    // This command would flush all iptables rules, including those crucial for the host's general
+    // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
+    // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
+    // default network configuration.
+
+    // Step 4: Remove the kubectl configuration file from the current user's home directory.
+    // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
+    // providing a clean slate for connecting to a new or re-initialized cluster.
     shellExec('sudo rm -f $HOME/.kube/config');
+
+    // Step 5: Clear trash files from the root user's trash directory.
+    // This is a general cleanup step to remove temporary or deleted files.
     shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
+
+    // Step 6: Prune all unused Docker data.
+    // 'docker system prune -a -f' removes:
+    // - All stopped containers
+    // - All unused networks
+    // - All dangling images
+    // - All build cache
+    // - All unused volumes
+    // This aggressively frees up disk space and removes temporary Docker artifacts.
     shellExec('sudo docker system prune -a -f');
+
+    // Step 7: Stop the Docker daemon service.
+    // This step is often necessary to ensure that Docker's files and directories
+    // can be safely manipulated or moved in subsequent steps without conflicts.
     shellExec('sudo service docker stop');
+
+    // Step 8: Aggressively remove container storage data for containerd and Docker.
+    // These commands target the default storage locations for containerd and Docker,
+    // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
+    // This ensures a complete wipe of all container images, layers, and volumes.
     shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
     shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
-    shellExec(`sudo rm -rf /var/lib/docker~/*`);
-    shellExec(`sudo rm -rf /home/containers/storage/*`);
-    shellExec(`sudo rm -rf /home/docker/*`);
-
-
-
-
+    shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
+    shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
+    shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
+
+    // Step 9: Re-configure Docker's default storage location (if desired).
+    // These commands effectively move Docker's data directory from its default `/var/lib/docker`
+    // to a new location (`/home/docker`) and create a symbolic link.
+    // This is a specific customization to relocate Docker's storage.
+    shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
+    shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
+    shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
+    shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
+
+    // Step 10: Prune all unused Podman data.
+    // Similar to Docker pruning, these commands remove:
+    // - All stopped containers
+    // - All unused networks
+    // - All unused images
+    // - All unused volumes ('--volumes')
+    // - The '--force' flag bypasses confirmation.
+    // '--external' prunes external content not managed by Podman's default storage backend.
     shellExec(`sudo podman system prune -a -f`);
     shellExec(`sudo podman system prune --all --volumes --force`);
     shellExec(`sudo podman system prune --external --force`);
-    shellExec(`sudo podman system prune --all --volumes --force`);
+    shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
+
+    // Step 11: Create and set permissions for Podman's custom storage directory.
+    // This ensures the custom path `/home/containers/storage` exists and has correct permissions
+    // before Podman attempts to use it.
     shellExec(`sudo mkdir -p /home/containers/storage`);
     shellExec('sudo chmod 0711 /home/containers/storage');
+
+    // Step 12: Update Podman's storage configuration file.
+    // This command uses 'sed' to modify `/etc/containers/storage.conf`,
+    // changing the default storage path from `/var/lib/containers/storage`
+    // to the customized `/home/containers/storage`.
     shellExec(
       `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
     );
+
+    // Step 13: Reset Podman system settings.
+    // This command resets Podman's system-wide configuration to its default state.
     shellExec(`sudo podman system reset -f`);
+
+    // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
+    // were previously removed. These sysctl settings (bridge-nf-call-iptables,
+    // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
+    // network traffic through Linux bridges to be processed by iptables.
+    // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
+    // Re-initializing Kubernetes will typically set these as needed, and leaving them
+    // at their system default (or '1' if already configured) is safer for host
+    // connectivity during a reset operation.
+
     // https://github.com/kubernetes-sigs/kind/issues/2886
-    shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
-    shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
-    shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+    // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
+    // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
+    // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
+
+    // Step 14: Remove the 'kind' Docker network.
+    // This cleans up any network bridges or configurations specifically created by Kind.
     shellExec(`docker network rm kind`);
   },
+
   getResourcesCapacity() {
     const resources = {};
-    const info =
+    const info = false
       ? `Capacity:
   cpu: 8
   ephemeral-storage: 153131976Ki
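Note: since reset() now leaves the bridge-nf-call sysctls untouched, a sketch for checking them before re-initializing a cluster (Kubernetes expects all three to be 1, as the comments above explain):

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-arptables net.bridge.bridge-nf-call-ip6tables
# if any report 0:
sudo sysctl -w net.bridge.bridge-nf-call-iptables=1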
package/src/cli/deploy.js
CHANGED
@@ -256,6 +256,19 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas>
 kubectl get pods -w
 kubectl patch statefulset service-valkey --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
 kubectl patch statefulset service-valkey -p '{"spec":{"template":{"spec":{"containers":[{"name":"service-valkey","imagePullPolicy":"Never"}]}}}}'
+kubectl logs -f <pod-name>
+kubectl describe pod <pod-name>
+kubectl exec -it <pod-name> -- bash
+kubectl exec -it <pod-name> -- sh
+docker exec -it kind-control-plane bash
+curl -4 -v google.com
+kubectl taint nodes <node-name> node-role.kubernetes.io/control-plane:NoSchedule-
+kubectl run test-pod --image=busybox:latest --restart=Never -- /bin/sh -c "while true; do sleep 30; done;"
+kubectl run test-pod --image=alpine/curl:latest --restart=Never -- sh -c "sleep infinity"
+kubectl get ippools -o yaml
+kubectl get node <node-name> -o jsonpath='{.spec.podCIDR}'
+kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "10.244.0.0/16"}]'
+kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "path": "/spec/cidr", "value": "192.168.0.0/24"}]'
 `);
 if (deployList === 'dd' && fs.existsSync(`./engine-private/deploy/dd.router`))
   deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
package/src/cli/image.js
CHANGED
@@ -54,7 +54,7 @@ class UnderpostImage {
       shellExec(
         `cd ${path}${secretsInput}&& sudo podman build -f ./${
           dockerfileName && typeof dockerfileName === 'string' ? dockerfileName : 'Dockerfile'
-        } -t ${imageName} --pull=never --cap-add=CAP_AUDIT_WRITE${cache}${secretDockerInput}`,
+        } -t ${imageName} --pull=never --cap-add=CAP_AUDIT_WRITE${cache}${secretDockerInput} --network host`,
       );

       if (podmanSave === true) shellExec(`podman save -o ${tarFile} ${podManImg}`);