underpost 2.8.67 → 2.8.75

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,56 @@
1
+ #!/bin/bash
2
+
3
+ # IMPORTANT: For non-interactive scripts, 'conda activate' can be problematic
4
+ # because it relies on the shell's initialization.
5
+ # A more robust and recommended way to run commands within a Conda environment
6
+ # from a script is to use 'conda run'. This command directly executes a process
7
+ # in the specified environment without needing to manually source 'conda.sh'.
8
+
9
+ # Navigate to the application's root directory for module discovery.
10
+ # This is crucial for Python to correctly find your 'app' module using 'python -m'.
11
+ #
12
+ # Let's assume a common project structure:
13
+ # full-stack-fastapi-template/
14
+ # ├── backend/
15
+ # │ ├── app/
16
+ # │ │ └── initial_data.py (the Python script you want to run)
17
+ # │ └── initial_data.sh (this shell script)
18
+ # └── ...
19
+ #
20
+ # If `initial_data.sh` is located in `full-stack-fastapi-template/backend/`,
21
+ # and `app` is a subdirectory of `backend/`, then the Python command
22
+ # `python -m app.initial_data` needs to be executed from the `backend/` directory.
23
+ #
24
+ # If you are running this shell script from a different directory (e.g., `engine/`),
25
+ # Python's module import system won't automatically find 'app' unless the parent
26
+ # directory of 'app' is in the `PYTHONPATH` or you change the current working directory.
27
+ #
28
+ # The safest way is to change the current working directory to the script's location.
29
+
30
+ # Store the current directory to return to it later if needed (good practice for multi-step scripts).
31
+ CURRENT_DIR=$(pwd)
32
+
33
+ # Get the absolute path of the directory where this script is located.
34
+ # This is a robust way to ensure we always navigate to the correct 'backend' directory.
35
+ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
36
+ cd "$SCRIPT_DIR"
37
+
38
+ # Execute your Python script within the specified Conda environment using 'conda run'.
39
+ # -n fastapi_env specifies the Conda environment to use.
40
+ # This completely avoids the 'source conda.sh' issue and is generally more reliable.
41
+ conda run -n fastapi_env python -m app.initial_data
42
+
43
+ # Important Note: The 'ModuleNotFoundError: No module named 'sqlmodel'' indicates that
44
+ # the 'sqlmodel' package is not installed in your 'fastapi_env' Conda environment.
45
+ # After running this script, if you still get the 'sqlmodel' error,
46
+ # you will need to activate your environment manually and install it:
47
+ #
48
+ # conda activate fastapi_env
49
+ # pip install sqlmodel
50
+ # # or if it's a conda package:
51
+ # # conda install -c conda-forge sqlmodel
52
+ #
53
+ # Then try running this script again.
54
+
55
+ # Optional Good Practice: Return to the original directory if the script is part of a larger workflow.
56
+ cd "$CURRENT_DIR"
@@ -0,0 +1,69 @@
1
+ apiVersion: apps/v1
2
+ kind: StatefulSet
3
+ metadata:
4
+ name: kafka
5
+ namespace: kafka
6
+ labels:
7
+ app: kafka-app
8
+ spec:
9
+ serviceName: kafka-svc
10
+ replicas: 3
11
+ selector:
12
+ matchLabels:
13
+ app: kafka-app
14
+ template:
15
+ metadata:
16
+ labels:
17
+ app: kafka-app
18
+ spec:
19
+ containers:
20
+ - name: kafka-container
21
+ image: doughgle/kafka-kraft
22
+ ports:
23
+ - containerPort: 9092
24
+ - containerPort: 9093
25
+ env:
26
+ - name: REPLICAS
27
+ value: '3'
28
+ - name: SERVICE
29
+ value: kafka-svc
30
+ - name: NAMESPACE
31
+ value: kafka
32
+ - name: SHARE_DIR
33
+ value: /mnt/kafka
34
+ - name: CLUSTER_ID
35
+ value: bXktY2x1c3Rlci0xMjM0NQ==
36
+ - name: DEFAULT_REPLICATION_FACTOR
37
+ value: '3'
38
+ - name: DEFAULT_MIN_INSYNC_REPLICAS
39
+ value: '2'
40
+ volumeMounts:
41
+ - name: data
42
+ mountPath: /mnt/kafka
43
+ volumeClaimTemplates:
44
+ - metadata:
45
+ name: data
46
+ spec:
47
+ accessModes:
48
+ - 'ReadWriteOnce'
49
+ resources:
50
+ requests:
51
+ storage: '1Gi'
52
+ ---
53
+ apiVersion: v1
54
+ kind: Service
55
+ metadata:
56
+ name: kafka-svc
57
+ namespace: kafka
58
+ labels:
59
+ app: kafka-app
60
+ spec:
61
+ type: NodePort
62
+ ports:
63
+ - name: '9092'
64
+ port: 9092
65
+ protocol: TCP
66
+ targetPort: 9092
67
+ nodePort: 30092
68
+ selector:
69
+ app: kafka-app
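A quick smoke test for the Kafka manifest above, using only names defined in it (the file name kafka.yaml is a hypothetical placeholder):

    kubectl create namespace kafka --dry-run=client -o yaml | kubectl apply -f -   # ensure the namespace exists
    kubectl apply -f kafka.yaml
    kubectl -n kafka get pods -l app=kafka-app -w        # wait for kafka-0..kafka-2 to become Ready
    kubectl -n kafka get svc kafka-svc                   # exposed externally on NodePort 30092 of each node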
@@ -0,0 +1,21 @@
1
+ apiVersion: sparkoperator.k8s.io/v1beta2
2
+ kind: SparkApplication
3
+ metadata:
4
+ name: spark-pi-python
5
+ namespace: default
6
+ spec:
7
+ type: Python
8
+ pythonVersion: '3'
9
+ mode: cluster
10
+ image: spark:3.5.3
11
+ imagePullPolicy: IfNotPresent
12
+ mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py
13
+ sparkVersion: 3.5.3
14
+ driver:
15
+ cores: 1
16
+ memory: 512m
17
+ serviceAccount: spark-operator-spark
18
+ executor:
19
+ instances: 1
20
+ cores: 1
21
+ memory: 512m
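A submission sketch for the SparkApplication above, assuming the Kubeflow Spark operator and its CRDs are already installed and the manifest is saved as spark-pi-python.yaml (hypothetical file name):

    kubectl apply -f spark-pi-python.yaml
    kubectl get sparkapplications -n default              # CRD provided by the operator
    kubectl logs -n default spark-pi-python-driver -f     # the operator conventionally names the driver pod <name>-driver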
@@ -0,0 +1,119 @@
1
+ # This consolidated YAML file contains configurations for:
2
+ # 1. Calico Installation (Installation and APIServer resources)
3
+ # 2. Permissive egress NetworkPolicies for the 'default', 'kube-system', 'kube-node-lease', 'kube-public', and 'tigera-operator' namespaces
4
+ #
5
+ # These are standard Kubernetes resources that can be applied directly using 'kubectl apply'.
6
+ # The kubeadm-specific ClusterConfiguration and InitConfiguration have been removed
7
+ # as they are only processed by the 'kubeadm init' command, not 'kubectl apply'.
8
+
9
+ # --- Calico Installation: Base configuration for Calico ---
10
+ # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
11
+ apiVersion: operator.tigera.io/v1
12
+ kind: Installation
13
+ metadata:
14
+ name: default
15
+ spec:
16
+ # Configures Calico networking.
17
+ calicoNetwork:
18
+ # Note: The ipPools section cannot be modified post-install.
19
+ ipPools:
20
+ - blockSize: 26
21
+ cidr: 192.168.0.0/16
22
+ encapsulation: VXLANCrossSubnet
23
+ natOutgoing: Enabled
24
+ nodeSelector: all()
25
+
26
+ ---
27
+ # This section configures the Calico API server.
28
+ # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
29
+ apiVersion: operator.tigera.io/v1
30
+ kind: APIServer
31
+ metadata:
32
+ name: default
33
+ spec: {}
34
+
35
+ ---
36
+ # This consolidated NetworkPolicy file ensures that all pods in the specified namespaces
37
+ # have unrestricted egress (outbound) access.
38
+ # This is useful for troubleshooting or for environments where strict egress control
39
+ # is not immediately required for these system/default namespaces.
40
+
41
+ ---
42
+ # Policy for the 'default' namespace
43
+ apiVersion: networking.k8s.io/v1
44
+ kind: NetworkPolicy
45
+ metadata:
46
+ name: allow-all-egress-default-namespace
47
+ namespace: default # This policy applies to the 'default' namespace
48
+ spec:
49
+ podSelector: {} # Selects all pods in this namespace
50
+ policyTypes:
51
+ - Egress
52
+ egress:
53
+ - to:
54
+ - ipBlock:
55
+ cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
56
+
57
+ ---
58
+ # Policy for the 'kube-system' namespace
59
+ apiVersion: networking.k8s.io/v1
60
+ kind: NetworkPolicy
61
+ metadata:
62
+ name: allow-all-egress-kube-system-namespace
63
+ namespace: kube-system # This policy applies to the 'kube-system' namespace
64
+ spec:
65
+ podSelector: {} # Selects all pods in this namespace
66
+ policyTypes:
67
+ - Egress
68
+ egress:
69
+ - to:
70
+ - ipBlock:
71
+ cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
72
+
73
+ ---
74
+ # Policy for the 'kube-node-lease' namespace
75
+ apiVersion: networking.k8s.io/v1
76
+ kind: NetworkPolicy
77
+ metadata:
78
+ name: allow-all-egress-kube-node-lease-namespace
79
+ namespace: kube-node-lease # This policy applies to the 'kube-node-lease' namespace
80
+ spec:
81
+ podSelector: {} # Selects all pods in this namespace
82
+ policyTypes:
83
+ - Egress
84
+ egress:
85
+ - to:
86
+ - ipBlock:
87
+ cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
88
+
89
+ ---
90
+ # Policy for the 'kube-public' namespace
91
+ apiVersion: networking.k8s.io/v1
92
+ kind: NetworkPolicy
93
+ metadata:
94
+ name: allow-all-egress-kube-public-namespace
95
+ namespace: kube-public # This policy applies to the 'kube-public' namespace
96
+ spec:
97
+ podSelector: {} # Selects all pods in this namespace
98
+ policyTypes:
99
+ - Egress
100
+ egress:
101
+ - to:
102
+ - ipBlock:
103
+ cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
104
+
105
+ ---
106
+ # Policy for the 'tigera-operator' namespace
107
+ apiVersion: networking.k8s.io/v1
108
+ kind: NetworkPolicy
109
+ metadata:
110
+ name: allow-all-egress-tigera-operator-namespace
111
+ namespace: tigera-operator # This policy applies to the 'tigera-operator' namespace
112
+ spec:
113
+ podSelector: {} # Selects all pods in this namespace
114
+ policyTypes:
115
+ - Egress
116
+ egress:
117
+ - to:
118
+ - ipBlock:
119
+ cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
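A hedged verification sketch for the Calico and NetworkPolicy resources above; 'tigerastatus' is the status resource created by the Tigera operator, the rest is plain kubectl:

    kubectl get tigerastatus                              # calico and apiserver should report Available
    kubectl get installation default -o yaml              # the Installation applied above
    kubectl get networkpolicy -A | grep allow-all-egress  # the permissive egress policies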
@@ -0,0 +1,65 @@
1
+ apiVersion: v1
2
+ data:
3
+ kubelet: |
4
+ apiVersion: kubelet.config.k8s.io/v1beta1
5
+ authentication:
6
+ anonymous:
7
+ enabled: false
8
+ webhook:
9
+ cacheTTL: 0s
10
+ enabled: true
11
+ x509:
12
+ clientCAFile: /etc/kubernetes/pki/ca.crt
13
+ authorization:
14
+ mode: Webhook
15
+ webhook:
16
+ cacheAuthorizedTTL: 0s
17
+ cacheUnauthorizedTTL: 0s
18
+ cgroupDriver: systemd
19
+ clusterDNS:
20
+ - 10.96.0.10
21
+ clusterDomain: cluster.local
22
+ containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
23
+ cpuManagerReconcilePeriod: 0s
24
+ crashLoopBackOff: {}
25
+ evictionHard:
26
+ imagefs.available: "5%" # Adjusted for more tolerance
27
+ memory.available: "100Mi"
28
+ nodefs.available: "5%" # Adjusted for more tolerance
29
+ nodefs.inodesFree: "5%"
30
+ evictionPressureTransitionPeriod: 0s
31
+ fileCheckFrequency: 0s
32
+ healthzBindAddress: 127.0.0.1
33
+ healthzPort: 10248
34
+ httpCheckFrequency: 0s
35
+ imageMaximumGCAge: 0s
36
+ imageMinimumGCAge: 0s
37
+ kind: KubeletConfiguration
38
+ logging:
39
+ flushFrequency: 0
40
+ options:
41
+ json:
42
+ infoBufferSize: "0"
43
+ text:
44
+ infoBufferSize: "0"
45
+ verbosity: 0
46
+ memorySwap: {}
47
+ nodeStatusReportFrequency: 0s
48
+ nodeStatusUpdateFrequency: 0s
49
+ rotateCertificates: true
50
+ runtimeRequestTimeout: 0s
51
+ shutdownGracePeriod: 0s
52
+ shutdownGracePeriodCriticalPods: 0s
53
+ staticPodPath: /etc/kubernetes/manifests
54
+ streamingConnectionIdleTimeout: 0s
55
+ syncFrequency: 0s
56
+ volumeStatsAggPeriod: 0s
57
+ kind: ConfigMap
58
+ metadata:
59
+ annotations:
60
+ kubeadm.kubernetes.io/component-config.hash: sha256:26488e9fc7c5cb5fdda9996cda2e6651a9af5febce07ea02de11bd3ef3f49e9c
61
+ creationTimestamp: "2025-06-30T12:42:00Z"
62
+ name: kubelet-config
63
+ namespace: kube-system
64
+ resourceVersion: "204"
65
+ uid: a85321a8-f3e0-40fa-8e4e-9d33b8842e7a
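For context, this ConfigMap is the KubeletConfiguration that kubeadm stores in-cluster and syncs to each node's /var/lib/kubelet/config.yaml; a typical way to propagate an edit to a node (standard kubeadm and systemd commands):

    kubectl -n kube-system get configmap kubelet-config -o yaml   # inspect the stored configuration
    sudo kubeadm upgrade node phase kubelet-config                # re-download it to /var/lib/kubelet/config.yaml
    sudo systemctl restart kubelet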
@@ -4,7 +4,7 @@ metadata:
4
4
  name: postgres
5
5
  spec:
6
6
  serviceName: postgres
7
- replicas: 3
7
+ replicas: 1
8
8
  selector:
9
9
  matchLabels:
10
10
  app: postgres
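The replica change above can also be made against a running cluster without re-applying the manifest; an equivalent one-liner:

    kubectl scale statefulset postgres --replicas=1
    kubectl get statefulset postgres                      # confirm READY 1/1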
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "type": "module",
3
3
  "main": "src/index.js",
4
4
  "name": "underpost",
5
- "version": "2.8.67",
5
+ "version": "2.8.75",
6
6
  "description": "pwa api rest template",
7
7
  "scripts": {
8
8
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -3,6 +3,7 @@ import { loggerFactory } from '../server/logger.js';
3
3
  import { shellExec } from '../server/process.js';
4
4
  import UnderpostDeploy from './deploy.js';
5
5
  import UnderpostTest from './test.js';
6
+ import os from 'os';
6
7
 
7
8
  const logger = loggerFactory(import.meta);
8
9
 
@@ -27,9 +28,12 @@ class UnderpostCluster {
27
28
  infoCapacityPod: false,
28
29
  istio: false,
29
30
  pullImage: false,
31
+ dedicatedGpu: false,
32
+ kubeadm: false,
30
33
  },
31
34
  ) {
32
- // 1) Install kind, kubeadm, docker, podman
35
+ // sudo dnf update
36
+ // 1) Install kind, kubeadm, docker, podman, helm
33
37
  // 2) Check kubectl, kubelet, containerd.io
34
38
  // 3) Install Nvidia drivers from Rocky Linux docs
35
39
  // 4) Install LXD with MAAS from Rocky Linux docs
@@ -37,7 +41,8 @@ class UnderpostCluster {
37
41
  const npmRoot = getNpmRootPath();
38
42
  const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
39
43
  if (options.infoCapacityPod === true) return logger.info('', UnderpostDeploy.API.resourcesFactory());
40
- if (options.infoCapacity === true) return logger.info('', UnderpostCluster.API.getResourcesCapacity());
44
+ if (options.infoCapacity === true)
45
+ return logger.info('', UnderpostCluster.API.getResourcesCapacity(options.kubeadm));
41
46
  if (options.reset === true) return await UnderpostCluster.API.reset();
42
47
  if (options.listPods === true) return console.table(UnderpostDeploy.API.get(podName ?? undefined));
43
48
 
@@ -65,6 +70,7 @@ class UnderpostCluster {
65
70
  shellExec(
66
71
  `kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\\n"}{.metadata.name}{":\\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}'`,
67
72
  );
73
+ shellExec(`sudo crictl images`);
68
74
  console.log();
69
75
  logger.info('contour -------------------------------------------------');
70
76
  for (const _k of ['Cluster', 'HTTPProxy', 'ClusterIssuer', 'Certificate']) {
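The added 'crictl images' call lists images through the CRI socket, i.e. the images the kubelet can actually schedule; a small sketch (the endpoint flag is only needed when /etc/crictl.yaml is not configured):

    sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock images
    sudo crictl ps -a                                     # containers as the runtime sees them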
@@ -105,30 +111,44 @@ class UnderpostCluster {
105
111
  // shellExec(
106
112
  // `wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml`,
107
113
  // );
108
- shellExec(`sudo kubectl apply -f ./manifests/calico-custom-resources.yaml`);
114
+ shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubeadm-calico-config.yaml`);
109
115
  shellExec(`sudo systemctl restart containerd`);
116
+ const nodeName = os.hostname();
117
+ shellExec(`kubectl taint nodes ${nodeName} node-role.kubernetes.io/control-plane:NoSchedule-`);
110
118
  } else {
111
119
  shellExec(`sudo systemctl restart containerd`);
112
- shellExec(
113
- `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
114
- options?.dev === true ? '-dev' : ''
115
- }.yaml`,
116
- );
120
+ if (options.full === true || options.dedicatedGpu === true) {
121
+ // https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
122
+ shellExec(`cd ${underpostRoot}/manifests && kind create cluster --config kind-config-cuda.yaml`);
123
+ } else {
124
+ shellExec(
125
+ `cd ${underpostRoot}/manifests && kind create cluster --config kind-config${
126
+ options?.dev === true ? '-dev' : ''
127
+ }.yaml`,
128
+ );
129
+ }
117
130
  shellExec(`sudo chown $(id -u):$(id -g) $HOME/.kube/config**`);
118
131
  }
119
132
  } else logger.warn('Cluster already initialized');
120
133
 
134
+ // shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/kubelet-config.yaml`);
135
+
136
+ if (options.full === true || options.dedicatedGpu === true) {
137
+ shellExec(`node ${underpostRoot}/bin/deploy nvidia-gpu-operator`);
138
+ shellExec(
139
+ `node ${underpostRoot}/bin/deploy kubeflow-spark-operator${options.kubeadm === true ? ' kubeadm' : ''}`,
140
+ );
141
+ }
142
+
121
143
  if (options.full === true || options.valkey === true) {
122
144
  if (options.pullImage === true) {
123
- // kubectl patch statefulset service-valkey --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"valkey/valkey:latest"}]'
124
- // kubectl patch statefulset service-valkey -p '{"spec":{"template":{"spec":{"containers":[{"name":"service-valkey","imagePullPolicy":"Never"}]}}}}'
125
145
  shellExec(`docker pull valkey/valkey`);
126
- // shellExec(`sudo kind load docker-image valkey/valkey`);
127
- // shellExec(`sudo podman pull docker.io/valkey/valkey:latest`);
128
- // shellExec(`podman save -o valkey.tar valkey/valkey`);
129
- // shellExec(`sudo kind load image-archive valkey.tar`);
130
- // shellExec(`sudo rm -rf ./valkey.tar`);
131
- shellExec(`sudo kind load docker-image valkey/valkey:latest`);
146
+ if (!options.kubeadm)
147
+ shellExec(
148
+ `sudo ${
149
+ options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
150
+ } valkey/valkey:latest`,
151
+ );
132
152
  }
133
153
  shellExec(`kubectl delete statefulset service-valkey`);
134
154
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey`);
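The image-loading branch above distinguishes kind from kubeadm clusters; a manual equivalent, with the caveat that 'ctr images import' expects a tar archive rather than an image reference, so a 'docker save' step is needed on the containerd path:

    docker pull valkey/valkey:latest
    kind load docker-image valkey/valkey:latest           # kind-based cluster
    docker save valkey/valkey:latest -o valkey.tar        # kubeadm + containerd cluster
    sudo ctr -n k8s.io images import valkey.tar           # k8s.io is the namespace the kubelet reads
    rm -f valkey.tar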
@@ -144,15 +164,29 @@ class UnderpostCluster {
144
164
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb`);
145
165
  }
146
166
  if (options.full === true || options.postgresql === true) {
167
+ if (options.pullImage === true) {
168
+ shellExec(`docker pull postgres:latest`);
169
+ if (!options.kubeadm)
170
+ shellExec(
171
+ `sudo ${
172
+ options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
173
+ } postgres:latest`,
174
+ );
175
+ }
147
176
  shellExec(
148
177
  `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password`,
149
178
  );
150
- shellExec(`kubectl apply -k ./manifests/postgresql`);
179
+ shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql`);
151
180
  }
152
181
  if (options.mongodb4 === true) {
153
182
  if (options.pullImage === true) {
154
183
  shellExec(`docker pull mongo:4.4`);
155
- shellExec(`sudo kind load docker-image mongo:4.4`);
184
+ if (!options.kubeadm)
185
+ shellExec(
186
+ `sudo ${
187
+ options.kubeadm === true ? `ctr -n k8s.io images import` : `kind load docker-image`
188
+ } mongo:4.4`,
189
+ );
156
190
  }
157
191
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4`);
158
192
 
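A quick check that the PostgreSQL secret and kustomization applied above landed as expected (paths relative to the underpost root, as in the script):

    kubectl get secret postgres-secret -o jsonpath='{.data.password}' | base64 -d; echo
    kubectl kustomize manifests/postgresql | less         # render the kustomization without applying it
    kubectl get statefulset postgres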
@@ -225,43 +259,131 @@ class UnderpostCluster {
225
259
  shellExec(`sudo kubectl apply -f ${underpostRoot}/manifests/${letsEncName}.yaml`);
226
260
  }
227
261
  },
262
+ // This function performs a comprehensive reset of Kubernetes and container environments
263
+ // on the host machine. Its primary goal is to clean up cluster components, temporary files,
264
+ // and container data, ensuring a clean state for re-initialization or fresh deployments,
265
+ // while also preventing the loss of the host machine's internet connectivity.
266
+
228
267
  reset() {
268
+ // Step 1: Delete all existing Kind (Kubernetes in Docker) clusters.
269
+ // 'kind get clusters' lists all Kind clusters.
270
+ // 'xargs -t -n1 kind delete cluster --name' then iterates through each cluster name
271
+ // and executes 'kind delete cluster --name <cluster_name>' to remove them.
229
272
  shellExec(`kind get clusters | xargs -t -n1 kind delete cluster --name`);
273
+
274
+ // Step 2: Reset the Kubernetes control-plane components installed by kubeadm.
275
+ // 'kubeadm reset -f' performs a forceful reset, removing installed Kubernetes components,
276
+ // configuration files, and associated network rules (like iptables entries created by kubeadm).
277
+ // The '-f' flag bypasses confirmation prompts.
230
278
  shellExec(`sudo kubeadm reset -f`);
279
+
280
+ // Step 3: Remove specific CNI (Container Network Interface) configuration files.
281
+ // This command targets and removes the configuration file for Flannel,
282
+ // a common CNI plugin, which might be left behind after a reset.
231
283
  shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
232
- shellExec('sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X');
284
+
285
+ // Note: The aggressive 'sudo iptables -F ...' command that earlier versions ran here has been intentionally removed.
286
+ // This command would flush all iptables rules, including those crucial for the host's general
287
+ // internet connectivity, leading to network loss. 'kubeadm reset' and container runtime pruning
288
+ // adequately handle Kubernetes and container-specific iptables rules without affecting the host's
289
+ // default network configuration.
290
+
291
+ // Step 4: Remove the kubectl configuration file from the current user's home directory.
292
+ // This ensures that after a reset, there's no lingering configuration pointing to the old cluster,
293
+ // providing a clean slate for connecting to a new or re-initialized cluster.
233
294
  shellExec('sudo rm -f $HOME/.kube/config');
295
+
296
+ // Step 5: Clear trash files from the root user's trash directory.
297
+ // This is a general cleanup step to remove temporary or deleted files.
234
298
  shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
299
+
300
+ // Step 6: Prune all unused Docker data.
301
+ // 'docker system prune -a -f' removes:
302
+ // - All stopped containers
303
+ // - All unused networks
304
+ // - All dangling images
305
+ // - All build cache
306
+ // - All unused volumes
307
+ // This aggressively frees up disk space and removes temporary Docker artifacts.
235
308
  shellExec('sudo docker system prune -a -f');
309
+
310
+ // Step 7: Stop the Docker daemon service.
311
+ // This step is often necessary to ensure that Docker's files and directories
312
+ // can be safely manipulated or moved in subsequent steps without conflicts.
236
313
  shellExec('sudo service docker stop');
314
+
315
+ // Step 8: Aggressively remove container storage data for containerd and Docker.
316
+ // These commands target the default storage locations for containerd and Docker,
317
+ // as well as any custom paths that might have been used (`/home/containers/storage`, `/home/docker`).
318
+ // This ensures a complete wipe of all container images, layers, and volumes.
237
319
  shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
238
320
  shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
239
- shellExec(`sudo rm -rf /var/lib/docker~/*`);
240
- shellExec(`sudo rm -rf /home/containers/storage/*`);
241
- shellExec(`sudo rm -rf /home/docker/*`);
242
- shellExec('sudo mv /var/lib/docker /var/lib/docker~');
243
- shellExec('sudo mkdir /home/docker');
244
- shellExec('sudo chmod 0711 /home/docker');
245
- shellExec('sudo ln -s /home/docker /var/lib/docker');
321
+ shellExec(`sudo rm -rf /var/lib/docker~/*`); // Cleans up a potential backup directory for Docker data
322
+ shellExec(`sudo rm -rf /home/containers/storage/*`); // Cleans up custom containerd/Podman storage
323
+ shellExec(`sudo rm -rf /home/docker/*`); // Cleans up custom Docker storage
324
+
325
+ // Step 9: Re-configure Docker's default storage location (if desired).
326
+ // These commands effectively move Docker's data directory from its default `/var/lib/docker`
327
+ // to a new location (`/home/docker`) and create a symbolic link.
328
+ // This is a specific customization to relocate Docker's storage.
329
+ shellExec('sudo mv /var/lib/docker /var/lib/docker~'); // Moves existing /var/lib/docker to /var/lib/docker~ (backup)
330
+ shellExec('sudo mkdir /home/docker'); // Creates the new desired directory for Docker data
331
+ shellExec('sudo chmod 0711 /home/docker'); // Sets appropriate permissions for the new directory
332
+ shellExec('sudo ln -s /home/docker /var/lib/docker'); // Creates a symlink from original path to new path
333
+
334
+ // Step 10: Prune all unused Podman data.
335
+ // Similar to Docker pruning, these commands remove:
336
+ // - All stopped containers
337
+ // - All unused networks
338
+ // - All unused images
339
+ // - All unused volumes ('--volumes')
340
+ // - The '--force' flag bypasses confirmation.
341
+ // '--external' prunes external content not managed by Podman's default storage backend.
246
342
  shellExec(`sudo podman system prune -a -f`);
247
343
  shellExec(`sudo podman system prune --all --volumes --force`);
248
344
  shellExec(`sudo podman system prune --external --force`);
249
- shellExec(`sudo podman system prune --all --volumes --force`);
345
+ shellExec(`sudo podman system prune --all --volumes --force`); // Redundant but harmless repetition
346
+
347
+ // Step 11: Create and set permissions for Podman's custom storage directory.
348
+ // This ensures the custom path `/home/containers/storage` exists and has correct permissions
349
+ // before Podman attempts to use it.
250
350
  shellExec(`sudo mkdir -p /home/containers/storage`);
251
351
  shellExec('sudo chmod 0711 /home/containers/storage');
352
+
353
+ // Step 12: Update Podman's storage configuration file.
354
+ // This command uses 'sed' to modify `/etc/containers/storage.conf`,
355
+ // changing the default storage path from `/var/lib/containers/storage`
356
+ // to the customized `/home/containers/storage`.
252
357
  shellExec(
253
358
  `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
254
359
  );
360
+
361
+ // Step 13: Reset Podman system settings.
362
+ // This command resets Podman's system-wide configuration to its default state.
255
363
  shellExec(`sudo podman system reset -f`);
364
+
365
+ // Note: The 'sysctl net.bridge.bridge-nf-call-iptables=0' and related commands
366
+ // have been removed here (left commented out below). These sysctl settings (bridge-nf-call-iptables,
367
+ // bridge-nf-call-arptables, bridge-nf-call-ip6tables) are crucial for allowing
368
+ // network traffic through Linux bridges to be processed by iptables.
369
+ // Kubernetes and CNI plugins generally require them to be enabled (set to '1').
370
+ // Re-initializing Kubernetes will typically set these as needed, and leaving them
371
+ // at their system default (or '1' if already configured) is safer for host
372
+ // connectivity during a reset operation.
373
+
256
374
  // https://github.com/kubernetes-sigs/kind/issues/2886
257
- shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
258
- shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
259
- shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
260
- shellExec(`docker network rm kind`);
375
+ // shellExec(`sysctl net.bridge.bridge-nf-call-iptables=0`);
376
+ // shellExec(`sysctl net.bridge.bridge-nf-call-arptables=0`);
377
+ // shellExec(`sysctl net.bridge.bridge-nf-call-ip6tables=0`);
378
+
379
+ // Step 14: Remove the 'kind' Docker network.
380
+ // This cleans up any network bridges or configurations specifically created by Kind.
381
+ // shellExec(`docker network rm kind`);
261
382
  },
262
- getResourcesCapacity() {
383
+
384
+ getResourcesCapacity(kubeadm = false) {
263
385
  const resources = {};
264
- const info = true
386
+ const info = false
265
387
  ? `Capacity:
266
388
  cpu: 8
267
389
  ephemeral-storage: 153131976Ki
@@ -276,10 +398,15 @@ Allocatable:
276
398
  hugepages-2Mi: 0
277
399
  memory: 11914720Ki
278
400
  pods: `
279
- : shellExec(`kubectl describe node kind-worker | grep -E '(Allocatable:|Capacity:)' -A 6`, {
280
- stdout: true,
281
- silent: true,
282
- });
401
+ : shellExec(
402
+ `kubectl describe node ${
403
+ kubeadm === true ? os.hostname() : 'kind-worker'
404
+ } | grep -E '(Allocatable:|Capacity:)' -A 6`,
405
+ {
406
+ stdout: true,
407
+ silent: true,
408
+ },
409
+ );
283
410
  info
284
411
  .split('Allocatable:')[1]
285
412
  .split('\n')