underpost 2.8.795 → 2.8.797

This diff compares the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
package/README.md CHANGED
@@ -68,7 +68,7 @@ Run dev client server
  npm run dev
  ```
  <!-- -->
- ## underpost ci/cd cli v2.8.795
+ ## underpost ci/cd cli v2.8.797

  ### Usage: `underpost [options] [command]`
  ```
package/cli.md CHANGED
@@ -1,4 +1,4 @@
- ## underpost ci/cd cli v2.8.795
+ ## underpost ci/cd cli v2.8.797

  ### Usage: `underpost [options] [command]`
  ```
package/conf.js CHANGED
@@ -164,6 +164,10 @@ const DefaultConf = /**/ {
  auth: { user: 'noreply@default.net', pass: '' },
  },
  },
+ valkey: {
+ port: 6379,
+ host: '127.0.0.1',
+ },
  },
  },
  'www.default.net': {
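The new `valkey` block above supplies default connection settings that the runtime changes further down in this diff (the conditional `createValkeyConnection` call in `src/index.js` and the `options.host` fix in the valkey client factory) can pick up. A minimal sketch of how such defaults could be consumed, assuming an ioredis-compatible client (the client module and variable names below are assumptions, not code from this package):

```js
// Hypothetical sketch only; 'ioredis' and these names are illustrative assumptions.
import Redis from 'ioredis';

// Mirrors the defaults added to DefaultConf above.
const conf = { valkey: { port: 6379, host: '127.0.0.1' } };

const valkey = new Redis({
  port: conf.valkey?.port,
  host: conf.valkey?.host, // mapped from host, not port (see the factory fix at the end of this diff)
});

valkey.on('error', (err) => console.warn('Valkey unavailable:', err.message));
```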
@@ -58,7 +58,7 @@ services:
  cpus: '0.25'
  memory: 20M
  labels: # labels in Compose file instead of Dockerfile
- engine.version: '2.8.795'
+ engine.version: '2.8.797'
  networks:
  - load-balancer

@@ -17,7 +17,7 @@ spec:
  spec:
  containers:
  - name: dd-template-development-blue
- image: localhost/debian-underpost:v2.8.795
+ image: localhost/debian-underpost:v2.8.797
  # resources:
  # requests:
  # memory: "124Ki"
@@ -100,7 +100,7 @@ spec:
  spec:
  containers:
  - name: dd-template-development-green
- image: localhost/debian-underpost:v2.8.795
+ image: localhost/debian-underpost:v2.8.797
  # resources:
  # requests:
  # memory: "124Ki"
@@ -13,11 +13,11 @@ spec:
  labels:
  app: mongodb
  spec:
- hostname: mongo
+ hostname: mongodb-service
  containers:
  - name: mongodb
  image: mongo:4.4
- command: ['mongod', '--replSet', 'rs0', '--bind_ip_all']
+ command: ["mongod", "--replSet", "rs0", "--bind_ip_all"]
  # -- bash
  # mongo
  # use admin
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "type": "module",
  "main": "src/index.js",
  "name": "underpost",
- "version": "2.8.795",
+ "version": "2.8.797",
  "description": "pwa api rest template",
  "scripts": {
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -9,6 +9,37 @@ const logger = loggerFactory(import.meta);

  class UnderpostCluster {
  static API = {
+ /**
+ * @method init
+ * @description Initializes and configures the Kubernetes cluster based on provided options.
+ * This method handles host prerequisites, cluster initialization (Kind or Kubeadm),
+ * and optional component deployments.
+ * @param {string} [podName] - Optional name of a pod for specific operations (e.g., listing).
+ * @param {object} [options] - Configuration options for cluster initialization.
+ * @param {boolean} [options.mongodb=false] - Deploy MongoDB.
+ * @param {boolean} [options.mongodb4=false] - Deploy MongoDB 4.4.
+ * @param {boolean} [options.mariadb=false] - Deploy MariaDB.
+ * @param {boolean} [options.mysql=false] - Deploy MySQL.
+ * @param {boolean} [options.postgresql=false] - Deploy PostgreSQL.
+ * @param {boolean} [options.valkey=false] - Deploy Valkey.
+ * @param {boolean} [options.full=false] - Deploy a full set of common components.
+ * @param {boolean} [options.info=false] - Display extensive Kubernetes cluster information.
+ * @param {boolean} [options.certManager=false] - Deploy Cert-Manager for certificate management.
+ * @param {boolean} [options.listPods=false] - List Kubernetes pods.
+ * @param {boolean} [options.reset=false] - Perform a comprehensive reset of Kubernetes and container environments.
+ * @param {boolean} [options.dev=false] - Run in development mode (adjusts paths).
+ * @param {string} [options.nsUse=''] - Set the current kubectl namespace.
+ * @param {boolean} [options.infoCapacity=false] - Display resource capacity information for the cluster.
+ * @param {boolean} [options.infoCapacityPod=false] - Display resource capacity information for pods.
+ * @param {boolean} [options.istio=false] - Deploy Istio service mesh.
+ * @param {boolean} [options.pullImage=false] - Pull necessary Docker images before deployment.
+ * @param {boolean} [options.dedicatedGpu=false] - Configure for dedicated GPU usage (e.g., NVIDIA GPU Operator).
+ * @param {boolean} [options.kubeadm=false] - Initialize the cluster using Kubeadm.
+ * @param {boolean} [options.initHost=false] - Perform initial host setup (install Docker, Podman, Kind, Kubeadm, Helm).
+ * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
+ * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm join).
+ * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
+ */
  async init(
  podName,
  options = {
@@ -113,6 +144,12 @@ class UnderpostCluster {
  );
  // Configure kubectl for the current user
  UnderpostCluster.API.chown();
+
+ // Apply kubelet-config.yaml explicitly
+ // Using 'kubectl replace --force' to ensure the ConfigMap is updated,
+ // even if it was modified by kubeadm or other processes, resolving conflicts.
+ // shellExec(`kubectl replace --force -f ${underpostRoot}/manifests/kubelet-config.yaml`);
+
  // Install Calico CNI
  logger.info('Installing Calico CNI...');
  shellExec(
@@ -225,7 +262,7 @@ class UnderpostCluster {
  if (successInstance) {
  const mongoConfig = {
  _id: 'rs0',
- members: [{ _id: 0, host: '127.0.0.1:27017' }],
+ members: [{ _id: 0, host: 'mongodb-service:27017' }],
  };

  const [pod] = UnderpostDeploy.API.get(deploymentName);
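This keeps the replica-set member host consistent with the `hostname: mongodb-service` change in the MongoDB manifest earlier in this diff; a member registered as `127.0.0.1` is only reachable from inside the MongoDB pod itself. As a rough illustration (the exact command the package runs inside the pod is an assumption and not shown in this diff), this config is the document passed to `rs.initiate` in the mongo shell:

```js
// Hypothetical mongo-shell sketch, e.g. run via:
//   kubectl exec <mongodb-pod> -- mongo --quiet --eval 'rs.initiate({...})'
rs.initiate({
  _id: 'rs0',
  members: [{ _id: 0, host: 'mongodb-service:27017' }],
});
```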
@@ -298,8 +335,9 @@ class UnderpostCluster {
  /**
  * @method config
  * @description Configures host-level settings required for Kubernetes.
- * IMPORTANT: This method has been updated to REMOVE all iptables flushing commands
- * to prevent conflicts with Kubernetes' own network management.
+ * This method ensures proper SELinux, Docker, Containerd, and Sysctl settings
+ * are applied for a healthy Kubernetes environment. It explicitly avoids
+ * iptables flushing commands to prevent conflicts with Kubernetes' own network management.
  */
  config() {
  console.log('Applying host configuration: SELinux, Docker, Containerd, and Sysctl settings.');
@@ -346,7 +384,8 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
  /**
  * @method chown
  * @description Sets up kubectl configuration for the current user.
- * This is typically run after kubeadm init on the control plane.
+ * This is typically run after kubeadm init on the control plane
+ * to allow non-root users to interact with the cluster.
  */
  chown() {
  console.log('Setting up kubectl configuration...');
@@ -359,74 +398,142 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);
  /**
  * @method reset
  * @description Performs a comprehensive reset of Kubernetes and container environments.
- * This function is for cleaning up a node, not for initial setup.
- * It avoids aggressive iptables flushing that would break host connectivity.
+ * This function is for cleaning up a node, reverting changes made by 'kubeadm init' or 'kubeadm join'.
+ * It includes deleting Kind clusters, resetting kubeadm, removing CNI configs,
+ * cleaning Docker and Podman data, persistent volumes, and resetting kubelet components.
+ * It avoids aggressive iptables flushing that would break host connectivity, relying on kube-proxy's
+ * control loop to eventually clean up rules if the cluster is not re-initialized.
  */
- reset() {
- console.log('Starting comprehensive reset of Kubernetes and container environments...');
-
- // Delete all existing Kind (Kubernetes in Docker) clusters.
- shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster --name`); // -r for no-op if no clusters
-
- // Reset the Kubernetes control-plane components installed by kubeadm.
- shellExec(`sudo kubeadm reset -f`);
-
- // Remove specific CNI configuration files (e.g., Flannel)
- shellExec('sudo rm -f /etc/cni/net.d/10-flannel.conflist');
-
- // Remove the kubectl configuration file
- shellExec('sudo rm -f $HOME/.kube/config');
-
- // Clear trash files from the root user's trash directory.
- shellExec('sudo rm -rf /root/.local/share/Trash/files/*');
-
- // Prune all unused Docker data.
- shellExec('sudo docker system prune -a -f');
-
- // Stop the Docker daemon service.
- shellExec('sudo service docker stop');
-
- // Aggressively remove container storage data for containerd and Docker.
- shellExec(`sudo rm -rf /var/lib/containers/storage/*`);
- shellExec(`sudo rm -rf /var/lib/docker/volumes/*`);
- shellExec(`sudo rm -rf /var/lib/docker~/*`);
- shellExec(`sudo rm -rf /home/containers/storage/*`);
- shellExec(`sudo rm -rf /home/docker/*`);
-
- // Re-configure Docker's default storage location (if desired).
- shellExec('sudo mv /var/lib/docker /var/lib/docker~ || true'); // Use || true to prevent error if dir doesn't exist
- shellExec('sudo mkdir -p /home/docker');
- shellExec('sudo chmod 777 /home/docker');
- shellExec('sudo ln -s /home/docker /var/lib/docker');
+ async reset() {
+ logger.info('Starting comprehensive reset of Kubernetes and container environments...');
+
+ try {
+ // Phase 1: Pre-reset Kubernetes Cleanup (while API server is still up)
+ logger.info('Phase 1/6: Cleaning up Kubernetes resources (PVCs, PVs) while API server is accessible...');
+
+ // Delete all Persistent Volume Claims (PVCs) to release the PVs.
+ // This must happen before deleting PVs or the host paths.
+ // shellExec(`kubectl delete pvc --all-namespaces --all --ignore-not-found || true`);
+
+ // Get all Persistent Volumes and identify their host paths for data deletion.
+ // This needs to be done *before* deleting the PVs themselves.
+ // The '|| echo '{"items":[]}'` handles cases where 'kubectl get pv' might return empty or error.
+ const pvListJson = shellExec(`kubectl get pv -o json || echo '{"items":[]}'`, { stdout: true, silent: true });
+ const pvList = JSON.parse(pvListJson);
+
+ if (pvList.items && pvList.items.length > 0) {
+ for (const pv of pvList.items) {
+ // Check if the PV uses hostPath and delete its contents
+ if (pv.spec.hostPath && pv.spec.hostPath.path) {
+ const hostPath = pv.spec.hostPath.path;
+ logger.info(`Removing data from host path for PV '${pv.metadata.name}': ${hostPath}`);
+ shellExec(`sudo rm -rf ${hostPath}/* || true`);
+ }
+ }
+ } else {
+ logger.info('No Persistent Volumes found with hostPath to clean up.');
+ }

- // Prune all unused Podman data.
- shellExec(`sudo podman system prune -a -f`);
- shellExec(`sudo podman system prune --all --volumes --force`);
- shellExec(`sudo podman system prune --external --force`);
+ // Then, delete all Persistent Volumes (PVs).
+ // shellExec(`kubectl delete pv --all --ignore-not-found || true`);
+
+ // Phase 2: Stop Kubelet and remove CNI configuration
+ logger.info('Phase 2/6: Stopping Kubelet and removing CNI configurations...');
+ // Stop kubelet service to prevent further activity and release resources.
+ shellExec(`sudo systemctl stop kubelet || true`);
+
+ // CNI plugins use /etc/cni/net.d to store their configuration.
+ // Removing this prevents conflicts and potential issues during kubeadm reset.
+ shellExec('sudo rm -rf /etc/cni/net.d/* || true');
+
+ // Phase 3: Kind Cluster Cleanup
+ logger.info('Phase 3/6: Cleaning up Kind clusters...');
+ // Delete all existing Kind (Kubernetes in Docker) clusters.
+ shellExec(`kind get clusters | xargs -r -t -n1 kind delete cluster || true`);
+
+ // Phase 4: Kubeadm Reset
+ logger.info('Phase 4/6: Performing kubeadm reset...');
+ // Reset the Kubernetes control-plane components installed by kubeadm.
+ // The --force flag skips confirmation prompts. This command will tear down the cluster.
+ shellExec(`sudo kubeadm reset --force`);
+
+ // Phase 5: Post-reset File System Cleanup (Local Storage, Kubeconfig)
+ logger.info('Phase 5/6: Cleaning up local storage provisioner data and kubeconfig...');
+ // Remove the kubectl configuration file for the current user.
+ // This is important to prevent stale credentials after the cluster is reset.
+ shellExec('rm -rf $HOME/.kube || true');
+
+ // Remove local path provisioner data, which stores data for dynamically provisioned PVCs.
+ shellExec(`sudo rm -rf /opt/local-path-provisioner/* || true`);
+
+ // Phase 6: Container Runtime Cleanup (Docker and Podman)
+ logger.info('Phase 6/6: Cleaning up Docker and Podman data...');
+ // Prune all unused Docker data (containers, images, volumes, networks).
+ shellExec('sudo docker system prune -a -f');
+
+ // Stop the Docker daemon service to ensure all files can be removed.
+ shellExec('sudo service docker stop || true');
+
+ // Aggressively remove container storage data for containerd and Docker.
+ // This targets the underlying storage directories.
+ shellExec(`sudo rm -rf /var/lib/containers/storage/* || true`);
+ shellExec(`sudo rm -rf /var/lib/docker/volumes/* || true`);
+ shellExec(`sudo rm -rf /var/lib/docker~/* || true`); // Cleanup any old Docker directories
+ shellExec(`sudo rm -rf /home/containers/storage/* || true`);
+ shellExec(`sudo rm -rf /home/docker/* || true`);
+
+ // Ensure Docker's default storage location is clean and re-linked if custom.
+ shellExec(`sudo rm -rf /var/lib/docker/* || true`);
+ shellExec('sudo mkdir -p /home/docker || true');
+ shellExec('sudo chmod 777 /home/docker || true');
+ shellExec('sudo ln -sf /home/docker /var/lib/docker || true'); // Use -sf for symbolic link, force and silent
+
+ // Prune all unused Podman data.
+ shellExec(`sudo podman system prune -a -f`);
+ shellExec(`sudo podman system prune --all --volumes --force`);
+ shellExec(`sudo podman system prune --external --force`);
+
+ // Create and set permissions for Podman's custom storage directory.
+ shellExec(`sudo mkdir -p /home/containers/storage || true`);
+ shellExec('sudo chmod 0711 /home/containers/storage || true');
+
+ // Update Podman's storage configuration file.
+ shellExec(
+ `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf || true`,
+ );

- // Create and set permissions for Podman's custom storage directory.
- shellExec(`sudo mkdir -p /home/containers/storage`);
- shellExec('sudo chmod 0711 /home/containers/storage');
+ // Reset Podman system settings.
+ shellExec(`sudo podman system reset -f`);

- // Update Podman's storage configuration file.
- shellExec(
- `sudo sed -i -e "s@/var/lib/containers/storage@/home/containers/storage@g" /etc/containers/storage.conf`,
- );
+ // Final Kubelet and System Cleanup (after all other operations)
+ logger.info('Finalizing Kubelet and system file cleanup...');
+ // Remove Kubernetes configuration and kubelet data directories.
+ shellExec(`sudo rm -rf /etc/kubernetes/* || true`);
+ shellExec(`sudo rm -rf /var/lib/kubelet/* || true`);

- // Reset Podman system settings.
- shellExec(`sudo podman system reset -f`);
+ // Clear trash files from the root user's trash directory.
+ shellExec('sudo rm -rf /root/.local/share/Trash/files/* || true');

- // Reset kubelet components
- shellExec(`sudo systemctl stop kubelet`);
- shellExec(`sudo rm -rf /etc/kubernetes/*`);
- shellExec(`sudo rm -rf /var/lib/kubelet/*`);
- shellExec(`sudo rm -rf /etc/cni/net.d/*`);
- shellExec(`sudo systemctl daemon-reload`);
- shellExec(`sudo systemctl start kubelet`);
+ // Reload systemd daemon to pick up any service file changes.
+ shellExec(`sudo systemctl daemon-reload`);
+ // Attempt to start kubelet; it might fail if the cluster is fully reset, which is expected.
+ shellExec(`sudo systemctl start kubelet || true`);

- console.log('Comprehensive reset completed.');
+ logger.info('Comprehensive reset completed successfully.');
+ } catch (error) {
+ logger.error(`Error during reset: ${error.message}`);
+ console.error(error);
+ }
  },

+ /**
+ * @method getResourcesCapacity
+ * @description Retrieves and returns the allocatable CPU and memory resources
+ * of the Kubernetes node.
+ * @param {boolean} [kubeadm=false] - If true, assumes a kubeadm-managed node;
+ * otherwise, assumes a Kind worker node.
+ * @returns {object} An object containing CPU and memory resources with values and units.
+ */
  getResourcesCapacity(kubeadm = false) {
  const resources = {};
  const info = shellExec(
@@ -457,6 +564,11 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`);

  return resources;
  },
+ /**
+ * @method initHost
+ * @description Installs essential host-level prerequisites for Kubernetes,
+ * including Docker, Podman, Kind, Kubeadm, and Helm.
+ */
  initHost() {
  console.log('Installing Docker, Podman, Kind, Kubeadm, and Helm...');
  // Install docker
package/src/cli/deploy.js CHANGED
@@ -274,8 +274,12 @@ kubectl patch ippool default-ipv4-ippool --type='json' -p='[{"op": "replace", "p
  sudo podman run --rm localhost/<image-name>:<image-version> <command>
  kubectl get configmap kubelet-config -n kube-system -o yaml > kubelet-config.yaml
  kubectl -n kube-system rollout restart daemonset kube-proxy
-
  kubectl get EndpointSlice -o wide --all-namespaces -w
+ kubectl apply -k manifests/deployment/adminer/.
+ kubectl wait --for=condition=Ready pod/busybox1
+ kubectl wait --for=jsonpath='{.status.phase}'=Running pod/busybox1
+ kubectl wait --for='jsonpath={.status.conditions[?(@.type=="Ready")].status}=True' pod/busybox1
+ kubectl wait --for=delete pod/busybox1 --timeout=60s

  kubectl run --rm -it test-dns --image=busybox:latest --restart=Never -- /bin/sh -c "
  nslookup kubernetes.default.svc.cluster.local;
@@ -327,6 +331,7 @@ EOF
  if (options.restoreHosts === true) {
  renderHosts = etcHost(concatHots);
  fs.writeFileSync(`/etc/hosts`, renderHosts, 'utf8');
+ logger.info(renderHosts);
  return;
  }

package/src/index.js CHANGED
@@ -31,7 +31,7 @@ class Underpost {
  * @type {String}
  * @memberof Underpost
  */
- static version = 'v2.8.795';
+ static version = 'v2.8.797';
  /**
  * Repository cli API
  * @static
@@ -366,7 +366,7 @@ const buildRuntime = async () => {
  if (db && apis) await DataBaseProvider.load({ apis, host, path, db });

  // valkey server
- await createValkeyConnection({ host, path }, valkey);
+ if (valkey) await createValkeyConnection({ host, path }, valkey);

  if (mailer) {
  const mailerSsrConf = confSSR[getCapVariableName(client)];
@@ -36,12 +36,12 @@ const valkeyClientFactory = async (options) => {
  // port: 6379,
  // host: 'valkey-service.default.svc.cluster.local',
  port: options?.port ? options.port : undefined,
- host: options?.port ? options.host : undefined,
+ host: options?.host ? options.host : undefined,
  retryStrategy: (attempt) => {
  if (attempt === 1) {
  valkey.disconnect();
  valkeyEnabled = false;
- logger.warn('Valkey service not enabled', { valkeyEnabled });
+ logger.warn('Valkey service not enabled', { ...options, valkeyEnabled });
  return;
  }
  return 1000; // 1 second interval attempt