underpost 2.99.7 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/.env.development +2 -1
  2. package/.env.production +1 -0
  3. package/.env.test +2 -1
  4. package/.github/workflows/npmpkg.ci.yml +2 -1
  5. package/.github/workflows/publish.ci.yml +18 -34
  6. package/.vscode/extensions.json +8 -50
  7. package/.vscode/settings.json +0 -77
  8. package/CHANGELOG.md +91 -1
  9. package/{cli.md → CLI-HELP.md} +48 -41
  10. package/Dockerfile +15 -15
  11. package/README.md +8 -15
  12. package/bin/build.js +4 -15
  13. package/bin/deploy.js +4 -133
  14. package/bin/file.js +9 -48
  15. package/bin/zed.js +63 -2
  16. package/examples/static-page/ssr-components/CustomPage.js +1 -1
  17. package/jsdoc.json +1 -2
  18. package/manifests/cronjobs/dd-cron/dd-cron-backup.yaml +1 -1
  19. package/manifests/cronjobs/dd-cron/dd-cron-dns.yaml +1 -1
  20. package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
  21. package/manifests/deployment/dd-test-development/deployment.yaml +2 -2
  22. package/manifests/deployment/fastapi/initial_data.sh +4 -52
  23. package/manifests/ipfs/configmap.yaml +57 -0
  24. package/manifests/ipfs/headless-service.yaml +35 -0
  25. package/manifests/ipfs/kustomization.yaml +8 -0
  26. package/manifests/ipfs/statefulset.yaml +149 -0
  27. package/manifests/ipfs/storage-class.yaml +9 -0
  28. package/package.json +5 -5
  29. package/scripts/k3s-node-setup.sh +89 -0
  30. package/scripts/lxd-vm-setup.sh +23 -0
  31. package/scripts/rocky-setup.sh +1 -13
  32. package/src/cli/baremetal.js +7 -9
  33. package/src/cli/cluster.js +72 -121
  34. package/src/cli/deploy.js +8 -5
  35. package/src/cli/index.js +31 -30
  36. package/src/cli/ipfs.js +184 -0
  37. package/src/cli/lxd.js +191 -236
  38. package/src/cli/repository.js +4 -1
  39. package/src/client/components/core/VanillaJs.js +0 -25
  40. package/src/client/services/user/user.management.js +0 -5
  41. package/src/client/services/user/user.service.js +1 -1
  42. package/src/db/mariadb/MariaDB.js +2 -2
  43. package/src/index.js +12 -1
  44. package/src/runtime/express/Dockerfile +15 -15
  45. package/src/runtime/lampp/Dockerfile +15 -15
  46. package/src/server/client-build-docs.js +26 -7
  47. package/src/server/conf.js +3 -20
  48. package/src/server/logger.js +22 -10
  49. package/.vscode/zed.keymap.json +0 -39
  50. package/.vscode/zed.settings.json +0 -20
  51. package/bin/cron.js +0 -47
  52. package/bin/db.js +0 -199
  53. package/bin/hwt.js +0 -49
  54. package/bin/util.js +0 -63
  55. package/manifests/lxd/underpost-setup.sh +0 -163
@@ -36,6 +36,7 @@ class UnderpostCluster {
36
36
  * @param {boolean} [options.mysql=false] - Deploy MySQL.
37
37
  * @param {boolean} [options.postgresql=false] - Deploy PostgreSQL.
38
38
  * @param {boolean} [options.valkey=false] - Deploy Valkey.
39
+ * @param {boolean} [options.ipfs=false] - Deploy ipfs-cluster statefulset.
39
40
  * @param {boolean} [options.full=false] - Deploy a full set of common components.
40
41
  * @param {boolean} [options.info=false] - Display extensive Kubernetes cluster information.
41
42
  * @param {boolean} [options.certManager=false] - Deploy Cert-Manager for certificate management.
@@ -57,10 +58,10 @@ class UnderpostCluster {
57
58
  * @param {string} [options.prom=''] - Initialize the cluster with a Prometheus Operator deployment and monitor scrap for specified hosts.
58
59
  * @param {boolean} [options.uninstallHost=false] - Uninstall all host components.
59
60
  * @param {boolean} [options.config=false] - Apply general host configuration (SELinux, containerd, sysctl, firewalld).
60
- * @param {boolean} [options.worker=false] - Configure as a worker node (for Kubeadm or K3s join).
61
61
  * @param {boolean} [options.chown=false] - Set up kubectl configuration for the current user.
62
62
  * @param {boolean} [options.removeVolumeHostPaths=false] - Remove data from host paths used by Persistent Volumes.
63
63
  * @param {string} [options.hosts] - Set custom hosts entries.
64
+ * @param {string} [options.replicas] - Set the number of replicas for certain deployments.
64
65
  * @memberof UnderpostCluster
65
66
  */
66
67
  async init(
@@ -73,6 +74,7 @@ class UnderpostCluster {
73
74
  mysql: false,
74
75
  postgresql: false,
75
76
  valkey: false,
77
+ ipfs: false,
76
78
  full: false,
77
79
  info: false,
78
80
  certManager: false,
@@ -94,10 +96,10 @@ class UnderpostCluster {
94
96
  prom: '',
95
97
  uninstallHost: false,
96
98
  config: false,
97
- worker: false,
98
99
  chown: false,
99
100
  removeVolumeHostPaths: false,
100
101
  hosts: '',
102
+ replicas: '',
101
103
  },
102
104
  ) {
103
105
  // Handles initial host setup (installing docker, podman, kind, kubeadm, helm)
@@ -140,11 +142,14 @@ class UnderpostCluster {
140
142
  }
141
143
 
142
144
  // Reset Kubernetes cluster components (Kind/Kubeadm/K3s) and container runtimes
143
- if (options.reset === true)
145
+ if (options.reset === true) {
146
+ const clusterType = options.k3s === true ? 'k3s' : options.kubeadm === true ? 'kubeadm' : 'kind';
144
147
  return await Underpost.cluster.safeReset({
145
148
  underpostRoot,
146
149
  removeVolumeHostPaths: options.removeVolumeHostPaths,
150
+ clusterType,
147
151
  });
152
+ }
148
153
 
149
154
  // Check if a cluster (Kind, Kubeadm, or K3s) is already initialized
150
155
  const alreadyKubeadmCluster = Underpost.deploy.get('calico-kube-controllers')[0];
@@ -153,66 +158,20 @@ class UnderpostCluster {
153
158
  const alreadyK3sCluster = Underpost.deploy.get('svclb-traefik')[0];
154
159
 
155
160
  // --- Kubeadm/Kind/K3s Cluster Initialization ---
156
- // This block handles the initial setup of the Kubernetes cluster (control plane or worker).
157
- // It prevents re-initialization if a cluster is already detected.
158
- if (!options.worker && !alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
161
+ if (!alreadyKubeadmCluster && !alreadyKindCluster && !alreadyK3sCluster) {
159
162
  Underpost.cluster.config();
160
163
  if (options.k3s === true) {
161
164
  logger.info('Initializing K3s control plane...');
162
165
  // Install K3s
163
- console.log('Installing K3s...');
166
+ logger.info('Installing K3s...');
164
167
  shellExec(`curl -sfL https://get.k3s.io | sh -`);
165
- console.log('K3s installation completed.');
166
-
167
- // Move k3s binary to /bin/k3s and make it executable
168
- shellExec(`sudo mv /usr/local/bin/k3s /bin/k3s`);
169
- shellExec(`sudo chmod +x /bin/k3s`);
170
- console.log('K3s binary moved to /bin/k3s and made executable.');
168
+ logger.info('K3s installation completed.');
171
169
 
172
- // Configure kubectl for the current user for K3s *before* checking readiness
173
- // This ensures kubectl can find the K3s kubeconfig immediately after K3s installation.
174
170
  Underpost.cluster.chown('k3s');
175
171
 
176
- // Wait for K3s to be ready
177
172
  logger.info('Waiting for K3s to be ready...');
178
- let k3sReady = false;
179
- let retries = 0;
180
- const maxRetries = 20; // Increased retries for K3s startup
181
- const delayMs = 5000; // 5 seconds
182
-
183
- while (!k3sReady && retries < maxRetries) {
184
- try {
185
- // Explicitly use KUBECONFIG for kubectl commands to ensure it points to K3s config
186
- const nodes = shellExec(`KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl get nodes -o json`, {
187
- stdout: true,
188
- silent: true,
189
- });
190
- const parsedNodes = JSON.parse(nodes);
191
- if (
192
- parsedNodes.items.some((node) =>
193
- node.status.conditions.some((cond) => cond.type === 'Ready' && cond.status === 'True'),
194
- )
195
- ) {
196
- k3sReady = true;
197
- logger.info('K3s cluster is ready.');
198
- } else {
199
- logger.info(`K3s not yet ready. Retrying in ${delayMs / 1000} seconds...`);
200
- await new Promise((resolve) => setTimeout(resolve, delayMs));
201
- }
202
- } catch (error) {
203
- logger.info(`Error checking K3s status: ${error.message}. Retrying in ${delayMs / 1000} seconds...`);
204
- await new Promise((resolve) => setTimeout(resolve, delayMs));
205
- }
206
- retries++;
207
- }
208
-
209
- if (!k3sReady) {
210
- logger.error('K3s cluster did not become ready in time. Please check the K3s logs.');
211
- return;
212
- }
213
-
214
- // K3s includes local-path-provisioner by default, so no need to install explicitly.
215
- logger.info('K3s comes with local-path-provisioner by default. Skipping explicit installation.');
173
+ shellExec(`sudo systemctl is-active --wait k3s || sudo systemctl wait --for=active k3s.service`);
174
+ logger.info('K3s service is active.');
216
175
  } else if (options.kubeadm === true) {
217
176
  logger.info('Initializing Kubeadm control plane...');
218
177
  // Set default values if not provided
@@ -254,14 +213,6 @@ class UnderpostCluster {
254
213
  );
255
214
  Underpost.cluster.chown('kind'); // Pass 'kind' to chown
256
215
  }
257
- } else if (options.worker === true) {
258
- // Worker node specific configuration (kubeadm join command needs to be executed separately)
259
- logger.info('Worker node configuration applied. Awaiting join command...');
260
- // No direct cluster initialization here for workers. The `kubeadm join` or `k3s agent` command
261
- // needs to be run on the worker after the control plane is up and a token is created.
262
- // This part of the script is for general worker setup, not the join itself.
263
- } else {
264
- logger.warn('Cluster already initialized or worker flag not set for worker node.');
265
216
  }
266
217
 
267
218
  // --- Optional Component Deployments (Databases, Ingress, Cert-Manager) ---
@@ -307,36 +258,21 @@ EOF
307
258
  }
308
259
 
309
260
  if (options.full === true || options.valkey === true) {
310
- if (options.pullImage === true) {
311
- // shellExec(`sudo podman pull valkey/valkey:latest`);
312
- if (!options.kubeadm && !options.k3s) {
313
- // Only load if not kubeadm/k3s (Kind needs it)
314
- shellExec(`docker pull valkey/valkey:latest`);
315
- shellExec(`sudo kind load docker-image valkey/valkey:latest`);
316
- } else if (options.kubeadm || options.k3s)
317
- // For kubeadm/k3s, ensure it's available for containerd
318
- shellExec(`sudo crictl pull valkey/valkey:latest`);
319
- }
261
+ if (options.pullImage === true) Underpost.cluster.pullImage('valkey/valkey:latest', options);
320
262
  shellExec(`kubectl delete statefulset valkey-service -n ${options.namespace} --ignore-not-found`);
321
263
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/valkey -n ${options.namespace}`);
322
264
  await Underpost.test.statusMonitor('valkey-service', 'Running', 'pods', 1000, 60 * 10);
323
265
  }
266
+ if (options.ipfs) {
267
+ await Underpost.ipfs.deploy(options, underpostRoot);
268
+ }
324
269
  if (options.full === true || options.mariadb === true) {
325
270
  shellExec(
326
271
  `sudo kubectl create secret generic mariadb-secret --from-file=username=/home/dd/engine/engine-private/mariadb-username --from-file=password=/home/dd/engine/engine-private/mariadb-password --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
327
272
  );
328
273
  shellExec(`kubectl delete statefulset mariadb-statefulset -n ${options.namespace} --ignore-not-found`);
329
274
 
330
- if (options.pullImage === true) {
331
- // shellExec(`sudo podman pull mariadb:latest`);
332
- if (!options.kubeadm && !options.k3s) {
333
- // Only load if not kubeadm/k3s (Kind needs it)
334
- shellExec(`docker pull mariadb:latest`);
335
- shellExec(`sudo kind load docker-image mariadb:latest`);
336
- } else if (options.kubeadm || options.k3s)
337
- // For kubeadm/k3s, ensure it's available for containerd
338
- shellExec(`sudo crictl pull mariadb:latest`);
339
- }
275
+ if (options.pullImage === true) Underpost.cluster.pullImage('mariadb:latest', options);
340
276
  shellExec(`kubectl apply -f ${underpostRoot}/manifests/mariadb/storage-class.yaml -n ${options.namespace}`);
341
277
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mariadb -n ${options.namespace}`);
342
278
  }
@@ -350,30 +286,14 @@ EOF
350
286
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mysql -n ${options.namespace}`);
351
287
  }
352
288
  if (options.full === true || options.postgresql === true) {
353
- if (options.pullImage === true) {
354
- if (!options.kubeadm && !options.k3s) {
355
- // Only load if not kubeadm/k3s (Kind needs it)
356
- shellExec(`docker pull postgres:latest`);
357
- shellExec(`sudo kind load docker-image postgres:latest`);
358
- } else if (options.kubeadm || options.k3s)
359
- // For kubeadm/k3s, ensure it's available for containerd
360
- shellExec(`sudo crictl pull postgres:latest`);
361
- }
289
+ if (options.pullImage === true) Underpost.cluster.pullImage('postgres:latest', options);
362
290
  shellExec(
363
291
  `sudo kubectl create secret generic postgres-secret --from-file=password=/home/dd/engine/engine-private/postgresql-password --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
364
292
  );
365
293
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/postgresql -n ${options.namespace}`);
366
294
  }
367
295
  if (options.mongodb4 === true) {
368
- if (options.pullImage === true) {
369
- if (!options.kubeadm && !options.k3s) {
370
- // Only load if not kubeadm/k3s (Kind needs it)
371
- shellExec(`docker pull mongo:4.4`);
372
- shellExec(`sudo kind load docker-image mongo:4.4`);
373
- } else if (options.kubeadm || options.k3s)
374
- // For kubeadm/k3s, ensure it's available for containerd
375
- shellExec(`sudo crictl pull mongo:4.4`);
376
- }
296
+ if (options.pullImage === true) Underpost.cluster.pullImage('mongo:4.4', options);
377
297
  shellExec(`kubectl apply -k ${underpostRoot}/manifests/mongodb-4.4 -n ${options.namespace}`);
378
298
 
379
299
  const deploymentName = 'mongodb-deployment';
@@ -395,15 +315,7 @@ EOF
395
315
  );
396
316
  }
397
317
  } else if (options.full === true || options.mongodb === true) {
398
- if (options.pullImage === true) {
399
- if (!options.kubeadm && !options.k3s) {
400
- // Only load if not kubeadm/k3s (Kind needs it)
401
- shellExec(`docker pull mongo:latest`);
402
- shellExec(`sudo kind load docker-image mongo:latest`);
403
- } else if (options.kubeadm || options.k3s)
404
- // For kubeadm/k3s, ensure it's available for containerd
405
- shellExec(`sudo crictl pull mongo:latest`);
406
- }
318
+ if (options.pullImage === true) Underpost.cluster.pullImage('mongo:latest', options);
407
319
  shellExec(
408
320
  `sudo kubectl create secret generic mongodb-keyfile --from-file=/home/dd/engine/engine-private/mongodb-keyfile --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
409
321
  );
@@ -464,6 +376,32 @@ EOF
464
376
  }
465
377
  },
466
378
 
379
+ /**
380
+ * @method pullImage
381
+ * @description Pulls a container image using the appropriate runtime based on the cluster type.
382
+ * - For Kind clusters: pulls via Docker and loads the image into the Kind cluster.
383
+ * - For Kubeadm/K3s clusters: pulls via crictl (containerd).
384
+ * @param {string} image - The fully-qualified container image reference (e.g. 'mongo:latest').
385
+ * @param {object} options - The cluster options object from `init`.
386
+ * @param {boolean} [options.kubeadm=false] - Whether the cluster is Kubeadm-based.
387
+ * @param {boolean} [options.k3s=false] - Whether the cluster is K3s-based.
388
+ * @memberof UnderpostCluster
389
+ */
390
+ pullImage(image, options = { kubeadm: false, k3s: false }) {
391
+ if (!options.kubeadm && !options.k3s) {
392
+ const tarPath = `/tmp/kind-image-${image.replace(/[\/:]/g, '-')}.tar`;
393
+ shellExec(`docker pull ${image}`);
394
+ shellExec(`docker save ${image} -o ${tarPath}`);
395
+ shellExec(
396
+ `for node in $(kind get nodes); do cat ${tarPath} | docker exec -i $node ctr --namespace=k8s.io images import -; done`,
397
+ );
398
+ shellExec(`rm -f ${tarPath}`);
399
+ } else if (options.kubeadm || options.k3s) {
400
+ // Kubeadm / K3s: use crictl to pull directly into containerd
401
+ shellExec(`sudo crictl pull ${image}`);
402
+ }
403
+ },
404
+
467
405
  /**
468
406
  * @method config
469
407
  * @description Configures host-level settings required for Kubernetes.
@@ -570,12 +508,15 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
570
508
  * @description Performs a complete reset of the Kubernetes cluster and its container environments.
571
509
  * This version focuses on correcting persistent permission errors (such as 'permission denied'
572
510
  * in coredns) by restoring SELinux security contexts and safely cleaning up cluster artifacts.
511
+ * Only the uninstall/delete commands specific to the given clusterType are executed; all other
512
+ * cleanup steps (log truncation, filesystem, network) are always run as generic k8s resets.
573
513
  * @param {object} [options] - Configuration options for the reset.
574
514
  * @param {string} [options.underpostRoot] - The root path of the underpost project.
575
515
  * @param {boolean} [options.removeVolumeHostPaths=false] - Whether to remove data from host paths used by Persistent Volumes.
516
+ * @param {string} [options.clusterType='kind'] - The type of cluster to reset: 'kind', 'kubeadm', or 'k3s'.
576
517
  * @memberof UnderpostCluster
577
518
  */
578
- async safeReset(options = { underpostRoot: '.', removeVolumeHostPaths: false }) {
519
+ async safeReset(options = { underpostRoot: '.', removeVolumeHostPaths: false, clusterType: 'kind' }) {
579
520
  logger.info('Starting a safe and comprehensive reset of Kubernetes and container environments...');
580
521
 
581
522
  try {
@@ -645,14 +586,22 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
645
586
  // Safely unmount pod filesystems to avoid errors.
646
587
  shellExec('sudo umount -f /var/lib/kubelet/pods/*/*');
647
588
 
648
- // Phase 3: Execute official uninstallation commands
649
- logger.info('Phase 3/7: Executing official reset and uninstallation commands...');
650
- logger.info(' -> Executing kubeadm reset...');
651
- shellExec('sudo kubeadm reset --force');
652
- logger.info(' -> Executing K3s uninstallation script if it exists...');
653
- shellExec('sudo /usr/local/bin/k3s-uninstall.sh');
654
- logger.info(' -> Deleting Kind clusters...');
655
- shellExec('kind get clusters | xargs -r -t -n1 kind delete cluster');
589
+ // Phase 3: Execute official uninstallation commands (type-specific)
590
+ const clusterType = options.clusterType || 'kind';
591
+ logger.info(
592
+ `Phase 3/7: Executing official reset/uninstallation commands for cluster type: '${clusterType}'...`,
593
+ );
594
+ if (clusterType === 'kubeadm') {
595
+ logger.info(' -> Executing kubeadm reset...');
596
+ shellExec('sudo kubeadm reset --force');
597
+ } else if (clusterType === 'k3s') {
598
+ logger.info(' -> Executing K3s uninstallation script if it exists...');
599
+ shellExec('sudo /usr/local/bin/k3s-uninstall.sh');
600
+ } else {
601
+ // Default: kind
602
+ logger.info(' -> Deleting Kind clusters...');
603
+ shellExec('kind get clusters | xargs -r -t -n1 kind delete cluster');
604
+ }
656
605
 
657
606
  // Phase 4: File system cleanup
658
607
  logger.info('Phase 4/7: Cleaning up remaining file system artifacts...');
@@ -672,9 +621,6 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
672
621
  // Remove iptables rules and CNI network interfaces.
673
622
  shellExec('sudo iptables -F');
674
623
  shellExec('sudo iptables -t nat -F');
675
- // Restore iptables rules
676
- shellExec(`chmod +x ${options.underpostRoot}/scripts/nat-iptables.sh`);
677
- shellExec(`${options.underpostRoot}/scripts/nat-iptables.sh`, { silent: true });
678
624
  shellExec('sudo ip link del cni0');
679
625
  shellExec('sudo ip link del flannel.1');
680
626
 
@@ -772,6 +718,11 @@ EOF`);
772
718
  shellExec(`chmod +x /usr/local/bin/helm`);
773
719
  shellExec(`sudo mv /usr/local/bin/helm /bin/helm`);
774
720
  shellExec(`sudo rm -rf get_helm.sh`);
721
+
722
+ // Install snap
723
+ shellExec(`sudo yum install -y snapd`);
724
+ shellExec(`sudo systemctl enable --now snapd.socket`);
725
+
775
726
  console.log('Host prerequisites installed successfully.');
776
727
  },
777
728
 
package/src/cli/deploy.js CHANGED
@@ -491,6 +491,7 @@ spec:
491
491
  retryPerTryTimeout: '',
492
492
  kindType: '',
493
493
  port: 0,
494
+ exposePort: 0,
494
495
  cmd: '',
495
496
  },
496
497
  ) {
@@ -573,11 +574,13 @@ EOF`);
573
574
  if (options.expose === true) {
574
575
  const kindType = options.kindType ? options.kindType : 'svc';
575
576
  const svc = Underpost.deploy.get(deployId, kindType)[0];
576
- const port = options.port
577
- ? options.port
578
- : kindType !== 'svc'
579
- ? 80
580
- : parseInt(svc[`PORT(S)`].split('/TCP')[0]);
577
+ const port = options.exposePort
578
+ ? parseInt(options.exposePort)
579
+ : options.port
580
+ ? parseInt(options.port)
581
+ : kindType !== 'svc'
582
+ ? 80
583
+ : parseInt(svc[`PORT(S)`].split('/TCP')[0]);
581
584
  logger.info(deployId, {
582
585
  svc,
583
586
  port,
package/src/cli/index.js CHANGED
@@ -214,6 +214,7 @@ program
214
214
  .option('--postgresql', 'Initializes the cluster with a PostgreSQL statefulset.')
215
215
  .option('--mongodb4', 'Initializes the cluster with a MongoDB 4.4 service.')
216
216
  .option('--valkey', 'Initializes the cluster with a Valkey service.')
217
+ .option('--ipfs', 'Initializes the cluster with an ipfs-cluster statefulset.')
217
218
  .option('--contour', 'Initializes the cluster with Project Contour base HTTPProxy and Envoy.')
218
219
  .option('--cert-manager', "Initializes the cluster with a Let's Encrypt production ClusterIssuer.")
219
220
  .option('--dedicated-gpu', 'Initializes the cluster with dedicated GPU base resources and environment settings.')
@@ -242,12 +243,12 @@ program
242
243
  .option('--init-host', 'Installs necessary Kubernetes node CLI tools (e.g., kind, kubeadm, docker, podman, helm).')
243
244
  .option('--uninstall-host', 'Uninstalls all host components installed by init-host.')
244
245
  .option('--config', 'Sets the base Kubernetes node configuration.')
245
- .option('--worker', 'Sets the context for a worker node.')
246
246
  .option('--chown', 'Sets the appropriate ownership for Kubernetes kubeconfig files.')
247
247
  .option('--k3s', 'Initializes the cluster using K3s (Lightweight Kubernetes).')
248
248
  .option('--hosts <hosts>', 'A comma-separated list of cluster hostnames or IP addresses.')
249
249
  .option('--remove-volume-host-paths', 'Removes specified volume host paths after execution.')
250
250
  .option('--namespace <namespace>', 'Kubernetes namespace for cluster operations (defaults to "default").')
251
+ .option('--replicas <replicas>', 'Sets a custom number of replicas for statefulset deployments.')
251
252
  .action(Underpost.cluster.init)
252
253
  .description('Manages Kubernetes clusters, defaulting to Kind cluster initialization.');
253
254
 
@@ -295,6 +296,10 @@ program
295
296
  .option('--namespace <namespace>', 'Kubernetes namespace for deployment operations (defaults to "default").')
296
297
  .option('--kind-type <kind-type>', 'Specifies the Kind cluster type for deployment operations.')
297
298
  .option('--port <port>', 'Sets up port forwarding from local to remote ports.')
299
+ .option(
300
+ '--expose-port <port>',
301
+ 'Sets the local:remote port to expose when --expose is active (overrides auto-detected service port).',
302
+ )
298
303
  .option('--cmd <cmd>', 'Custom initialization command for deployment (comma-separated commands).')
299
304
  .description('Manages application deployments, defaulting to deploying development pods.')
300
305
  .action(Underpost.deploy.callback);
@@ -613,37 +618,33 @@ program
613
618
 
614
619
  program
615
620
  .command('lxd')
616
- .option('--init', 'Initializes LXD on the current machine.')
617
- .option('--reset', 'Resets LXD on the current machine, deleting all configurations.')
618
- .option('--install', 'Installs LXD on the current machine.')
619
- .option('--dev', 'Sets the development context environment for LXD.')
620
- .option('--create-virtual-network', 'Creates an LXD virtual network bridge.')
621
- .option('--create-admin-profile', 'Creates an admin profile for LXD management.')
622
- .option('--control', 'Sets the context for a control node VM.')
623
- .option('--worker', 'Sets the context for a worker node VM.')
624
- .option('--create-vm <vm-id>', 'Creates default virtual machines with the specified ID.')
625
- .option('--init-vm <vm-id>', 'Retrieves the Underpost initialization script for the specified VM.')
626
- .option('--info-vm <vm-id>', 'Retrieves all information about the specified VM.')
627
- .option('--test <vm-id>', 'Tests the health, status, and network connectivity for a VM.')
628
- .option('--root-size <gb-size>', 'Sets the root partition size (in GB) for the VM.')
629
- .option('--k3s', 'Flag to indicate that the VM initialization is for a K3s cluster type.')
621
+ .option('--init', 'Initializes LXD on the current machine via preseed.')
622
+ .option('--reset', 'Removes the LXD snap and purges all data.')
623
+ .option('--install', 'Installs the LXD snap.')
624
+ .option('--dev', 'Use local paths instead of the global npm installation.')
625
+ .option('--create-virtual-network', 'Creates the lxdbr0 bridge network.')
626
+ .option('--ipv4-address <cidr>', 'IPv4 address/CIDR for the lxdbr0 bridge network (default: "10.250.250.1/24").')
627
+ .option('--create-admin-profile', 'Creates the admin-profile for VM management.')
628
+ .option('--control', 'Initialize the target VM as a K3s control plane node.')
629
+ .option('--worker', 'Initialize the target VM as a K3s worker node.')
630
+ .option('--create-vm <vm-name>', 'Copy the LXC launch command for a new K3s VM to the clipboard.')
631
+ .option('--delete-vm <vm-name>', 'Stop and delete the specified VM.')
632
+ .option('--init-vm <vm-name>', 'Run k3s-node-setup.sh on the specified VM (use with --control or --worker).')
633
+ .option('--info-vm <vm-name>', 'Display full configuration and status for the specified VM.')
634
+ .option('--test <vm-name>', 'Run connectivity and health checks on the specified VM.')
635
+ .option('--root-size <gb-size>', 'Root disk size in GiB for --create-vm (default: 32).')
630
636
  .option(
631
637
  '--join-node <nodes>',
632
- 'A comma-separated list of worker and control nodes to join (e.g., "k8s-worker-1,k8s-control").',
633
- )
634
- .option(
635
- '--expose <vm-name-ports>',
636
- 'Exposes specified ports on a VM (e.g., "k8s-control:80,443"). Multiple VM-port pairs can be comma-separated.',
637
- )
638
- .option(
639
- '--delete-expose <vm-name-ports>',
640
- 'Removes exposed ports on a VM (e.g., "k8s-control:80,443"). Multiple VM-port pairs can be comma-separated.',
641
- )
642
- .option('--workflow-id <workflow-id>', 'Sets the workflow ID context for LXD operations.')
643
- .option('--vm-id <vm-id>', 'Sets the VM ID context for LXD operations.')
644
- .option('--deploy-id <deploy-id>', 'Sets the deployment ID context for LXD operations.')
645
- .option('--namespace <namespace>', 'Kubernetes namespace for LXD operations (defaults to "default").')
646
- .description('Manages LXD containers and virtual machines.')
638
+ 'Join a K3s worker to a control plane. Standalone format: "workerName,controlName". ' +
639
+ 'When used with --init-vm --worker, provide just the control node name for auto-join.',
640
+ )
641
+ .option('--expose <vm-name:ports>', 'Proxy host ports to a VM (e.g., "k3s-control:80,443").')
642
+ .option('--delete-expose <vm-name:ports>', 'Remove proxied ports from a VM (e.g., "k3s-control:80,443").')
643
+ .option('--workflow-id <workflow-id>', 'Workflow ID to execute via runWorkflow.')
644
+ .option('--vm-id <vm-name>', 'Target VM name for workflow execution.')
645
+ .option('--deploy-id <deploy-id>', 'Deployment ID context for workflow execution.')
646
+ .option('--namespace <namespace>', 'Kubernetes namespace context (defaults to "default").')
647
+ .description('Manages LXD virtual machines as K3s nodes (control plane or workers).')
647
648
  .action(Underpost.lxd.callback);
648
649
 
649
650
  program
@@ -0,0 +1,184 @@
1
+ /**
2
+ * IPFS Cluster module for managing ipfs-cluster StatefulSet deployment on Kubernetes.
3
+ * @module src/cli/ipfs.js
4
+ * @namespace UnderpostIPFS
5
+ */
6
+
7
+ import { loggerFactory } from '../server/logger.js';
8
+ import { shellExec } from '../server/process.js';
9
+ import fs from 'fs-extra';
10
+ import Underpost from '../index.js';
11
+
12
+ const logger = loggerFactory(import.meta);
13
+
14
+ /**
15
+ * @class UnderpostIPFS
16
+ * @description Manages deployment of an ipfs-cluster StatefulSet on Kubernetes.
17
+ * Credentials (cluster secret + peer identity) are generated once and persisted
18
+ * to engine-private/ so the cluster identity survives redeployments.
19
+ * @memberof UnderpostIPFS
20
+ */
21
class UnderpostIPFS {
  static API = {
    /**
     * @method resolveCredentials
     * @description Resolves the IPFS cluster credentials from engine-private/ if they
     * already exist, otherwise generates new ones (hex cluster secret + peer identity
     * via `ipfs/ipfs-cluster init` in a throwaway docker container) and persists them
     * with mode 0o600 so the cluster identity survives redeployments.
     * @param {string} privateDir - Absolute path to the engine-private directory.
     * @returns {{ CLUSTER_SECRET: string, IDENTITY_JSON: { id: string, private_key: string } }}
     * @memberof UnderpostIPFS
     */
    resolveCredentials(privateDir) {
      const secretPath = `${privateDir}/ipfs-cluster-secret`;
      const identityPath = `${privateDir}/ipfs-cluster-identity.json`;

      if (fs.existsSync(secretPath) && fs.existsSync(identityPath)) {
        logger.info('Reusing existing IPFS cluster credentials from engine-private/');
        return {
          CLUSTER_SECRET: fs.readFileSync(secretPath, 'utf8').trim(),
          IDENTITY_JSON: JSON.parse(fs.readFileSync(identityPath, 'utf8')),
        };
      }

      logger.info('Generating new IPFS cluster credentials and persisting to engine-private/');

      // ipfs-cluster-service requires CLUSTER_SECRET as a 64-char hex string.
      // base64 (openssl rand -base64 32) contains '/', '+', '=' which are invalid hex bytes.
      const CLUSTER_SECRET = shellExec("od -vN 32 -An -tx1 /dev/urandom | tr -d ' \\n'", {
        stdout: true,
      }).trim();

      // Generate a fresh peer identity in a temp dir; the docker image writes
      // /data/ipfs-cluster/identity.json on `init -f`.
      const tmpDir = '/tmp/ipfs-cluster-identity';
      shellExec(`rm -rf ${tmpDir} && mkdir -p ${tmpDir}`);
      shellExec(`docker run --rm -v ${tmpDir}:/data/ipfs-cluster ipfs/ipfs-cluster init -f`);
      // fs-extra is already in scope: read the JSON directly instead of shelling out to `cat`.
      const IDENTITY_JSON = fs.readJSONSync(`${tmpDir}/identity.json`);
      shellExec(`rm -rf ${tmpDir}`);

      // Persist with owner-only permissions; these files hold private key material.
      fs.ensureDirSync(privateDir);
      fs.writeFileSync(secretPath, CLUSTER_SECRET, { mode: 0o600 });
      fs.writeFileSync(identityPath, JSON.stringify(IDENTITY_JSON, null, 2), { mode: 0o600 });

      logger.info(`IPFS cluster credentials saved (peer ID: ${IDENTITY_JSON.id})`);

      return { CLUSTER_SECRET, IDENTITY_JSON };
    },

    /**
     * @method teardown
     * @description Deletes the existing ipfs-cluster StatefulSet, its Kubernetes Secret,
     * env ConfigMap, and all PVCs so the next deployment initialises a clean data volume
     * (ensuring the correct datastore profile is applied by the init container).
     * @param {object} options
     * @param {string} options.namespace - Kubernetes namespace.
     * @param {number} ipfsReplicas - Number of replicas whose PVCs must be removed.
     * @memberof UnderpostIPFS
     */
    teardown(options, ipfsReplicas) {
      logger.info(`Tearing down existing ipfs-cluster deployment in namespace '${options.namespace}'`);
      shellExec(`kubectl delete statefulset ipfs-cluster -n ${options.namespace} --ignore-not-found`);
      shellExec(`kubectl delete secret ipfs-cluster-secret -n ${options.namespace} --ignore-not-found`);
      shellExec(`kubectl delete configmap env-config -n ${options.namespace} --ignore-not-found`);
      // One PVC pair (cluster-storage + ipfs-storage) exists per StatefulSet ordinal.
      for (let i = 0; i < ipfsReplicas; i++) {
        shellExec(
          `kubectl delete pvc cluster-storage-ipfs-cluster-${i} ipfs-storage-ipfs-cluster-${i} -n ${options.namespace} --ignore-not-found`,
        );
      }
    },

    /**
     * @method applySecrets
     * @description Creates (or idempotently updates) the Kubernetes Secret and env ConfigMap
     * that the StatefulSet pods read at startup.
     * - Secret `ipfs-cluster-secret`: cluster-secret + bootstrap-peer-priv-key
     * - ConfigMap `env-config`: bootstrap-peer-id + CLUSTER_SVC_NAME
     * @param {{ CLUSTER_SECRET: string, IDENTITY_JSON: { id: string, private_key: string } }} credentials
     * @param {object} options
     * @param {string} options.namespace - Kubernetes namespace.
     * @memberof UnderpostIPFS
     */
    applySecrets({ CLUSTER_SECRET, IDENTITY_JSON }, options) {
      logger.info('Applying IPFS cluster Kubernetes Secret and env ConfigMap');

      // `create --dry-run=client -o yaml | apply` makes both operations idempotent.
      shellExec(
        `kubectl create secret generic ipfs-cluster-secret \
  --from-literal=cluster-secret=${CLUSTER_SECRET} \
  --from-literal=bootstrap-peer-priv-key=${IDENTITY_JSON.private_key} \
  --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
      );

      shellExec(
        `kubectl create configmap env-config \
  --from-literal=bootstrap-peer-id=${IDENTITY_JSON.id} \
  --from-literal=CLUSTER_SVC_NAME=ipfs-cluster \
  --dry-run=client -o yaml | kubectl apply -f - -n ${options.namespace}`,
      );
    },

    /**
     * @method applyManifests
     * @description Applies host-level sysctl tuning (Kind clusters only), the storage class,
     * the kustomize manifests, and scales the StatefulSet to the requested replica count.
     * @param {object} options
     * @param {string} options.namespace - Kubernetes namespace.
     * @param {boolean} [options.kubeadm] - Whether the cluster is Kubeadm-based.
     * @param {boolean} [options.k3s] - Whether the cluster is K3s-based.
     * @param {string} underpostRoot - Absolute path to the underpost project root.
     * @param {number} ipfsReplicas - Desired replica count.
     * @memberof UnderpostIPFS
     */
    applyManifests(options, underpostRoot, ipfsReplicas) {
      // Apply UDP buffer sysctl on every Kind node so QUIC (used by IPFS) can reach the
      // recommended 7.5 MB buffer size. Kind nodes are containers and do NOT inherit the
      // host sysctl values, so this must be set via docker exec on each node directly.
      if (!options.kubeadm && !options.k3s) {
        logger.info('Applying UDP buffer sysctl on Kind nodes');
        shellExec(
          `for node in $(kind get nodes); do docker exec $node sysctl -w net.core.rmem_max=7500000 net.core.wmem_max=7500000; done`,
        );
      }

      shellExec(`kubectl apply -f ${underpostRoot}/manifests/ipfs/storage-class.yaml`);
      shellExec(`kubectl apply -k ${underpostRoot}/manifests/ipfs -n ${options.namespace}`);

      // statefulset.yaml hardcodes replicas: 3 as the ceiling; scale down here if needed.
      shellExec(`kubectl scale statefulset ipfs-cluster --replicas=${ipfsReplicas} -n ${options.namespace}`);
    },

    /**
     * @method deploy
     * @description Full orchestration of the ipfs-cluster StatefulSet deployment:
     * optionally pulls images, resolves or generates credentials, tears down any existing
     * deployment, applies secrets, applies manifests, and waits for all pods to be Running.
     * @param {object} options - Cluster init options forwarded from UnderpostCluster.API.init.
     * @param {string} options.namespace - Kubernetes namespace.
     * @param {boolean} [options.pullImage] - Whether to pull container images first.
     * @param {boolean} [options.kubeadm] - Whether the cluster is Kubeadm-based.
     * @param {boolean} [options.k3s] - Whether the cluster is K3s-based.
     * @param {string|number} [options.replicas] - Override replica count (defaults to 3).
     * @param {string} underpostRoot - Absolute path to the underpost project root.
     * @memberof UnderpostIPFS
     */
    async deploy(options, underpostRoot) {
      if (options.pullImage === true) {
        Underpost.cluster.pullImage('ipfs/kubo:latest', options);
        Underpost.cluster.pullImage('ipfs/ipfs-cluster:latest', options);
      }

      const credentials = Underpost.ipfs.resolveCredentials(`${underpostRoot}/engine-private`);

      // CLI option values arrive as strings; always parse with an explicit radix.
      const ipfsReplicas = options.replicas ? parseInt(options.replicas, 10) : 3;

      Underpost.ipfs.teardown(options, ipfsReplicas);
      Underpost.ipfs.applySecrets(credentials, options);
      Underpost.ipfs.applyManifests(options, underpostRoot, ipfsReplicas);

      // Wait (up to 15 min per pod, polling every 1s) for each ordinal to reach Running.
      logger.info(`Waiting for ${ipfsReplicas} ipfs-cluster pod(s) to reach Running state`);
      for (let i = 0; i < ipfsReplicas; i++) {
        await Underpost.test.statusMonitor(`ipfs-cluster-${i}`, 'Running', 'pods', 1000, 60 * 15);
      }
    },
  };
}
183
+
184
+ export default UnderpostIPFS;