underpost 2.8.6 → 2.8.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/.vscode/extensions.json +36 -3
  2. package/.vscode/settings.json +2 -0
  3. package/CHANGELOG.md +24 -4
  4. package/Dockerfile +9 -10
  5. package/README.md +41 -2
  6. package/bin/build.js +2 -2
  7. package/bin/db.js +1 -0
  8. package/bin/deploy.js +1521 -130
  9. package/bin/file.js +8 -0
  10. package/bin/index.js +1 -218
  11. package/cli.md +530 -0
  12. package/conf.js +4 -0
  13. package/docker-compose.yml +1 -1
  14. package/jsdoc.json +1 -1
  15. package/manifests/deployment/adminer/deployment.yaml +32 -0
  16. package/manifests/deployment/adminer/kustomization.yaml +7 -0
  17. package/manifests/deployment/adminer/service.yaml +13 -0
  18. package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
  19. package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
  20. package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
  21. package/manifests/deployment/fastapi/backend-service.yml +19 -0
  22. package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
  23. package/manifests/deployment/fastapi/frontend-service.yml +15 -0
  24. package/manifests/deployment/fastapi/initial_data.sh +56 -0
  25. package/manifests/deployment/kafka/deployment.yaml +69 -0
  26. package/manifests/deployment/spark/spark-pi-py.yaml +21 -0
  27. package/manifests/envoy-service-nodeport.yaml +23 -0
  28. package/manifests/kubeadm-calico-config.yaml +119 -0
  29. package/manifests/kubelet-config.yaml +65 -0
  30. package/manifests/lxd/lxd-admin-profile.yaml +17 -0
  31. package/manifests/lxd/lxd-preseed.yaml +30 -0
  32. package/manifests/lxd/underpost-setup.sh +163 -0
  33. package/manifests/maas/lxd-preseed.yaml +32 -0
  34. package/manifests/maas/maas-setup.sh +82 -0
  35. package/manifests/mariadb/statefulset.yaml +2 -1
  36. package/manifests/mariadb/storage-class.yaml +10 -0
  37. package/manifests/mongodb/kustomization.yaml +1 -1
  38. package/manifests/mongodb/statefulset.yaml +12 -11
  39. package/manifests/mongodb/storage-class.yaml +9 -0
  40. package/manifests/mongodb-4.4/service-deployment.yaml +3 -3
  41. package/manifests/mysql/kustomization.yaml +7 -0
  42. package/manifests/mysql/pv-pvc.yaml +27 -0
  43. package/manifests/mysql/statefulset.yaml +55 -0
  44. package/manifests/postgresql/configmap.yaml +9 -0
  45. package/manifests/postgresql/kustomization.yaml +10 -0
  46. package/manifests/postgresql/pv.yaml +15 -0
  47. package/manifests/postgresql/pvc.yaml +13 -0
  48. package/manifests/postgresql/service.yaml +10 -0
  49. package/manifests/postgresql/statefulset.yaml +37 -0
  50. package/manifests/valkey/service.yaml +3 -9
  51. package/manifests/valkey/statefulset.yaml +12 -13
  52. package/package.json +3 -9
  53. package/src/api/default/default.service.js +1 -1
  54. package/src/api/user/user.service.js +14 -11
  55. package/src/cli/baremetal.js +60 -0
  56. package/src/cli/cluster.js +551 -65
  57. package/src/cli/cron.js +39 -8
  58. package/src/cli/db.js +20 -10
  59. package/src/cli/deploy.js +288 -86
  60. package/src/cli/env.js +10 -4
  61. package/src/cli/fs.js +21 -9
  62. package/src/cli/image.js +116 -124
  63. package/src/cli/index.js +319 -0
  64. package/src/cli/lxd.js +395 -0
  65. package/src/cli/monitor.js +236 -0
  66. package/src/cli/repository.js +14 -8
  67. package/src/client/components/core/Account.js +28 -24
  68. package/src/client/components/core/Blockchain.js +1 -1
  69. package/src/client/components/core/CalendarCore.js +14 -84
  70. package/src/client/components/core/CommonJs.js +2 -1
  71. package/src/client/components/core/Css.js +0 -1
  72. package/src/client/components/core/CssCore.js +10 -2
  73. package/src/client/components/core/Docs.js +1 -1
  74. package/src/client/components/core/EventsUI.js +3 -3
  75. package/src/client/components/core/FileExplorer.js +86 -78
  76. package/src/client/components/core/JoyStick.js +2 -2
  77. package/src/client/components/core/LoadingAnimation.js +1 -17
  78. package/src/client/components/core/LogIn.js +3 -3
  79. package/src/client/components/core/LogOut.js +1 -1
  80. package/src/client/components/core/Modal.js +14 -8
  81. package/src/client/components/core/Panel.js +19 -61
  82. package/src/client/components/core/PanelForm.js +13 -22
  83. package/src/client/components/core/Recover.js +3 -3
  84. package/src/client/components/core/RichText.js +1 -11
  85. package/src/client/components/core/Router.js +3 -1
  86. package/src/client/components/core/SignUp.js +2 -2
  87. package/src/client/components/default/RoutesDefault.js +3 -2
  88. package/src/client/services/default/default.management.js +45 -38
  89. package/src/client/ssr/Render.js +2 -0
  90. package/src/index.js +34 -2
  91. package/src/mailer/MailerProvider.js +3 -0
  92. package/src/runtime/lampp/Dockerfile +65 -0
  93. package/src/server/client-build.js +13 -0
  94. package/src/server/conf.js +151 -1
  95. package/src/server/dns.js +56 -18
  96. package/src/server/json-schema.js +77 -0
  97. package/src/server/logger.js +3 -3
  98. package/src/server/network.js +7 -122
  99. package/src/server/peer.js +2 -2
  100. package/src/server/proxy.js +4 -4
  101. package/src/server/runtime.js +24 -11
  102. package/src/server/start.js +122 -0
  103. package/src/server/valkey.js +27 -13
package/bin/deploy.js CHANGED
@@ -26,15 +26,24 @@ import {
  fixDependencies,
  setUpProxyMaintenanceServer,
  writeEnv,
+ getUnderpostRootPath,
+ buildCliDoc,
  } from '../src/server/conf.js';
  import { buildClient } from '../src/server/client-build.js';
- import { range, setPad, timer, uniqueArray } from '../src/client/components/core/CommonJs.js';
+ import { range, s4, setPad, timer, uniqueArray } from '../src/client/components/core/CommonJs.js';
  import { MongooseDB } from '../src/db/mongo/MongooseDB.js';
  import { Lampp } from '../src/runtime/lampp/Lampp.js';
  import { DefaultConf } from '../conf.js';
  import { JSONweb } from '../src/server/client-formatted.js';
- import ejs from 'easy-json-schema';
+
  import { Xampp } from '../src/runtime/xampp/Xampp.js';
+ import { ejs } from '../src/server/json-schema.js';
+ import { getLocalIPv4Address, ip } from '../src/server/dns.js';
+ import { Downloader } from '../src/server/downloader.js';
+ import colors from 'colors';
+ import { program } from '../src/cli/index.js';
+
+ colors.enable();

  const logger = loggerFactory(import.meta);

@@ -42,6 +51,82 @@ logger.info('argv', process.argv);

  const [exe, dir, operator] = process.argv;

+ const updateVirtualRoot = async ({ nfsHostPath, IP_ADDRESS, ipaddr }) => {
+ const steps = [
+ `apt update`,
+ `ln -sf /lib/systemd/systemd /sbin/init`,
+ // `sudo apt install linux-modules-extra-6.8.0-31-generic`,
+ `apt install -y sudo`,
+ `apt install -y ntp`,
+ `apt install -y openssh-server`,
+ `apt install -y iptables`,
+ `update-alternatives --set iptables /usr/sbin/iptables-legacy`,
+ `update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy`,
+ `apt install -y locales`,
+ `apt install -y cloud-init`,
+ `mkdir -p /var/lib/cloud`,
+ `chown -R root:root /var/lib/cloud`,
+ `chmod -R 0755 /var/lib/cloud`,
+ `mkdir -p /home/root/.ssh`,
+ `echo '${fs.readFileSync(
+ `/home/dd/engine/engine-private/deploy/id_rsa.pub`,
+ 'utf8',
+ )}' >> /home/root/.ssh/authorized_keys`,
+ `chmod 700 /home/root/.ssh`,
+ `chmod 600 /home/root/.ssh/authorized_keys`,
+ `systemctl enable ssh`,
+ `systemctl enable ntp`,
+ `apt install -y linux-generic-hwe-24.04`,
+ `modprobe ip_tables`,
+ `cat <<EOF_MAAS_CFG > /etc/cloud/cloud.cfg.d/90_maas.cfg
+ datasource_list: [ MAAS ]
+ datasource:
+ MAAS:
+ metadata_url: http://${IP_ADDRESS}:5248/MAAS/metadata
+ users:
+ - name: ${process.env.MAAS_ADMIN_USERNAME}
+ ssh_authorized_keys:
+ - ${fs.readFileSync(`/home/dd/engine/engine-private/deploy/id_rsa.pub`, 'utf8')}
+ sudo: "ALL=(ALL) NOPASSWD:ALL"
+ groups: sudo
+ shell: /bin/bash
+ packages:
+ - git
+ - htop
+ - ufw
+ # package_update: true
+ runcmd:
+ - ufw enable
+ - ufw allow ssh
+ resize_rootfs: false
+ growpart:
+ mode: off
+ network:
+ version: 2
+ ethernets:
+ ${process.env.RPI4_INTERFACE_NAME}:
+ dhcp4: true
+ addresses:
+ - ${ipaddr}/24
+ EOF_MAAS_CFG`,
+ ];
+
+ shellExec(`sudo chroot ${nfsHostPath} /usr/bin/qemu-aarch64-static /bin/bash <<'EOF'
+ ${steps
+ .map(
+ (s, i) => `echo "step ${i + 1}/${steps.length}: ${s.split('\n')[0]}"
+ ${s}
+ `,
+ )
+ .join(``)}
+ EOF`);
+
+ shellExec(`sudo chroot ${nfsHostPath} /usr/bin/qemu-aarch64-static /bin/bash <<'EOF'
+ echo "nameserver ${process.env.MAAS_DNS}" | tee /etc/resolv.conf > /dev/null
+ apt update
+ EOF`);
+ };
+
  try {
  switch (operator) {
  case 'save':
@@ -673,8 +758,8 @@ try {
  }

  case 'version-build': {
- const newVersion = process.argv[3];
  const originPackageJson = JSON.parse(fs.readFileSync(`package.json`, 'utf8'));
+ const newVersion = process.argv[3] ?? originPackageJson.version;
  const { version } = originPackageJson;
  originPackageJson.version = newVersion;
  fs.writeFileSync(`package.json`, JSON.stringify(originPackageJson, null, 4), 'utf8');
@@ -713,6 +798,13 @@ try {
  .replaceAll(`engine.version: '${version}'`, `engine.version: '${newVersion}'`),
  'utf8',
  );
+ fs.writeFileSync(
+ `./manifests/deployment/dd-template-development/deployment.yaml`,
+ fs
+ .readFileSync(`./manifests/deployment/dd-template-development/deployment.yaml`, 'utf8')
+ .replaceAll(`underpost:v${version}`, `underpost:v${newVersion}`),
+ 'utf8',
+ );

  if (fs.existsSync(`./.github/workflows/docker-image.yml`))
  fs.writeFileSync(
@@ -728,16 +820,19 @@ try {
  fs.readFileSync(`./src/index.js`, 'utf8').replaceAll(`${version}`, `${newVersion}`),
  'utf8',
  );
-
+ shellExec(`node bin/deploy cli-docs`);
  shellExec(`node bin/deploy update-dependencies`);
  shellExec(`auto-changelog`);
  shellExec(`node bin/build dd`);
- shellExec(`node bin deploy dd --build-manifest --sync --info-router`);
- shellExec(`node bin deploy dd production --build-manifest --sync --info-router`);
+ shellExec(`node bin deploy --kubeadm --build-manifest --sync --info-router --replicas 1 dd`);
+ shellExec(`node bin deploy --kubeadm --build-manifest --sync --info-router --replicas 1 dd production`);
  break;
  }

  case 'version-deploy': {
+ shellExec(
+ `underpost secret underpost --create-from-file /home/dd/engine/engine-private/conf/dd-cron/.env.production`,
+ );
  shellExec(`node bin/build dd conf`);
  shellExec(`git add . && cd ./engine-private && git add .`);
  shellExec(`node bin cmt . ci package-pwa-microservices-template`);
@@ -834,6 +929,16 @@ ${shellExec(`git log | grep Author: | sort -u`, { stdout: true }).split(`\n`).jo
  };
  DefaultConf.server[host][path].apiBaseProxyPath = '/';
  DefaultConf.server[host][path].apiBaseHost = 'www.nexodev.org';
+ } else if (confName === 'template') {
+ const host = 'default.net';
+ const path = '/';
+ DefaultConf.server[host][path].valkey = {
+ port: 6379,
+ host: 'valkey-service.default.svc.cluster.local',
+ };
+ // mongodb-0.mongodb-service
+ DefaultConf.server[host][path].db.host = 'mongodb://mongodb-service:27017';
+ confName = '';
  } else if (confName) {
  DefaultConf.client = JSON.parse(fs.readFileSync(`./engine-private/conf/${confName}/conf.client.json`, 'utf8'));
  DefaultConf.server = JSON.parse(fs.readFileSync(`./engine-private/conf/${confName}/conf.server.json`, 'utf8'));
@@ -863,193 +968,1479 @@ ${shellExec(`git log | grep Author: | sort -u`, { stdout: true }).split(`\n`).jo

  break;
  }
- case 'ssh-export-server-keys': {
- fs.copyFile('/etc/ssh/ssh_host_rsa_key', './engine-private/deploy/ssh_host_rsa_key');
- fs.copyFile('/etc/ssh/ssh_host_rsa_key.pub', './engine-private/deploy/ssh_host_rsa_key.pub');
+
+ case 'ssh': {
+ const host = process.argv[3] ?? `root@${await ip.public.ipv4()}`;
+ const domain = host.split('@')[1];
+ const user = 'root'; // host.split('@')[0];
+ const password = process.argv[4] ?? '';
+ const port = 22;
+
+ const setUpSSH = () => {
+ // Required port forwarding mapping
+ // ssh TCP 2222 22 <local-server-ip>
+ // ssh UDP 2222 22 <local-server-ip>
+
+ // Remote connect via public key
+ // ssh -i <key-path> <user>@<host>:2222
+
+ shellExec(`cat ./engine-private/deploy/id_rsa.pub > ~/.ssh/authorized_keys`);
+
+ // local trust on first use validator
+ // check ~/.ssh/known_hosts
+
+ // shellExec(`sudo sed -i -e "s@#PasswordAuthentication yes@PasswordAuthentication no@g" /etc/ssh/sshd_config`);
+ // shellExec(`sudo sed -i -e "s@#UsePAM no@UsePAM yes@g" /etc/ssh/sshd_config`);
+
+ // Include /etc/ssh/sshd_config.d/*.conf
+ // sudo tee /etc/ssh/sshd_config.d/99-custom.conf
+ shellExec(`sudo tee /etc/ssh/sshd_config <<EOF
+ PasswordAuthentication no
+ ChallengeResponseAuthentication yes
+ UsePAM yes
+ PubkeyAuthentication Yes
+ RSAAuthentication Yes
+ PermitRootLogin Yes
+ X11Forwarding yes
+ X11DisplayOffset 10
+ LoginGraceTime 120
+ StrictModes yes
+ SyslogFacility AUTH
+ LogLevel INFO
+ #HostKey /etc/ssh/ssh_host_ecdsa_key
+ HostKey /etc/ssh/ssh_host_ed25519_key
+ #HostKey /etc/ssh/ssh_host_rsa_key
+ AuthorizedKeysFile ~/.ssh/authorized_keys
+ Subsystem sftp /usr/libexec/openssh/sftp-server
+ ListenAddress 0.0.0.0
+ ListenAddress ::
+ ListenAddress ${domain}
+ ListenAddress ${domain}:22
+ EOF`);
+
+ shellExec(`sudo chmod 700 ~/.ssh/`);
+ shellExec(`sudo chmod 600 ~/.ssh/authorized_keys`);
+ shellExec(`sudo chmod 644 ~/.ssh/known_hosts`);
+ shellExec(`sudo chmod 600 ~/.ssh/id_rsa`);
+ shellExec(`sudo chmod 600 /etc/ssh/ssh_host_ed25519_key`);
+ shellExec(`chown -R ${user}:${user} ~/.ssh`);
+
+ shellExec(`ufw allow ${port}/tcp`);
+ shellExec(`ufw allow ${port}/udp`);
+ shellExec(`ufw allow ssh`);
+ shellExec(`ufw allow from 192.168.0.0/16 to any port 22`);
+
+ // active ssh-agent
+ shellExec('eval `ssh-agent -s`' + ` && ssh-add ~/.ssh/id_rsa` + ` && ssh-add -l`);
+ // remove all
+ // shellExec(`ssh-add -D`);
+ // remove single
+ // shellExec(`ssh-add -d ~/.ssh/id_rsa`);
+
+ // shellExec(`echo "@${host.split(`@`)[1]} * $(cat ~/.ssh/id_rsa.pub)" > ~/.ssh/known_hosts`);
+ shellExec('eval `ssh-agent -s`' + `&& ssh-keyscan -H -t ed25519 ${host.split(`@`)[1]} > ~/.ssh/known_hosts`);
+ // shellExec(`sudo echo "" > ~/.ssh/known_hosts`);
+
+ // ssh-copy-id -i ~/.ssh/id_rsa.pub -p <port_number> <username>@<host>
+ shellExec(`ssh-copy-id -i ~/.ssh/id_rsa.pub -p ${port} ${host}`);
+ // debug:
+ // shellExec(`ssh -vvv ${host}`);
+
+ shellExec(`sudo cp ./engine-private/deploy/id_rsa ~/.ssh/id_rsa`);
+ shellExec(`sudo cp ./engine-private/deploy/id_rsa.pub ~/.ssh/id_rsa.pub`);
+
+ shellExec(`sudo echo "" > /etc/ssh/ssh_host_ecdsa_key`);
+ shellExec(`sudo cp ./engine-private/deploy/id_rsa /etc/ssh/ssh_host_ed25519_key`);
+ shellExec(`sudo echo "" > /etc/ssh/ssh_host_rsa_key`);
+
+ shellExec(`sudo echo "" > /etc/ssh/ssh_host_ecdsa_key.pub`);
+ shellExec(`sudo cp ./engine-private/deploy/id_rsa.pub /etc/ssh/ssh_host_ed25519_key.pub`);
+ shellExec(`sudo echo "" > /etc/ssh/ssh_host_rsa_key.pub`);
+
+ shellExec(`sudo systemctl enable sshd`);
+ shellExec(`sudo systemctl restart sshd`);
+
+ const status = shellExec(`sudo systemctl status sshd`, { silent: true, stdout: true });
+ console.log(
+ status.match('running') ? status.replaceAll(`running`, `running`.green) : `ssh service not running`.red,
+ );
+ };
+
+ if (process.argv.includes('import')) {
+ setUpSSH();
+ break;
+ }
+
+ shellExec(`sudo rm -rf ./id_rsa`);
+ shellExec(`sudo rm -rf ./id_rsa.pub`);
+
+ if (process.argv.includes('legacy'))
+ shellExec(`ssh-keygen -t rsa -b 4096 -f id_rsa -N "${password}" -q -C "${host}"`);
+ else shellExec(`ssh-keygen -t ed25519 -f id_rsa -N "${password}" -q -C "${host}"`);
+
+ shellExec(`sudo cp ./id_rsa ~/.ssh/id_rsa`);
+ shellExec(`sudo cp ./id_rsa.pub ~/.ssh/id_rsa.pub`);
+
+ shellExec(`sudo cp ./id_rsa ./engine-private/deploy/id_rsa`);
+ shellExec(`sudo cp ./id_rsa.pub ./engine-private/deploy/id_rsa.pub`);
+
+ shellExec(`sudo rm -rf ./id_rsa`);
+ shellExec(`sudo rm -rf ./id_rsa.pub`);
+ setUpSSH();
  break;
  }
- case 'ssh-import-server-keys': {
- fs.copyFile('./engine-private/deploy/ssh_host_rsa_key', '/etc/ssh/ssh_host_rsa_key');
- fs.copyFile('./engine-private/deploy/ssh_host_rsa_key.pub', '/etc/ssh/ssh_host_rsa_key.pub');
+
+ case 'valkey': {
+ if (!process.argv.includes('server')) {
+ if (process.argv.includes('rocky')) {
+ // shellExec(`yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm`);
+ // shellExec(`sudo percona-release enable valkey experimental`);
+ shellExec(`sudo dnf install valkey`);
+ shellExec(`chown -R valkey:valkey /etc/valkey`);
+ shellExec(`chown -R valkey:valkey /var/lib/valkey`);
+ shellExec(`chown -R valkey:valkey /var/log/valkey`);
+ shellExec(`sudo systemctl enable valkey.service`);
+ shellExec(`sudo systemctl start valkey`);
+ shellExec(`valkey-cli ping`);
+ } else {
+ shellExec(`cd /home/dd && git clone https://github.com/valkey-io/valkey.git`);
+ shellExec(`cd /home/dd/valkey && make`);
+ shellExec(`apt install valkey-tools`); // valkey-cli
+ }
+ }
+ if (process.argv.includes('rocky')) {
+ shellExec(`sudo systemctl stop valkey`);
+ shellExec(`sudo systemctl start valkey`);
+ } else shellExec(`cd /home/dd/valkey && ./src/valkey-server`);
+
+ break;
+ }
+
+ case 'valkey-service': {
+ shellExec(`pm2 start bin/deploy.js --node-args=\"--max-old-space-size=8192\" --name valkey -- valkey server`);
+ break;
+ }
+
+ case 'update-instances': {
+ shellExec(`node bin deploy dd production --sync --build-manifest --info-router --dashboard-update`);
+ shellExec(`node bin cron --dashboard-update --init`);
+ const deployId = 'dd-core';
+ const host = 'www.nexodev.org';
+ const path = '/';
+
+ {
+ const outputPath = './engine-private/instances';
+ if (fs.existsSync(outputPath)) fs.mkdirSync(outputPath, { recursive: true });
+ const collection = 'instances';
+ if (process.argv.includes('export'))
+ shellExec(
+ `node bin db --export --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+ );
+ if (process.argv.includes('import'))
+ shellExec(
+ `node bin db --import --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+ );
+ }
+ {
+ const outputPath = './engine-private/crons';
+ if (fs.existsSync(outputPath)) fs.mkdirSync(outputPath, { recursive: true });
+ const collection = 'crons';
+ if (process.argv.includes('export'))
+ shellExec(
+ `node bin db --export --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+ );
+ if (process.argv.includes('import'))
+ shellExec(
+ `node bin db --import --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+ );
+ }
+
  break;
  }
- case 'ssh-import-client-keys': {
- const host = process.argv[3];
+
+ case 'cli-docs': {
+ buildCliDoc(program);
+ break;
+ }
+
+ case 'monitor': {
  shellExec(
- `node bin/deploy set-ssh-keys ./engine-private/deploy/ssh_host_rsa_key ${host ? ` ${host}` : ``} ${
- process.argv.includes('clean') ? 'clean' : ''
+ `node bin monitor ${process.argv[6] === 'sync' ? '--sync ' : ''}--type ${process.argv[3]} ${process.argv[4]} ${
+ process.argv[5]
  }`,
+ {
+ async: true,
+ },
  );
  break;
  }
- case 'ssh-keys': {
- // create ssh keys
- const sshAccount = process.argv[3]; // [sudo username]@[host/ip]
- const destPath = process.argv[4];
- // shellExec(`ssh-keygen -t ed25519 -C "${sshAccount}" -f ${destPath}`);
- if (fs.existsSync(destPath)) {
- fs.removeSync(destPath);
- fs.removeSync(destPath + '.pub');
+
+ case 'postgresql': {
+ if (process.argv.includes('install')) {
+ shellExec(`sudo dnf install -y postgresql-server postgresql`);
+ shellExec(`sudo postgresql-setup --initdb`);
+ shellExec(`chown postgres /var/lib/pgsql/data`);
+ shellExec(`sudo systemctl enable postgresql.service`);
+ shellExec(`sudo systemctl start postgresql.service`);
+ } else {
+ shellExec(`sudo systemctl enable postgresql.service`);
+ shellExec(`sudo systemctl restart postgresql.service`);
+ }
+
+ shellExec(`sudo systemctl status postgresql.service`);
+
+ // sudo systemctl stop postgresql
+ // sudo systemctl disable postgresql
+
+ // psql login
+ // psql -U <user> -h 127.0.0.1 -W <db-name>
+
+ // gedit /var/lib/pgsql/data/pg_hba.conf
+ // host <db-name> <db-user> <db-host> md5
+ // local all postgres trust
+ // # "local" is for Unix domain socket connections only
+ // local all all md5
+ // # IPv4 local connections:
+ // host all all 127.0.0.1/32 md5
+ // # IPv6 local connections:
+ // host all all ::1/128 md5
+
+ // gedit /var/lib/pgsql/data/postgresql.conf
+ // listen_addresses = '*'
+
+ break;
+ }
+
+ case 'postgresql-14': {
+ if (process.argv.includes('install')) {
+ shellExec(`sudo dnf module reset postgresql -y`);
+ shellExec(`sudo dnf -qy module disable postgresql`);
+
+ shellExec(`sudo systemctl stop postgresql-14`);
+ shellExec(`sudo systemctl disable postgresql-14`);
+
+ shellExec(`sudo dnf remove -y postgresql14 postgresql14-server postgresql14-contrib`);
+ shellExec(`sudo rm -rf /var/lib/pgsql`);
+
+ shellExec(`sudo dnf install postgresql14 postgresql14-server postgresql14-contrib -y`);
  }
- shellExec(`ssh-keygen -t rsa -b 4096 -C "${sshAccount}" -f ${destPath}`);
- // add host to keyscan
- // shellExec(`ssh-keyscan -t rsa ${sshAccount.split(`@`)[1]} >> ~/.ssh/known_hosts`);
+ shellExec(`sudo /usr/pgsql-14/bin/postgresql-14-setup initdb`);
+ shellExec(`sudo systemctl start postgresql-14`);
+ shellExec(`sudo systemctl enable postgresql-14`);
+ shellExec(`sudo systemctl status postgresql-14`);
+ // sudo dnf install postgresql14-contrib
  break;
  }

- case 'set-ssh-keys': {
- const files = ['authorized_keys', 'id_rsa', 'id_rsa.pub', 'known_hosts ', 'known_hosts.old'];
+ case 'pg-stop': {
+ shellExec(`sudo systemctl stop postgresql-14`);
+ shellExec(`sudo systemctl disable postgresql-14`);
+ break;
+ }
+ case 'pg-start': {
+ shellExec(`sudo systemctl enable postgresql-14`);
+ shellExec(`sudo systemctl restart postgresql-14`);
+ break;
+ }

- // > write
- // >> append
+ case 'pg-list-db': {
+ shellExec(`sudo -i -u postgres psql -c "\\l"`);
+ break;
+ }

- // /root/.ssh/id_rsa
- // /root/.ssh/id_rsa.pub
- if (process.argv.includes('clean')) {
- for (const file of files) {
- if (fs.existsSync(`/root/.ssh/${file}`)) {
- logger.info('remove', `/root/.ssh/${file}`);
- fs.removeSync(`/root/.ssh/${file}`);
+ case 'pg-list-table': {
+ shellExec(`sudo -i -u postgres psql -c "\\dt *.*"`);
+ // schema_name.*
+ break;
+ }
+ case 'pg-drop-db': {
+ shellExec(`sudo -i -u postgres psql -c "DROP DATABASE ${process.argv[3]} WITH (FORCE)"`);
+ shellExec(`sudo -i -u postgres psql -c "DROP USER ${process.argv[4]}"`);
+ break;
+ }
+
+ case 'maas-stop': {
+ shellExec(`sudo snap stop maas`);
+ break;
+ }
+
+ case 'maas': {
+ dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
+ const IP_ADDRESS = getLocalIPv4Address();
+ const serverip = IP_ADDRESS;
+ const tftpRoot = process.env.TFTP_ROOT;
+ const ipaddr = process.env.RPI4_IP;
+ const netmask = process.env.NETMASK;
+ const gatewayip = process.env.GATEWAY_IP;
+
+ const machineFactory = (m) => ({
+ system_id: m.interface_set[0].system_id,
+ mac_address: m.interface_set[0].mac_address,
+ hostname: m.hostname,
+ status_name: m.status_name,
+ });
+
+ if (process.argv.includes('db')) {
+ // DROP, ALTER, CREATE, WITH ENCRYPTED
+ // sudo -u <user> -h <host> psql <db-name>
+ shellExec(`DB_PG_MAAS_NAME=${process.env.DB_PG_MAAS_NAME}`);
+ shellExec(`DB_PG_MAAS_PASS=${process.env.DB_PG_MAAS_PASS}`);
+ shellExec(`DB_PG_MAAS_USER=${process.env.DB_PG_MAAS_USER}`);
+ shellExec(`DB_PG_MAAS_HOST=${process.env.DB_PG_MAAS_HOST}`);
+ shellExec(
+ `sudo -i -u postgres psql -c "CREATE USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"`,
+ );
+ shellExec(
+ `sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"`,
+ );
+ const actions = ['LOGIN', 'SUPERUSER', 'INHERIT', 'CREATEDB', 'CREATEROLE', 'REPLICATION'];
+ shellExec(`sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH ${actions.join(' ')}"`);
+ shellExec(`sudo -i -u postgres psql -c "\\du"`);
+
+ shellExec(`sudo -i -u postgres createdb -O "$DB_PG_MAAS_USER" "$DB_PG_MAAS_NAME"`);
+
+ shellExec(`sudo -i -u postgres psql -c "\\l"`);
+ process.exit(0);
+ }
+
+ if (process.argv.includes('ls')) {
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-sources read`);
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} commissioning-scripts read`);
+ // shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-source-selections read 60`);
+ console.table(resources);
+ console.table(machines);
+ process.exit(0);
+ }
+
+ // TODO: - Disable maas proxy (egress forwarding to public dns)
+ // - Configure maas dns forwarding ${process.env.MAAS_DNS}
+ // - Enable DNSSEC validation of upstream zones: Automatic (use default root key)
+
+ if (process.argv.includes('clear')) {
+ for (const machine of machines) {
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine delete ${machine.system_id}`);
+ }
+ // machines = [];
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} discoveries clear all=true`);
+ if (process.argv.includes('force')) {
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} discoveries scan force=true`);
+ }
+ process.exit(0);
+ }
+ if (process.argv.includes('grub-arm64')) {
+ shellExec(`sudo dnf install grub2-efi-aa64-modules`);
+ shellExec(`sudo dnf install grub2-efi-x64-modules`);
+ // sudo grub2-mknetdir --net-directory=${tftpRoot} --subdir=/boot/grub --module-path=/usr/lib/grub/arm64-efi arm64-efi
+ process.exit(0);
+ }
+
+ if (process.argv.includes('psql')) {
+ const cmd = `psql -U ${process.env.DB_PG_MAAS_USER} -h ${process.env.DB_PG_MAAS_HOST} -W ${process.env.DB_PG_MAAS_NAME}`;
+ pbcopy(cmd);
+ process.exit(0);
+ }
+ if (process.argv.includes('logs')) {
+ shellExec(`maas status`);
+ const cmd = `journalctl -f -t dhcpd -u snap.maas.pebble.service`;
+ pbcopy(cmd);
+ process.exit(0);
+ }
+ if (process.argv.includes('reset')) {
+ // shellExec(
+ // `maas init region+rack --database-uri "postgres://$DB_PG_MAAS_USER:$DB_PG_MAAS_PASS@$DB_PG_MAAS_HOST/$DB_PG_MAAS_NAME"` +
+ // ` --maas-url http://${IP_ADDRESS}:5240/MAAS`,
+ // );
+ const cmd =
+ `maas init region+rack --database-uri "postgres://${process.env.DB_PG_MAAS_USER}:${process.env.DB_PG_MAAS_PASS}@${process.env.DB_PG_MAAS_HOST}/${process.env.DB_PG_MAAS_NAME}"` +
+ ` --maas-url http://${IP_ADDRESS}:5240/MAAS`;
+ pbcopy(cmd);
+ process.exit(0);
+ }
+ if (process.argv.includes('dhcp')) {
+ const snippets = JSON.parse(
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} dhcpsnippets read`, {
+ stdout: true,
+ silent: true,
+ disableLog: true,
+ }),
+ );
+ for (const snippet of snippets) {
+ switch (snippet.name) {
+ case 'arm64':
+ snippet.value = snippet.value.split(`\n`);
+ snippet.value[1] = ` filename "http://${IP_ADDRESS}:5248/images/bootloaders/uefi/arm64/grubaa64.efi";`;
+ snippet.value[5] = ` filename "http://${IP_ADDRESS}:5248/images/bootloaders/uefi/arm64/grubaa64.efi";`;
+ snippet.value = snippet.value.join(`\n`);
+ shellExec(
+ `maas ${process.env.MAAS_ADMIN_USERNAME} dhcpsnippet update ${snippet.name} value='${snippet.value}'`,
+ );
+ break;
+
+ default:
+ break;
  }
- fs.writeFileSync(`/root/.ssh/${file}`, '', 'utf8');
  }
- shellExec('eval `ssh-agent -s`' + ` && ssh-add -D`);
+
+ console.log(snippets);
+
+ process.exit(0);
+ }
+
+ if (process.argv.includes('restart')) {
+ shellExec(`sudo snap restart maas.pebble`);
+ let secs = 0;
+ while (
+ !(
+ shellExec(`maas status`, { silent: true, disableLog: true, stdout: true })
+ .split(' ')
+ .filter((l) => l.match('inactive')).length === 1
+ )
+ ) {
+ await timer(1000);
+ console.log(`Waiting... (${++secs}s)`);
+ }
+ process.exit(0);
  }

- const destPath = process.argv[3];
- const sshAuthKeyTarget = '/root/.ssh/authorized_keys';
- if (!fs.existsSync(sshAuthKeyTarget)) shellExec(`touch ${sshAuthKeyTarget}`);
- shellExec(`cat ${destPath}.pub > ${sshAuthKeyTarget}`);
- shellExec(`cat ${destPath} >> ${sshAuthKeyTarget}`);
+ // shellExec(`MAAS_ADMIN_USERNAME=${process.env.MAAS_ADMIN_USERNAME}`);
+ // shellExec(`MAAS_ADMIN_EMAIL=${process.env.MAAS_ADMIN_EMAIL}`);
+ // shellExec(`maas createadmin --username $MAAS_ADMIN_USERNAME --email $MAAS_ADMIN_EMAIL`);
+
+ // MaaS admin CLI:
+ // maas login <maas-admin-username> http://localhost:5240/MAAS
+ // paste GUI API KEY (profile section)
+
+ // Import custom image
+ // maas <maas-admin-username> boot-resources create name='custom/RockyLinuxRpi4' \
+ // title='RockyLinuxRpi4' \
+ // architecture='arm64/generic' \
+ // filetype='tgz' \
+ // content@=/home/RockyLinuxRpi_9-latest.tar.gz
+
+ // Image boot resource:
+ // /var/snap/maas/current/root/snap/maas
+ // /var/snap/maas/common/maas/tftp_root
+ // sudo chmod 755 /var/snap/maas/common/maas/tftp_root
+
+ // /var/snap/maas/common/maas/dhcpd.conf
+ // sudo snap restart maas.pebble
+
+ // PXE Linux files:
+ // /var/snap/maas/common/maas/image-storage/bootloaders/pxe/i386
+ // sudo nmcli con modify <interface-device-name-connection-id> ethtool.feature-rx on ethtool.feature-tx off
+ // sudo nmcli connection up <interface-device-name-connection-id>
+
+ // man nm-settings |grep feature-tx-checksum
+
+ // nmcli c modify <interface-device-name-connection-id> \
+ // ethtool.feature-tx-checksum-fcoe-crc off \
+ // ethtool.feature-tx-checksum-ip-generic off \
+ // ethtool.feature-tx-checksum-ipv4 off \
+ // ethtool.feature-tx-checksum-ipv6 off \
+ // ethtool.feature-tx-checksum-sctp off
+
+ // Ensure Rocky NFS server and /etc/exports configured
+ // sudo systemctl restart nfs-server
+ // Check mounts: showmount -e <server-ip>
+ // Check nfs ports: rpcinfo -p
+ // sudo chown -R root:root ${process.env.NFS_EXPORT_PATH}/rpi4mb
+ // sudo chmod 755 ${process.env.NFS_EXPORT_PATH}/rpi4mb
+
+ // tftp server
+ // sudo chown -R root:root /var/snap/maas/common/maas/tftp_root/rpi4mb
+
+ // tftp client
+ // sudo dnf install tftp
+ // tftp <server-ip> -c get <path>
+
+ // Check firewall-cmd
+ // firewall-cmd --permanent --add-service=rpc-bind
+ // firewall-cmd --reload
+ // systemctl disable firewalld
+ // sudo firewall-cmd --permanent --add-port=10259/tcp --zone=public
+
+ // Image extension transform (.img.xz to .tar.gz):
+ // tar -cvzf image-name.tar.gz image-name.img.xz
+
+ // Rocky network configuration:
+ // /etc/NetworkManager/system-connections
+
+ // Rocky kernel params update
+ // sudo grubby --args="<key>=<value> <key>=<value>" --update-kernel=ALL
+ // sudo reboot now
+
+ // Temporal:
+ // sudo snap install temporal
+ // journalctl -u snap.maas.pebble -t maas-regiond
+ // journalctl -u snap.maas.pebble -t maas-temporal -n 100 --no-pager -f
+
+ // Remove:
+ // sudo dnf remove <package> -y; sudo dnf autoremove -y; sudo dnf clean packages
+ // check: ~
+ // check: ~./cache
+ // check: ~./config
+
+ // Check file logs
+ // grep -i -E -C 1 '<key-a>|<key-b>' /example.log | tail -n 600
+
+ // Back into your firmware setup (UEFI or BIOS config screen).
+ // grub> fwsetup
+
+ // Poweroff:
+ // grub > halt
+ // initramfs > poweroff
+
+ // Check interface
+ // ip link show
+ // nmcli con show
+
+ let resources;
+ try {
+ resources = JSON.parse(
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-resources read`, {
+ silent: true,
+ stdout: true,
+ }),
+ ).map((o) => ({
+ id: o.id,
+ name: o.name,
+ architecture: o.architecture,
+ }));
+ } catch (error) {
+ logger.error(error);
+ }
+
+ let machines;
+ try {
+ machines = JSON.parse(
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machines read`, {
+ stdout: true,
+ silent: true,
+ }),
+ ).map((m) => machineFactory(m));
+ } catch (error) {
+ logger.error(error);
+ }

- if (!fs.existsSync('/root/.ssh/id_rsa')) shellExec(`touch ${'/root/.ssh/id_rsa'}`);
- shellExec(`cat ${destPath} > ${'/root/.ssh/id_rsa'}`);
+ let firmwarePath,
+ tftpSubDir,
+ kernelFilesPaths,
+ name,
+ architecture,
+ resource,
+ nfsConnectStr,
+ etcExports,
+ nfsServerRootPath,
+ bootConf,
+ zipFirmwareFileName,
+ zipFirmwareName,
+ zipFirmwareUrl,
+ interfaceName,
+ nfsHost;
+
+ switch (process.argv[3]) {
+ case 'rpi4mb':
+ const resourceId = process.argv[4] ?? '39';
+ tftpSubDir = '/rpi4mb';
+ zipFirmwareFileName = `RPi4_UEFI_Firmware_v1.41.zip`;
+ zipFirmwareName = zipFirmwareFileName.split('.zip')[0];
+ zipFirmwareUrl = `https://github.com/pftf/RPi4/releases/download/v1.41/RPi4_UEFI_Firmware_v1.41.zip`;
+ firmwarePath = `../${zipFirmwareName}`;
+ interfaceName = process.env.RPI4_INTERFACE_NAME;
+ nfsHost = 'rpi4mb';
+ if (!fs.existsSync(firmwarePath)) {
+ await Downloader(zipFirmwareUrl, `../${zipFirmwareFileName}`);
+ shellExec(`cd .. && mkdir ${zipFirmwareName} && cd ${zipFirmwareName} && unzip ../${zipFirmwareFileName}`);
+ }
+ resource = resources.find((o) => o.id == resourceId);
+ name = resource.name;
+ architecture = resource.architecture;
+ resource = resources.find((o) => o.name === name && o.architecture === architecture);
+ nfsServerRootPath = `${process.env.NFS_EXPORT_PATH}/rpi4mb`;
+ // ,anonuid=1001,anongid=100
+ // etcExports = `${nfsServerRootPath} *(rw,all_squash,sync,no_root_squash,insecure)`;
+ etcExports = `${nfsServerRootPath} 192.168.1.0/24(${[
+ 'rw',
+ // 'all_squash',
+ 'sync',
+ 'no_root_squash',
+ 'no_subtree_check',
+ 'insecure',
+ ]})`;
+ const resourceData = JSON.parse(
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-resource read ${resource.id}`, {
+ stdout: true,
+ silent: true,
+ disableLog: true,
+ }),
+ );
+ const bootFiles = resourceData.sets[Object.keys(resourceData.sets)[0]].files;
+ const suffix = architecture.match('xgene') ? '.xgene' : '';
+
+ kernelFilesPaths = {
+ 'vmlinuz-efi': bootFiles['boot-kernel' + suffix].filename_on_disk,
+ 'initrd.img': bootFiles['boot-initrd' + suffix].filename_on_disk,
+ squashfs: bootFiles['squashfs'].filename_on_disk,
+ };
+ const protocol = 'tcp'; // v3 -> tcp, v4 -> udp
+
+ const mountOptions = [
+ protocol,
+ 'vers=3',
+ 'nfsvers=3',
+ 'nolock',
+ // 'protocol=tcp',
+ // 'hard=true',
+ 'port=2049',
+ // 'sec=none',
+ 'rw',
+ 'hard',
+ 'intr',
+ 'rsize=32768',
+ 'wsize=32768',
+ 'acregmin=0',
+ 'acregmax=0',
+ 'acdirmin=0',
+ 'acdirmax=0',
+ 'noac',
+ // 'nodev',
+ // 'nosuid',
+ ];
+ const cmd = [
+ `console=serial0,115200`,
+ `console=tty1`,
+ // `initrd=-1`,
+ // `net.ifnames=0`,
+ // `dwc_otg.lpm_enable=0`,
+ // `elevator=deadline`,
+ `root=/dev/nfs`,
+ `nfsroot=${serverip}:${process.env.NFS_EXPORT_PATH}/rpi4mb,${mountOptions}`,
+ // `nfsroot=${serverip}:${process.env.NFS_EXPORT_PATH}/rpi4mb`,
+ `ip=${ipaddr}:${serverip}:${gatewayip}:${netmask}:${nfsHost}:${interfaceName}:static`,
+ `rootfstype=nfs`,
+ `rw`,
+ `rootwait`,
+ `fixrtc`,
+ 'initrd=initrd.img',
+ // 'boot=casper',
+ // 'ro',
+ 'netboot=nfs',
+ `cloud-config-url=/dev/null`,
+ // 'ip=dhcp',
+ // 'ip=dfcp',
+ // 'autoinstall',
+ // 'rd.break',
+ ];
+
+ nfsConnectStr = cmd.join(' ');
+ bootConf = `[all]
+ MAC_ADDRESS=00:00:00:00:00:00
+ MAC_ADDRESS_OTP=0,1
+ BOOT_UART=0
+ WAKE_ON_GPIO=1
+ POWER_OFF_ON_HALT=0
+ ENABLE_SELF_UPDATE=1
+ DISABLE_HDMI=0
+ TFTP_IP=${serverip}
+ TFTP_PREFIX=1
+ TFTP_PREFIX_STR=${tftpSubDir.slice(1)}/
+ NET_INSTALL_ENABLED=1
+ DHCP_TIMEOUT=45000
+ DHCP_REQ_TIMEOUT=4000
+ TFTP_FILE_TIMEOUT=30000
+ BOOT_ORDER=0x21`;

- if (!fs.existsSync('/root/.ssh/id_rsa.pub')) shellExec(`touch ${'/root/.ssh/id_rsa.pub'}`);
- shellExec(`cat ${destPath}.pub > ${'/root/.ssh/id_rsa.pub'}`);
+ break;

- shellExec(`chmod 700 /root/.ssh/`);
- for (const file of files) {
- shellExec(`chmod 600 /root/.ssh/${file}`);
+ default:
+ break;
  }
- const host = process.argv[4];
- // add key
- shellExec('eval `ssh-agent -s`' + ' && ssh-add /root/.ssh/id_rsa' + ' && ssh-add -l');
- if (host) shellExec(`ssh-keyscan -H ${host} >> ~/.ssh/known_hosts`);
- shellExec(`sudo systemctl enable ssh`);
- shellExec(`sudo systemctl restart ssh`);
- shellExec(`sudo systemctl status ssh`);
+ shellExec(`sudo chmod 755 ${process.env.NFS_EXPORT_PATH}/${nfsHost}`);
+
+ shellExec(`sudo rm -rf ${tftpRoot}${tftpSubDir}`);
+ shellExec(`sudo cp -a ${firmwarePath} ${tftpRoot}${tftpSubDir}`);
+ shellExec(`mkdir -p ${tftpRoot}${tftpSubDir}/pxe`);
+
+ fs.writeFileSync(`/etc/exports`, etcExports, 'utf8');
+ if (bootConf) fs.writeFileSync(`${tftpRoot}${tftpSubDir}/boot.conf`, bootConf, 'utf8');
+
+ shellExec(`node bin/deploy nfs`);
+
+ switch (process.argv[3]) {
+ case 'rpi4mb':
+ {
+ // subnet DHCP snippets
+ // # UEFI ARM64
+ // if option arch = 00:0B {
+ // filename "rpi4mb/pxe/grubaa64.efi";
+ // }
+ // elsif option arch = 00:13 {
+ // filename "http://<IP_ADDRESS>:5248/images/bootloaders/uefi/arm64/grubaa64.efi";
+ // option vendor-class-identifier "HTTPClient";
+ // }
+ for (const file of ['bootaa64.efi', 'grubaa64.efi']) {
+ shellExec(
+ `sudo cp -a /var/snap/maas/common/maas/image-storage/bootloaders/uefi/arm64/${file} ${tftpRoot}${tftpSubDir}/pxe/${file}`,
+ );
+ }
+ // const file = 'bcm2711-rpi-4-b.dtb';
+ // shellExec(
+ // `sudo cp -a ${firmwarePath}/${file} /var/snap/maas/common/maas/image-storage/bootloaders/uefi/arm64/${file}`,
+ // );
+
+ // const ipxeSrc = fs
+ // .readFileSync(`${tftpRoot}/ipxe.cfg`, 'utf8')
+ // .replaceAll('amd64', 'arm64')
+ // .replaceAll('${next-server}', IP_ADDRESS);
+ // fs.writeFileSync(`${tftpRoot}/ipxe.cfg`, ipxeSrc, 'utf8');
+
+ {
+ for (const file of Object.keys(kernelFilesPaths)) {
+ shellExec(
+ `sudo cp -a /var/snap/maas/common/maas/image-storage/${kernelFilesPaths[file]} ${tftpRoot}${tftpSubDir}/pxe/${file}`,
+ );
+ }
+ // const configTxtSrc = fs.readFileSync(`${firmwarePath}/config.txt`, 'utf8');
+ // fs.writeFileSync(
+ // `${tftpRoot}${tftpSubDir}/config.txt`,
+ // configTxtSrc
+ // .replace(`kernel=kernel8.img`, `kernel=vmlinuz`)
+ // .replace(`# max_framebuffers=2`, `max_framebuffers=2`)
+ // .replace(`initramfs initramfs8 followkernel`, `initramfs initrd.img followkernel`),
+ // 'utf8',
+ // );
+
+ // grub:
+ // set root=(pxe)
+
+ // UNDERPOST.NET UEFI/GRUB/MAAS RPi4 commissioning (ARM64)
+ const menuentryStr = 'underpost.net rpi4mb maas commissioning (ARM64)';
+ const grubCfgPath = `${tftpRoot}/grub/grub.cfg`;
+ fs.writeFileSync(
+ grubCfgPath,
+ `
+ insmod gzio
+ insmod http
+ insmod nfs
+ set timeout=5
+ set default=0
+
+ menuentry '${menuentryStr}' {
+ set root=(tftp,${serverip})
+ linux ${tftpSubDir}/pxe/vmlinuz-efi ${nfsConnectStr}
+ initrd ${tftpSubDir}/pxe/initrd.img
+ boot
+ }
+
+ `,
+ 'utf8',
+ );
+ }
+ const arm64EfiPath = `${tftpRoot}/grub/arm64-efi`;
+ if (fs.existsSync(arm64EfiPath)) shellExec(`sudo rm -rf ${arm64EfiPath}`);
+ shellExec(`sudo cp -a /usr/lib/grub/arm64-efi ${arm64EfiPath}`);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ logger.info('succes maas deploy', {
+ resource,
+ kernelFilesPaths,
+ tftpRoot,
+ tftpSubDir,
+ firmwarePath,
+ etcExports,
+ nfsServerRootPath,
+ nfsConnectStr,
+ });
+ if (process.argv.includes('restart')) {
+ if (fs.existsSync(`node engine-private/r.js`)) shellExec(`node engine-private/r`);
+ shellExec(`node bin/deploy maas dhcp`);
+ shellExec(`sudo chown -R root:root ${tftpRoot}`);
+ shellExec(`sudo sudo chmod 755 ${tftpRoot}`);
+ }
+ // for (const machine of machines) {
+ // // shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine delete ${machine.system_id}`);
+ // shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine commission ${machine.system_id}`, {
+ // silent: true,
+ // });
+ // }
+ // machines = [];
+
+ const monitor = async () => {
+ // discoveries Query observed discoveries.
+ // discovery Read or delete an observed discovery.
+
+ const discoveries = JSON.parse(
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} discoveries read`, {
+ silent: true,
+ stdout: true,
+ }),
+ ).filter(
+ (o) => o.ip !== IP_ADDRESS && o.ip !== gatewayip && !machines.find((_o) => _o.mac_address === o.mac_address),
+ );
+
+ // {
+ // "discovery_id": "",
+ // "ip": "192.168.1.189",
+ // "mac_address": "00:00:00:00:00:00",
+ // "last_seen": "2025-05-05T14:17:37.354",
+ // "hostname": null,
+ // "fabric_name": "",
+ // "vid": null,
+ // "mac_organization": "",
+ // "observer": {
+ // "system_id": "",
+ // "hostname": "",
+ // "interface_id": 1,
+ // "interface_name": ""
+ // },
+ // "resource_uri": "/MAAS/api/2.0/discovery/MTkyLjE2OC4xLjE4OSwwMDowMDowMDowMDowMDowMA==/"
+ // },
+
+ for (const discovery of discoveries) {
+ const machine = {
+ architecture: architecture.match('amd') ? 'amd64/generic' : 'arm64/generic',
+ mac_address: discovery.mac_address,
+ hostname: discovery.hostname ?? discovery.mac_organization ?? discovery.domain ?? `generic-host-${s4()}`,
+ // discovery.ip.match(ipaddr)
+ // ? nfsHost
+ // : `unknown-${s4()}`,
+ // description: '',
+ // https://maas.io/docs/reference-power-drivers
+ power_type: 'manual', // manual
+ // power_parameters_power_address: discovery.ip,
+ mac_addresses: discovery.mac_address,
+ };
+ machine.hostname = machine.hostname.replaceAll(' ', '').replaceAll('.', '');

+ try {
+ let newMachine = shellExec(
+ `maas ${process.env.MAAS_ADMIN_USERNAME} machines create ${Object.keys(machine)
+ .map((k) => `${k}="${machine[k]}"`)
+ .join(' ')}`,
+ {
+ silent: true,
+ stdout: true,
+ },
+ );
+ newMachine = machineFactory(JSON.parse(newMachine));
+ machines.push(newMachine);
+ console.log(newMachine);
+ shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine commission ${newMachine.system_id}`, {
+ silent: true,
+ });
+ } catch (error) {
+ logger.error(error, error.stack);
+ }
+ }
+ // if (discoveries.length > 0) {
+ // shellExec(
+ // `maas ${process.env.MAAS_ADMIN_USERNAME} machines read | jq '.[] | {system_id: .interface_set[0].system_id, hostname, status_name, mac_address: .interface_set[0].mac_address}'`,
+ // );
+ // }
+ await timer(1000);
+ monitor();
+ };
+ // shellExec(`node bin/deploy open-virtual-root ${architecture.match('amd') ? 'amd64' : 'arm64'} ${nfsHost}`);
+ machines = [];
+ shellExec(`node bin/deploy maas clear`);
+ monitor();
  break;
  }

- case 'ssh': {
- if (process.argv.includes('rocky')) {
- shellExec(`sudo systemctl enable sshd`);
+ case 'nfs': {
+ // Daemon RPC NFSv3. ports:

- shellExec(`sudo systemctl start sshd`);
+ // 2049 (TCP/UDP) nfsd standard port.
+ // 111 (TCP/UDP) – rpcbind/portmapper.
+ // 20048 (TCP/UDP) – rpc.mountd.
+ // 32765 (TCP/UDP) – rpc.statd.
+ // 32766 (TCP/UDP) – lockd (NLM).

- shellExec(`sudo systemctl status sshd`);
+ // Configure export and permissions:
+ // /etc/exports

- shellExec(`sudo ss -lt`);
- } else {
- if (!process.argv.includes('server')) {
- shellExec(`sudo apt update`);
- shellExec(`sudo apt install openssh-server -y`);
- shellExec(`sudo apt install ssh-askpass`);
+ // Configure ports:
+ // /etc/nfs.conf
+
+ fs.writeFileSync(
+ `/etc/nfs.conf`,
+ `
+ [mountd]
+ port = 20048
+
+ [statd]
+ port = 32765
+ outgoing-port = 32765
+
+ [nfsd]
+ rdma=y
+ rdma-port=20049
+
+ [lockd]
+ port = 32766
+ udp-port = 32766
+ `,
+ 'utf8',
+ );
+
+ // Client users have read-only access to resources and are identified as anonymous on the server.
+ // /share ip-client(ro,all_squash)
+
+ // Client users can modify resources and keep their UID on the server. Only root is identified as anonymous.
+ // /share ip-client(rw)
+
+ // Users on client workstation 1 can modify resources, while those on client workstation 2 have read-only access.
+ // UIDs are kept on the server, and only root is identified as anonymous.
+ // /share ip-client1(rw) ip-client2(ro)
+
+ // Client1 users can modify resources. Their UID is changed to 1001 and their GID to 100 on the server.
+ // /share ip-client(rw,all_squash,anonuid=1001,anongid=100)
+
+ // sudo dnf install nfs-utils
+ // sudo systemctl enable --now rpcbind // RPC map service
+ // sudo systemctl enable --now nfs-server // nfs domains nfsd
+
+ // Update exports:
+ // shellExec(`sudo exportfs -a -r`);
+ // shellExec(`sudo exportfs -v`);
+
+ // Active nfs
+ shellExec(`sudo exportfs -s`);
+
+ shellExec(`sudo exportfs -rav`);
+
+ // Rocky enable virt_use_nfs
+ // sudo setsebool -P virt_use_nfs 1
+
+ // Disable share:
+ // sudo exportfs -u <client-ip>:${process.env.NFS_EXPORT_PATH}/rpi4mb
+
+ // Nfs client:
+ // mount -t nfs <server-ip>:/server-mnt /mnt
+ // umount /mnt
+
+ shellExec(`sudo systemctl restart nfs-server`);
+ break;
+ }
+ case 'update-virtual-root': {
+ dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
+ const IP_ADDRESS = getLocalIPv4Address();
+ const architecture = process.argv[3];
+ const host = process.argv[4];
+ const nfsHostPath = `${process.env.NFS_EXPORT_PATH}/${host}`;
+ const ipaddr = process.env.RPI4_IP;
+ await updateVirtualRoot({
+ IP_ADDRESS,
+ architecture,
+ host,
+ nfsHostPath,
+ ipaddr,
+ });
+ break;
+ }
+ case 'open-virtual-root': {
+ dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
+ const IP_ADDRESS = getLocalIPv4Address();
+ const architecture = process.argv[3];
+ const host = process.argv[4];
+ const nfsHostPath = `${process.env.NFS_EXPORT_PATH}/${host}`;
+ shellExec(`sudo dnf install -y iptables-legacy`);
+ shellExec(`sudo dnf install -y debootstrap`);
+ shellExec(`sudo dnf install kernel-modules-extra-$(uname -r)`);
+ switch (architecture) {
+ case 'arm64':
+ shellExec(`sudo podman run --rm --privileged multiarch/qemu-user-static --reset -p yes`);
+
+ break;
+
+ default:
+ break;
+ }
+
+ shellExec(`sudo modprobe binfmt_misc`);
+ shellExec(`sudo mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc`);
+
+ if (process.argv.includes('build')) {
+ // shellExec(`depmod -a`);
+ shellExec(`mkdir -p ${nfsHostPath}`);
+ let cmd;
+ switch (host) {
+ case 'rpi4mb':
+ shellExec(`sudo rm -rf ${nfsHostPath}/*`);
+ shellExec(`sudo chown -R root:root ${nfsHostPath}`);
+ cmd = [
+ `sudo debootstrap`,
+ `--arch=arm64`,
+ `--variant=minbase`,
+ `--foreign`, // arm64 on amd64
+ `noble`,
+ nfsHostPath,
+ `http://ports.ubuntu.com/ubuntu-ports/`,
+ ];
+ break;
+
+ default:
+ break;
+ }
+ shellExec(cmd.join(' '));
+
+ shellExec(`sudo podman create --name extract multiarch/qemu-user-static`);
+ shellExec(`podman ps -a`);
+ shellExec(`sudo podman cp extract:/usr/bin/qemu-aarch64-static ${nfsHostPath}/usr/bin/`);
+ shellExec(`sudo podman rm extract`);
+ shellExec(`podman ps -a`);
+
+ switch (host) {
+ case 'rpi4mb':
+ shellExec(`file ${nfsHostPath}/bin/bash`); // expected: ELF 64-bit LSB pie executable, ARM aarch64 …
+ break;
+
+ default:
+ break;
+ }
+
+ shellExec(`sudo chroot ${nfsHostPath} /usr/bin/qemu-aarch64-static /bin/bash <<'EOF'
+ /debootstrap/debootstrap --second-stage
+ EOF`);
+ }
+ if (process.argv.includes('mount')) {
+ shellExec(`sudo mount --bind /proc ${nfsHostPath}/proc`);
+ shellExec(`sudo mount --bind /sys ${nfsHostPath}/sys`);
+ shellExec(`sudo mount --rbind /dev ${nfsHostPath}/dev`);
+ }
+
+ if (process.argv.includes('build')) {
+ switch (host) {
+ case 'rpi4mb':
+ const ipaddr = process.env.RPI4_IP;
+
+ await updateVirtualRoot({
+ IP_ADDRESS,
+ architecture,
+ host,
+ nfsHostPath,
+ ipaddr,
+ });
+
+ break;
+
+ default:
+ break;
  }
- shellExec(`sudo systemctl enable ssh`);
- shellExec(`sudo systemctl restart ssh`);
- shellExec(`sudo systemctl status ssh`);
  }
- // sudo service ssh restart
- shellExec(`ip a`);
+ // if (process.argv.includes('mount')) {
+ // shellExec(`sudo mount --bind /lib/modules ${nfsHostPath}/lib/modules`);
+ // }
+
+ break;
+ }
+
+ case 'close-virtual-root': {
+ const architecture = process.argv[3];
+ const host = process.argv[4];
+ const nfsHostPath = `${process.env.NFS_EXPORT_PATH}/${host}`;
+ shellExec(`sudo umount ${nfsHostPath}/proc`);
+ shellExec(`sudo umount ${nfsHostPath}/sys`);
+ shellExec(`sudo umount ${nfsHostPath}/dev`);
+ // shellExec(`sudo umount ${nfsHostPath}/lib/modules`);
+ break;
+ }

- // adduser newuser
- // usermod -aG sudo newuser
+ case 'mount': {
+ const mounts = shellExec(`mount`).split(`\n`);
+ console.table(
+ mounts
+ .filter((l) => l.trim())
+ .map(
+ (o) => (
+ (o = o.split(' ')),
+ {
+ path: o[2],
+ type: o[4],
+ permissions: o[5],
+ }
+ ),
+ ),
+ );
+ break;
+ }

- // ssh -i '/path/to/keyfile' username@server
+ case 'create-ports': {
2075
+ const cmd = [];
2076
+ const ipaddr = getLocalIPv4Address();
2077
+ for (const port of ['5240']) {
2078
+ const name = 'maas';
2079
+ cmd.push(`${name}:${port}-${port}:${ipaddr}`);
2080
+ }
2081
+ pbcopy(`node engine-private/r create-port ${cmd}`);
2082
+ break;
2083
+ }
972
2084
 
973
- // ssh-keygen -t ed25519 -C "your_email@example.com" -f $HOME/.ssh/id_rsa
2085
+ case 'maas-ports': {
2086
+ // Configure firewall:
2087
+
2088
+ // systemctl stop firewalld
2089
+ // systemctl mask firewalld
2090
+
2091
+ // ufw disable
2092
+ // ufw enable
2093
+
2094
+ // sudo snap install ufw
2095
+ // const ports = ['80', '443', '22', '3000-3100'];
2096
+ const ports = [
2097
+ '43',
2098
+ '53',
2099
+ '60',
2100
+ '66',
2101
+ '67',
2102
+ '69',
2103
+ '4011',
2104
+ '111',
2105
+ '2049',
2106
+ '20048',
2107
+ '20049',
2108
+ '32765',
2109
+ '32766',
2110
+ '5248',
2111
+ '5240',
2112
+ ];
2113
+ for (const port of ports) {
2114
+ shellExec(`ufw allow ${port}/tcp`);
2115
+ shellExec(`ufw allow ${port}/udp`);
2116
+ }
974
2117
 
975
- // legacy: ssh-keygen -t rsa -b 4096 -C "your_email@example.com" -f $HOME/.ssh/id_rsa
2118
+ shellExec(`sudo systemctl mask firewalld`);
976
2119
 
977
- // vi .ssh/authorized_keys
978
- // chmod 700 .ssh
979
- // chmod 600 authorized_keys
2120
+ break;
2121
+ }

- // cat id_rsa.pub > .ssh/authorized_keys
+ case 'iptables': {
+ shellExec(`sudo systemctl enable nftables`);
+ shellExec(`sudo systemctl restart nftables`);

- // add public key to authorized keys
- // cat .ssh/id_ed25519.pub | ssh [sudo username]@[host/ip] 'cat >> .ssh/authorized_keys'
+ shellExec(`sudo tee /etc/nftables.conf <<EOF
+ table inet filter {
+ chain input {
+ type filter hook input priority 0;
+ policy drop;
+ tcp dport 22 accept
+ }
+ }
+ EOF`);
+ shellExec(`sudo nft -f /etc/nftables.conf`);

- // 2. Open /etc/ssh/sshd_config file
- // nano /etc/ssh/sshd_config
+ // sudo systemctl stop nftables
+ // sudo systemctl disable nftables

- // 3. add example code to last line of file
- // Match User newuser
- // PasswordAuthentication yes
+ break;
+ }
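The nftables ruleset written above is a drop-by-default input chain that only accepts TCP port 22, loaded with `nft -f`. A sketch that builds the same ruleset from a list of allowed ports before loading it, assuming passwordless sudo; the temporary file path is illustrative:

import fs from 'fs';
import { execSync } from 'child_process';

// Generate a minimal drop-by-default ruleset that accepts only the listed TCP ports.
function nftablesConfig(ports) {
  const accepts = ports.map((p) => `    tcp dport ${p} accept`).join('\n');
  return `table inet filter {\n  chain input {\n    type filter hook input priority 0;\n    policy drop;\n${accepts}\n  }\n}\n`;
}

fs.writeFileSync('/tmp/nftables.conf', nftablesConfig([22]), 'utf8'); // illustrative path
execSync('sudo nft -f /tmp/nftables.conf', { stdio: 'inherit' });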

- // ssh [sudo username]@[host/ip]
- // open port 22
+ case 'rpi4': {
+ // Rpi4 Run Bootloader:

- // init ssh agent service
- // eval `ssh-agent -s`
+ // 1) create boot.conf

- // list keys
- // ssh-add -l
+ // 2) Run lite RPiOs from rpi-imager
+ // with boot.conf files in root disk path

- // add key
- // ssh-add /root/.ssh/id_rsa
+ // 3) cd /boot/firmware && sudo rpi-eeprom-config --apply boot.conf

- // remove
- // ssh-add -d /path/to/private/key
+ // 4) sudo reboot

- // remove all
- // ssh-add -D
+ // 5) check: 'vcgencmd bootloader_version'
+ // 6) check: 'vcgencmd bootloader_config'

- // sshpass -p ${{ secrets.PSWD }} ssh -o StrictHostKeyChecking=no -p 22 ${{ secrets.USER}}@${{ secrets.VPS_IP }} 'cd /home/adam && ./deploy.sh'
+ // 7) shutdown and restart without sd card

- // copies the public key of your default identity (use -i identity_file for other identities) to the remote host.
- // ssh-copy-id user@hostname.example.com
- // ssh-copy-id "user@hostname.example.com -p <port-number>"
+ // sudo apt update
+ // sudo apt install git

  break;
  }
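The 'rpi4' case documents the Raspberry Pi 4 bootloader update as a comment checklist (write boot.conf, apply it with rpi-eeprom-config, reboot, verify with vcgencmd). A hedged sketch of steps 3-6 as code, assuming it runs on the Pi itself with rpi-eeprom-config and vcgencmd available:

import { execSync } from 'child_process';

const run = (cmd) => execSync(cmd, { encoding: 'utf8' });

// Apply the boot.conf placed in /boot/firmware, then report the EEPROM state.
run('cd /boot/firmware && sudo rpi-eeprom-config --apply boot.conf');
console.log(run('vcgencmd bootloader_version'));
console.log(run('vcgencmd bootloader_config'));
// A reboot ('sudo reboot') is still required before the new EEPROM config takes effect.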

- case 'valkey': {
- if (!process.argv.includes('server')) {
- if (process.argv.includes('rocky')) {
- // shellExec(`yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm`);
- // shellExec(`sudo percona-release enable valkey experimental`);
- shellExec(`sudo dnf install valkey`);
- shellExec(`chown -R valkey:valkey /etc/valkey`);
- shellExec(`chown -R valkey:valkey /var/lib/valkey`);
- shellExec(`chown -R valkey:valkey /var/log/valkey`);
- shellExec(`sudo systemctl enable valkey.service`);
- shellExec(`sudo systemctl start valkey`);
- shellExec(`valkey-cli ping`);
- } else {
- shellExec(`cd /home/dd && git clone https://github.com/valkey-io/valkey.git`);
- shellExec(`cd /home/dd/valkey && make`);
- shellExec(`apt install valkey-tools`); // valkey-cli
+ case 'blue': {
+ // lsusb | grep blue -i
+ // rfkill list
+ // sudo service bluetooth start
+ // bluetoothctl show
+ // sudo rfkill unblock bluetooth
+ // dmesg | grep -i bluetooth
+ // journalctl -u bluetooth -f
+ // sudo dnf update bluez bluez-libs bluez-utils
+ // sudo rmmod btusb
+ // sudo modprobe btusb
+ break;
+ }
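The 'blue' case is a checklist of Bluetooth diagnostics rather than executable code. A small sketch that runs a few of those checks and prints their output, assuming a systemd host with bluez installed (some commands may need root):

import { execSync } from 'child_process';

// Run a few of the Bluetooth checks listed above and show their output, ignoring failures.
for (const cmd of ['rfkill list', 'bluetoothctl show', 'dmesg | grep -i bluetooth | tail -n 20']) {
  try {
    console.log(`$ ${cmd}\n${execSync(cmd, { encoding: 'utf8' })}`);
  } catch (error) {
    console.log(`$ ${cmd} failed: ${error.message}`);
  }
}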
+
+ case 'fastapi-models': {
+ shellExec(`chmod +x ../full-stack-fastapi-template/backend/initial_data.sh`);
+ shellExec(`../full-stack-fastapi-template/backend/initial_data.sh`);
+ shellExec(`../full-stack-fastapi-template/backend/initial_data.sh`);
+ break;
+ }
+
+ case 'fastapi': {
+ // node bin/deploy fastapi reset
+ // node bin/deploy fastapi reset build-back build-front secret run-back run-front
+ // https://github.com/NonsoEchendu/full-stack-fastapi-project
+ // https://github.com/fastapi/full-stack-fastapi-template
+ const path = `../full-stack-fastapi-template`;
+ const VITE_API_URL = `http://localhost:8000`;
+
+ if (process.argv.includes('reset')) shellExec(`sudo rm -rf ${path}`);
+
+ if (!fs.existsSync(path))
+ shellExec(`cd .. && git clone https://github.com/fastapi/full-stack-fastapi-template.git`);
+
+ shellExec(`cd ${path} && git checkout . && git clean -f -d`);
+ const password = fs.readFileSync(`/home/dd/engine/engine-private/postgresql-password`, 'utf8');
+
+ fs.writeFileSync(
+ `${path}/.env`,
+ fs
+ .readFileSync(`${path}/.env`, 'utf8')
+ .replace(`FIRST_SUPERUSER=admin@example.com`, `FIRST_SUPERUSER=development@underpost.net`)
+ .replace(`FIRST_SUPERUSER_PASSWORD=changethis`, `FIRST_SUPERUSER_PASSWORD=${password}`)
+ .replace(`SECRET_KEY=changethis`, `SECRET_KEY=${password}`)
+ .replace(`POSTGRES_DB=app`, `POSTGRES_DB=postgresdb`)
+ .replace(`POSTGRES_USER=postgres`, `POSTGRES_USER=admin`)
+ .replace(`POSTGRES_PASSWORD=changethis`, `POSTGRES_PASSWORD=${password}`),
+ 'utf8',
+ );
+ fs.writeFileSync(
+ `${path}/backend/app/core/db.py`,
+ fs
+ .readFileSync(`${path}/backend/app/core/db.py`, 'utf8')
+ .replace(` # from sqlmodel import SQLModel`, ` from sqlmodel import SQLModel`)
+ .replace(` # SQLModel.metadata.create_all(engine)`, ` SQLModel.metadata.create_all(engine)`),
+
+ 'utf8',
+ );
+
+ fs.copySync(`./manifests/deployment/fastapi/initial_data.sh`, `${path}/backend/initial_data.sh`);
+
+ fs.writeFileSync(
+ `${path}/frontend/Dockerfile`,
+ fs
+ .readFileSync(`${path}/frontend/Dockerfile`, 'utf8')
+ .replace('ARG VITE_API_URL=${VITE_API_URL}', `ARG VITE_API_URL='${VITE_API_URL}'`),
+ 'utf8',
+ );
+
+ fs.writeFileSync(
+ `${path}/frontend/.env`,
+ fs
+ .readFileSync(`${path}/frontend/.env`, 'utf8')
+ .replace(`VITE_API_URL=http://localhost:8000`, `VITE_API_URL=${VITE_API_URL}`)
+ .replace(`MAILCATCHER_HOST=http://localhost:1080`, `MAILCATCHER_HOST=http://localhost:1081`),
+
+ 'utf8',
+ );
+
+ if (process.argv.includes('models')) {
+ shellExec(`node bin/deploy fastapi-models`);
+ break;
+ }
+
+ if (process.argv.includes('build-back')) {
+ const imageName = `fastapi-backend:latest`;
+ shellExec(`sudo podman pull docker.io/library/python:3.10`);
+ shellExec(`sudo podman pull ghcr.io/astral-sh/uv:0.5.11`);
+ shellExec(`sudo rm -rf ${path}/${imageName.replace(':', '_')}.tar`);
+ const args = [
+ `node bin dockerfile-image-build --path ${path}/backend/`,
+ `--image-name=${imageName} --image-path=${path}`,
+ `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --reset`,
+ ];
+ shellExec(args.join(' '));
+ }
+ if (process.argv.includes('build-front')) {
+ const imageName = `fastapi-frontend:latest`;
+ shellExec(`sudo podman pull docker.io/library/node:20`);
+ shellExec(`sudo podman pull docker.io/library/nginx:1`);
+ shellExec(`sudo rm -rf ${path}/${imageName.replace(':', '_')}.tar`);
+ const args = [
+ `node bin dockerfile-image-build --path ${path}/frontend/`,
+ `--image-name=${imageName} --image-path=${path}`,
+ `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --reset`,
+ ];
+ shellExec(args.join(' '));
+ }
+ if (process.argv.includes('secret')) {
+ {
+ const secretSelector = `fastapi-postgres-credentials`;
+ shellExec(`sudo kubectl delete secret ${secretSelector}`);
+ shellExec(
+ `sudo kubectl create secret generic ${secretSelector}` +
+ ` --from-literal=POSTGRES_DB=postgresdb` +
+ ` --from-literal=POSTGRES_USER=admin` +
+ ` --from-file=POSTGRES_PASSWORD=/home/dd/engine/engine-private/postgresql-password`,
+ );
+ }
+ {
+ const secretSelector = `fastapi-backend-config-secret`;
+ shellExec(`sudo kubectl delete secret ${secretSelector}`);
+ shellExec(
+ `sudo kubectl create secret generic ${secretSelector}` +
+ ` --from-file=SECRET_KEY=/home/dd/engine/engine-private/postgresql-password` +
+ ` --from-literal=FIRST_SUPERUSER=development@underpost.net` +
+ ` --from-file=FIRST_SUPERUSER_PASSWORD=/home/dd/engine/engine-private/postgresql-password`,
+ );
  }
  }
- if (process.argv.includes('rocky')) {
- shellExec(`sudo systemctl stop valkey`);
- shellExec(`sudo systemctl start valkey`);
- } else shellExec(`cd /home/dd/valkey && ./src/valkey-server`);
+ if (process.argv.includes('run-back')) {
+ shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/backend-deployment.yml`);
+ shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/backend-service.yml`);
+ }
+ if (process.argv.includes('run-front')) {
+ shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/frontend-deployment.yml`);
+ shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/frontend-service.yml`);
+ }
+ break;
+ }
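Most of the 'fastapi' case is the same read-replace-write pattern applied to the template's .env files, db.py and Dockerfile. A small helper capturing that pattern with Node's fs module; the file path and replacement pairs below are illustrative, mirroring two of the substitutions above:

import fs from 'fs';

// Apply a list of [from, to] string replacements to a file in place,
// mirroring the repeated readFileSync/replace/writeFileSync calls above.
function replaceInFile(file, pairs) {
  let content = fs.readFileSync(file, 'utf8');
  for (const [from, to] of pairs) content = content.replace(from, to);
  fs.writeFileSync(file, content, 'utf8');
}

replaceInFile('../full-stack-fastapi-template/.env', [
  ['POSTGRES_DB=app', 'POSTGRES_DB=postgresdb'],
  ['POSTGRES_USER=postgres', 'POSTGRES_USER=admin'],
]);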

+ case 'conda': {
+ // set -e
+ // ENV_NAME="${1:-cuda_env}"
+ // eval "$(conda shell.bash hook)"
+ // conda activate "${ENV_NAME}"
+ shellExec(
+ `export PATH="/root/miniconda3/bin:$PATH" && conda init && conda config --set auto_activate_base false`,
+ );
+ shellExec(`conda env list`);
  break;
  }

- case 'valkey-service': {
- shellExec(`pm2 start bin/deploy.js --node-args=\"--max-old-space-size=8192\" --name valkey -- valkey server`);
+ case 'kafka': {
+ // https://medium.com/@martin.hodges/deploying-kafka-on-a-kind-kubernetes-cluster-for-development-and-testing-purposes-ed7adefe03cb
+ const imageName = `doughgle/kafka-kraft`;
+ shellExec(`docker pull ${imageName}`);
+ if (!process.argv.includes('kubeadm'))
+ shellExec(
+ `${process.argv.includes('kubeadm') ? `ctr -n k8s.io images import` : `kind load docker-image`} ${imageName}`,
+ );
+ shellExec(`kubectl create namespace kafka`);
+ shellExec(`kubectl apply -f ./manifests/deployment/kafka/deployment.yaml`);
+ // kubectl logs kafka-0 -n kafka | grep STARTED
+ // kubectl logs kafka-1 -n kafka | grep STARTED
+ // kubectl logs kafka-2 -n kafka | grep STARTED
+
+ // kafka-topics.sh --create --topic my-topic --bootstrap-server kafka-svc:9092
+ // kafka-topics.sh --list --topic my-topic --bootstrap-server kafka-svc:9092
+ // kafka-topics.sh --delete --topic my-topic --bootstrap-server kafka-svc:9092
+
+ // kafka-console-producer.sh --bootstrap-server kafka-svc:9092 --topic my-topic
+ // kafka-console-consumer.sh --bootstrap-server kafka-svc:9092 --topic my-topic
+ break;
+ }
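Once the Kafka StatefulSet above is running, the commented kafka-topics.sh commands can be exercised from inside one of the brokers. A hedged smoke-test sketch via kubectl exec, assuming the pod and service names from the linked manifest (kafka-0, kafka-svc, namespace kafka) and that kafka-topics.sh is on PATH inside the broker image:

import { execSync } from 'child_process';

const run = (cmd) => execSync(cmd, { encoding: 'utf8', stdio: 'pipe' });

// Create and list a test topic from inside the first broker pod.
const bootstrap = 'kafka-svc:9092';
run(`kubectl exec -n kafka kafka-0 -- kafka-topics.sh --create --topic my-topic --bootstrap-server ${bootstrap}`);
console.log(run(`kubectl exec -n kafka kafka-0 -- kafka-topics.sh --list --bootstrap-server ${bootstrap}`));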
+
+ case 'nvidia-gpu-operator': {
+ // https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
+ shellExec(`curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \
+ sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo`);
+
+ const NVIDIA_CONTAINER_TOOLKIT_VERSION = '1.17.8-1';
+
+ shellExec(`sudo dnf install -y \
+ nvidia-container-toolkit-${NVIDIA_CONTAINER_TOOLKIT_VERSION} \
+ nvidia-container-toolkit-base-${NVIDIA_CONTAINER_TOOLKIT_VERSION} \
+ libnvidia-container-tools-${NVIDIA_CONTAINER_TOOLKIT_VERSION} \
+ libnvidia-container1-${NVIDIA_CONTAINER_TOOLKIT_VERSION}`);
+
+ // https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/getting-started.html
+
+ shellExec(`kubectl create ns gpu-operator`);
+ shellExec(`kubectl label --overwrite ns gpu-operator pod-security.kubernetes.io/enforce=privileged`);
+
+ shellExec(`helm repo add nvidia https://helm.ngc.nvidia.com/nvidia \
+ && helm repo update`);
+
+ // shellExec(`helm install --wait --generate-name \
+ // -n gpu-operator --create-namespace \
+ // nvidia/gpu-operator \
+ // --version=v25.3.1 \
+ // --set toolkit.version=v1.16.1-ubi8`);
+
+ shellExec(`helm install --wait --generate-name \
+ -n gpu-operator --create-namespace \
+ nvidia/gpu-operator \
+ --version=v25.3.1 \
+ --set driver.enabled=false \
+ --set driver.repository=nvcr.io/nvidia \
+ --set cdi.enabled=true \
+ --set cdi.default=true \
+ --set toolkit.env[0].name=CONTAINERD_CONFIG \
+ --set toolkit.env[0].value=/etc/containerd/config.toml \
+ --set toolkit.env[1].name=CONTAINERD_SOCKET \
+ --set toolkit.env[1].value=/run/containerd/containerd.sock \
+ --set toolkit.env[2].name=CONTAINERD_RUNTIME_CLASS \
+ --set toolkit.env[2].value=nvidia \
+ --set-string toolkit.env[3].name=CONTAINERD_SET_AS_DEFAULT \
+ --set-string toolkit.env[3].value=true`);
+
+ // Check gpu drivers
+ shellExec(
+ `break;kubectl get nodes -o json | jq '.items[].metadata.labels | keys | any(startswith("feature.node.kubernetes.io"))'`,
+ );
  break;
  }
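The long helm install above passes the containerd toolkit settings as indexed --set toolkit.env[i] flags. A sketch that assembles those flags from a plain object before shelling out, so the key/value pairs stay readable; the values mirror the command above, and the last pair would still need --set-string (as the original does) to keep it a string:

// Build the indexed --set toolkit.env[i] flags used by the helm install above.
const toolkitEnv = {
  CONTAINERD_CONFIG: '/etc/containerd/config.toml',
  CONTAINERD_SOCKET: '/run/containerd/containerd.sock',
  CONTAINERD_RUNTIME_CLASS: 'nvidia',
  CONTAINERD_SET_AS_DEFAULT: 'true', // the original passes this one via --set-string
};

const setFlags = Object.entries(toolkitEnv)
  .flatMap(([name, value], i) => [
    `--set toolkit.env[${i}].name=${name}`,
    `--set toolkit.env[${i}].value=${value}`,
  ])
  .join(' \\\n  ');

console.log(setFlags); // paste into the helm install invocation above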

- default:
+ case 'kubeflow-spark-operator': {
+ // Use case:
+ // Data Processing Pipelines: Used for ETL tasks where Spark can handle large data volumes efficiently.
+ // Real-Time Analytics: Processing data from streaming sources (e.g., Kafka) for real-time analytics.
+ // Machine Learning and Data Science: Training and deploying machine learning models at scale using Spark MLlib.
+
+ shellExec(`helm repo add spark-operator https://kubeflow.github.io/spark-operator`);
+ shellExec(`helm install spark-operator spark-operator/spark-operator \
+ --namespace spark-operator \
+ --create-namespace \
+ --wait`);
+
+ const image = `spark:3.5.5`;
+ shellExec(`sudo docker pull ${image}`);
+ if (!process.argv.includes('kubeadm'))
+ shellExec(
+ `sudo ${
+ process.argv.includes('kubeadm') ? `ctr -n k8s.io images import` : `kind load docker-image`
+ } ${image}`,
+ );
+ shellExec(`kubectl apply -f ./manifests/deployment/spark/spark-pi-py.yaml`);
+
+ // Check the status of the Spark job:
+ // kubectl get sparkapplications.sparkoperator.k8s.io -n default
+ // kubectl get sparkapplication
+
+ // Check case log:
+ // kubectl logs -f spark-pi-python-driver
+ // kubectl logs -f spark-pi-python-driver | grep Pi
+ // kubectl describe sparkapplication spark-gpu-test
+
+ // Uninstall:
+ // kubectl delete sparkapplications.sparkoperator.k8s.io spark-pi-python -n default
+ // helm delete spark-operator -n spark-operator
+
+ // Gpu plugins:
+ // https://github.com/NVIDIA/spark-rapids
+ // RAPIDS Accelerator
  break;
+ }
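After the SparkApplication manifest above is applied, its progress can be watched from the custom resource's status instead of the driver logs. A sketch that polls the application state with kubectl, assuming the spark-operator CRD exposes .status.applicationState.state and that the application name matches the spark-pi-python name referenced in the comments above:

import { execSync } from 'child_process';

// Poll the SparkApplication until it completes or fails.
const name = 'spark-pi-python'; // assumed from manifests/deployment/spark/spark-pi-py.yaml
for (let i = 0; i < 60; i++) {
  const state = execSync(
    `kubectl get sparkapplication ${name} -o jsonpath='{.status.applicationState.state}'`,
    { encoding: 'utf8' },
  ).trim();
  console.log(`attempt ${i}: ${state || 'PENDING'}`);
  if (state === 'COMPLETED' || state === 'FAILED') break;
  execSync('sleep 10');
}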
+
+ case 'sbt': {
+ // https://www.scala-sbt.org/1.x/docs/Installing-sbt-on-Linux.html
+
+ // sudo rm -f /etc/yum.repos.d/bintray-rpm.repo
+ // curl -L https://www.scala-sbt.org/sbt-rpm.repo > sbt-rpm.repo
+ // sudo mv sbt-rpm.repo /etc/yum.repos.d/
+ // sudo yum install sbt
+ break;
+ }
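The 'sbt' case only records the Scala sbt install steps as comments. A sketch of the same steps as code, assuming a yum/dnf-based host; the repo URL is the one documented above, and -y is added only to keep the run non-interactive:

import { execSync } from 'child_process';

const run = (cmd) => execSync(cmd, { stdio: 'inherit' });

// Install sbt from the official rpm repository, following the commented steps above.
run('sudo rm -f /etc/yum.repos.d/bintray-rpm.repo');
run('curl -L https://www.scala-sbt.org/sbt-rpm.repo > sbt-rpm.repo');
run('sudo mv sbt-rpm.repo /etc/yum.repos.d/');
run('sudo yum install -y sbt');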
  }
  } catch (error) {
  logger.error(error, error.stack);