underpost 2.8.652 → 2.8.781
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.vscode/extensions.json +37 -2
- package/.vscode/settings.json +2 -0
- package/CHANGELOG.md +24 -4
- package/README.md +5 -4
- package/bin/deploy.js +1455 -144
- package/cli.md +57 -14
- package/docker-compose.yml +1 -1
- package/manifests/deployment/adminer/deployment.yaml +32 -0
- package/manifests/deployment/adminer/kustomization.yaml +7 -0
- package/manifests/deployment/adminer/service.yaml +13 -0
- package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
- package/manifests/deployment/fastapi/backend-service.yml +19 -0
- package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
- package/manifests/deployment/fastapi/frontend-service.yml +15 -0
- package/manifests/deployment/fastapi/initial_data.sh +56 -0
- package/manifests/deployment/kafka/deployment.yaml +69 -0
- package/manifests/deployment/spark/spark-pi-py.yaml +21 -0
- package/manifests/envoy-service-nodeport.yaml +23 -0
- package/manifests/kubeadm-calico-config.yaml +119 -0
- package/manifests/kubelet-config.yaml +65 -0
- package/manifests/mongodb/kustomization.yaml +1 -1
- package/manifests/mongodb/statefulset.yaml +12 -11
- package/manifests/mongodb/storage-class.yaml +9 -0
- package/manifests/mongodb-4.4/service-deployment.yaml +1 -1
- package/manifests/mysql/kustomization.yaml +7 -0
- package/manifests/mysql/pv-pvc.yaml +27 -0
- package/manifests/mysql/statefulset.yaml +55 -0
- package/manifests/postgresql/configmap.yaml +9 -0
- package/manifests/postgresql/kustomization.yaml +10 -0
- package/manifests/postgresql/pv.yaml +15 -0
- package/manifests/postgresql/pvc.yaml +13 -0
- package/manifests/postgresql/service.yaml +10 -0
- package/manifests/postgresql/statefulset.yaml +37 -0
- package/manifests/valkey/statefulset.yaml +4 -3
- package/package.json +2 -1
- package/src/cli/cluster.js +281 -27
- package/src/cli/deploy.js +81 -15
- package/src/cli/fs.js +14 -3
- package/src/cli/image.js +34 -7
- package/src/cli/index.js +36 -1
- package/src/cli/lxd.js +19 -0
- package/src/cli/monitor.js +75 -30
- package/src/cli/repository.js +9 -6
- package/src/client/components/core/JoyStick.js +2 -2
- package/src/client/components/core/Modal.js +1 -0
- package/src/index.js +1 -1
- package/src/runtime/lampp/Dockerfile +1 -1
- package/src/server/conf.js +5 -1
- package/src/server/dns.js +47 -17
- package/src/server/runtime.js +2 -0
- package/src/server/start.js +0 -1
package/bin/deploy.js
CHANGED
|
@@ -26,9 +26,10 @@ import {
|
|
|
26
26
|
fixDependencies,
|
|
27
27
|
setUpProxyMaintenanceServer,
|
|
28
28
|
writeEnv,
|
|
29
|
+
getUnderpostRootPath,
|
|
29
30
|
} from '../src/server/conf.js';
|
|
30
31
|
import { buildClient } from '../src/server/client-build.js';
|
|
31
|
-
import { range, setPad, timer, uniqueArray } from '../src/client/components/core/CommonJs.js';
|
|
32
|
+
import { range, s4, setPad, timer, uniqueArray } from '../src/client/components/core/CommonJs.js';
|
|
32
33
|
import { MongooseDB } from '../src/db/mongo/MongooseDB.js';
|
|
33
34
|
import { Lampp } from '../src/runtime/lampp/Lampp.js';
|
|
34
35
|
import { DefaultConf } from '../conf.js';
|
|
@@ -37,6 +38,11 @@ import { JSONweb } from '../src/server/client-formatted.js';
|
|
|
37
38
|
import { Xampp } from '../src/runtime/xampp/Xampp.js';
|
|
38
39
|
import { ejs } from '../src/server/json-schema.js';
|
|
39
40
|
import { buildCliDoc } from '../src/cli/index.js';
|
|
41
|
+
import { getLocalIPv4Address, ip } from '../src/server/dns.js';
|
|
42
|
+
import { Downloader } from '../src/server/downloader.js';
|
|
43
|
+
import colors from 'colors';
|
|
44
|
+
|
|
45
|
+
colors.enable();
|
|
40
46
|
|
|
41
47
|
const logger = loggerFactory(import.meta);
|
|
42
48
|
|
|
@@ -44,6 +50,82 @@ logger.info('argv', process.argv);
|
|
|
44
50
|
|
|
45
51
|
const [exe, dir, operator] = process.argv;
|
|
46
52
|
|
|
53
|
+
const updateVirtualRoot = async ({ nfsHostPath, IP_ADDRESS, ipaddr }) => {
|
|
54
|
+
const steps = [
|
|
55
|
+
`apt update`,
|
|
56
|
+
`ln -sf /lib/systemd/systemd /sbin/init`,
|
|
57
|
+
// `sudo apt install linux-modules-extra-6.8.0-31-generic`,
|
|
58
|
+
`apt install -y sudo`,
|
|
59
|
+
`apt install -y ntp`,
|
|
60
|
+
`apt install -y openssh-server`,
|
|
61
|
+
`apt install -y iptables`,
|
|
62
|
+
`update-alternatives --set iptables /usr/sbin/iptables-legacy`,
|
|
63
|
+
`update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy`,
|
|
64
|
+
`apt install -y locales`,
|
|
65
|
+
`apt install -y cloud-init`,
|
|
66
|
+
`mkdir -p /var/lib/cloud`,
|
|
67
|
+
`chown -R root:root /var/lib/cloud`,
|
|
68
|
+
`chmod -R 0755 /var/lib/cloud`,
|
|
69
|
+
`mkdir -p /home/root/.ssh`,
|
|
70
|
+
`echo '${fs.readFileSync(
|
|
71
|
+
`/home/dd/engine/engine-private/deploy/id_rsa.pub`,
|
|
72
|
+
'utf8',
|
|
73
|
+
)}' >> /home/root/.ssh/authorized_keys`,
|
|
74
|
+
`chmod 700 /home/root/.ssh`,
|
|
75
|
+
`chmod 600 /home/root/.ssh/authorized_keys`,
|
|
76
|
+
`systemctl enable ssh`,
|
|
77
|
+
`systemctl enable ntp`,
|
|
78
|
+
`apt install -y linux-generic-hwe-24.04`,
|
|
79
|
+
`modprobe ip_tables`,
|
|
80
|
+
`cat <<EOF_MAAS_CFG > /etc/cloud/cloud.cfg.d/90_maas.cfg
|
|
81
|
+
datasource_list: [ MAAS ]
|
|
82
|
+
datasource:
|
|
83
|
+
MAAS:
|
|
84
|
+
metadata_url: http://${IP_ADDRESS}:5248/MAAS/metadata
|
|
85
|
+
users:
|
|
86
|
+
- name: ${process.env.MAAS_ADMIN_USERNAME}
|
|
87
|
+
ssh_authorized_keys:
|
|
88
|
+
- ${fs.readFileSync(`/home/dd/engine/engine-private/deploy/id_rsa.pub`, 'utf8')}
|
|
89
|
+
sudo: "ALL=(ALL) NOPASSWD:ALL"
|
|
90
|
+
groups: sudo
|
|
91
|
+
shell: /bin/bash
|
|
92
|
+
packages:
|
|
93
|
+
- git
|
|
94
|
+
- htop
|
|
95
|
+
- ufw
|
|
96
|
+
# package_update: true
|
|
97
|
+
runcmd:
|
|
98
|
+
- ufw enable
|
|
99
|
+
- ufw allow ssh
|
|
100
|
+
resize_rootfs: false
|
|
101
|
+
growpart:
|
|
102
|
+
mode: off
|
|
103
|
+
network:
|
|
104
|
+
version: 2
|
|
105
|
+
ethernets:
|
|
106
|
+
${process.env.RPI4_INTERFACE_NAME}:
|
|
107
|
+
dhcp4: true
|
|
108
|
+
addresses:
|
|
109
|
+
- ${ipaddr}/24
|
|
110
|
+
EOF_MAAS_CFG`,
|
|
111
|
+
];
|
|
112
|
+
|
|
113
|
+
shellExec(`sudo chroot ${nfsHostPath} /usr/bin/qemu-aarch64-static /bin/bash <<'EOF'
|
|
114
|
+
${steps
|
|
115
|
+
.map(
|
|
116
|
+
(s, i) => `echo "step ${i + 1}/${steps.length}: ${s.split('\n')[0]}"
|
|
117
|
+
${s}
|
|
118
|
+
`,
|
|
119
|
+
)
|
|
120
|
+
.join(``)}
|
|
121
|
+
EOF`);
|
|
122
|
+
|
|
123
|
+
shellExec(`sudo chroot ${nfsHostPath} /usr/bin/qemu-aarch64-static /bin/bash <<'EOF'
|
|
124
|
+
echo "nameserver ${process.env.MAAS_DNS}" | tee /etc/resolv.conf > /dev/null
|
|
125
|
+
apt update
|
|
126
|
+
EOF`);
|
|
127
|
+
};
|
|
128
|
+
|
|
47
129
|
try {
|
|
48
130
|
switch (operator) {
|
|
49
131
|
case 'save':
|
|
@@ -734,8 +816,8 @@ try {
|
|
|
734
816
|
shellExec(`node bin/deploy update-dependencies`);
|
|
735
817
|
shellExec(`auto-changelog`);
|
|
736
818
|
shellExec(`node bin/build dd`);
|
|
737
|
-
shellExec(`node bin deploy
|
|
738
|
-
shellExec(`node bin deploy
|
|
819
|
+
shellExec(`node bin deploy --kubeadm --build-manifest --sync --info-router --replicas 1 dd`);
|
|
820
|
+
shellExec(`node bin deploy --kubeadm --build-manifest --sync --info-router --replicas 1 dd production`);
|
|
739
821
|
break;
|
|
740
822
|
}
|
|
741
823
|
|
|
@@ -865,157 +947,125 @@ ${shellExec(`git log | grep Author: | sort -u`, { stdout: true }).split(`\n`).jo
|
|
|
865
947
|
|
|
866
948
|
break;
|
|
867
949
|
}
|
|
868
|
-
case 'ssh-export-server-keys': {
|
|
869
|
-
fs.copyFile('/etc/ssh/ssh_host_rsa_key', './engine-private/deploy/ssh_host_rsa_key');
|
|
870
|
-
fs.copyFile('/etc/ssh/ssh_host_rsa_key.pub', './engine-private/deploy/ssh_host_rsa_key.pub');
|
|
871
|
-
break;
|
|
872
|
-
}
|
|
873
|
-
case 'ssh-import-server-keys': {
|
|
874
|
-
fs.copyFile('./engine-private/deploy/ssh_host_rsa_key', '/etc/ssh/ssh_host_rsa_key');
|
|
875
|
-
fs.copyFile('./engine-private/deploy/ssh_host_rsa_key.pub', '/etc/ssh/ssh_host_rsa_key.pub');
|
|
876
|
-
break;
|
|
877
|
-
}
|
|
878
|
-
case 'ssh-import-client-keys': {
|
|
879
|
-
const host = process.argv[3];
|
|
880
|
-
shellExec(
|
|
881
|
-
`node bin/deploy set-ssh-keys ./engine-private/deploy/ssh_host_rsa_key ${host ? ` ${host}` : ``} ${
|
|
882
|
-
process.argv.includes('clean') ? 'clean' : ''
|
|
883
|
-
}`,
|
|
884
|
-
);
|
|
885
|
-
break;
|
|
886
|
-
}
|
|
887
|
-
case 'ssh-keys': {
|
|
888
|
-
// create ssh keys
|
|
889
|
-
const sshAccount = process.argv[3]; // [sudo username]@[host/ip]
|
|
890
|
-
const destPath = process.argv[4];
|
|
891
|
-
// shellExec(`ssh-keygen -t ed25519 -C "${sshAccount}" -f ${destPath}`);
|
|
892
|
-
if (fs.existsSync(destPath)) {
|
|
893
|
-
fs.removeSync(destPath);
|
|
894
|
-
fs.removeSync(destPath + '.pub');
|
|
895
|
-
}
|
|
896
|
-
shellExec(`ssh-keygen -t rsa -b 4096 -C "${sshAccount}" -f ${destPath}`);
|
|
897
|
-
// add host to keyscan
|
|
898
|
-
// shellExec(`ssh-keyscan -t rsa ${sshAccount.split(`@`)[1]} >> ~/.ssh/known_hosts`);
|
|
899
|
-
break;
|
|
900
|
-
}
|
|
901
|
-
|
|
902
|
-
case 'set-ssh-keys': {
|
|
903
|
-
const files = ['authorized_keys', 'id_rsa', 'id_rsa.pub', 'known_hosts ', 'known_hosts.old'];
|
|
904
|
-
|
|
905
|
-
// > write
|
|
906
|
-
// >> append
|
|
907
|
-
|
|
908
|
-
// /root/.ssh/id_rsa
|
|
909
|
-
// /root/.ssh/id_rsa.pub
|
|
910
|
-
if (process.argv.includes('clean')) {
|
|
911
|
-
for (const file of files) {
|
|
912
|
-
if (fs.existsSync(`/root/.ssh/${file}`)) {
|
|
913
|
-
logger.info('remove', `/root/.ssh/${file}`);
|
|
914
|
-
fs.removeSync(`/root/.ssh/${file}`);
|
|
915
|
-
}
|
|
916
|
-
fs.writeFileSync(`/root/.ssh/${file}`, '', 'utf8');
|
|
917
|
-
}
|
|
918
|
-
shellExec('eval `ssh-agent -s`' + ` && ssh-add -D`);
|
|
919
|
-
}
|
|
920
|
-
|
|
921
|
-
const destPath = process.argv[3];
|
|
922
|
-
const sshAuthKeyTarget = '/root/.ssh/authorized_keys';
|
|
923
|
-
if (!fs.existsSync(sshAuthKeyTarget)) shellExec(`touch ${sshAuthKeyTarget}`);
|
|
924
|
-
shellExec(`cat ${destPath}.pub > ${sshAuthKeyTarget}`);
|
|
925
|
-
shellExec(`cat ${destPath} >> ${sshAuthKeyTarget}`);
|
|
926
|
-
|
|
927
|
-
if (!fs.existsSync('/root/.ssh/id_rsa')) shellExec(`touch ${'/root/.ssh/id_rsa'}`);
|
|
928
|
-
shellExec(`cat ${destPath} > ${'/root/.ssh/id_rsa'}`);
|
|
929
|
-
|
|
930
|
-
if (!fs.existsSync('/root/.ssh/id_rsa.pub')) shellExec(`touch ${'/root/.ssh/id_rsa.pub'}`);
|
|
931
|
-
shellExec(`cat ${destPath}.pub > ${'/root/.ssh/id_rsa.pub'}`);
|
|
932
|
-
|
|
933
|
-
shellExec(`chmod 700 /root/.ssh/`);
|
|
934
|
-
for (const file of files) {
|
|
935
|
-
shellExec(`chmod 600 /root/.ssh/${file}`);
|
|
936
|
-
}
|
|
937
|
-
const host = process.argv[4];
|
|
938
|
-
// add key
|
|
939
|
-
shellExec('eval `ssh-agent -s`' + ' && ssh-add /root/.ssh/id_rsa' + ' && ssh-add -l');
|
|
940
|
-
if (host) shellExec(`ssh-keyscan -H ${host} >> ~/.ssh/known_hosts`);
|
|
941
|
-
shellExec(`sudo systemctl enable ssh`);
|
|
942
|
-
shellExec(`sudo systemctl restart ssh`);
|
|
943
|
-
shellExec(`sudo systemctl status ssh`);
|
|
944
|
-
|
|
945
|
-
break;
|
|
946
|
-
}
|
|
947
950
|
|
|
948
951
|
case 'ssh': {
|
|
949
|
-
|
|
950
|
-
|
|
952
|
+
const host = process.argv[3] ?? `root@${await ip.public.ipv4()}`;
|
|
953
|
+
const domain = host.split('@')[1];
|
|
954
|
+
const user = 'root'; // host.split('@')[0];
|
|
955
|
+
const password = process.argv[4] ?? '';
|
|
956
|
+
const port = 22;
|
|
957
|
+
|
|
958
|
+
const setUpSSH = () => {
|
|
959
|
+
// Required port forwarding mapping
|
|
960
|
+
// ssh TCP 2222 22 <local-server-ip>
|
|
961
|
+
// ssh UDP 2222 22 <local-server-ip>
|
|
962
|
+
|
|
963
|
+
// Remote connect via public key
|
|
964
|
+
// ssh -i <key-path> <user>@<host>:2222
|
|
965
|
+
|
|
966
|
+
shellExec(`cat ./engine-private/deploy/id_rsa.pub > ~/.ssh/authorized_keys`);
|
|
967
|
+
|
|
968
|
+
// local trust on first use validator
|
|
969
|
+
// check ~/.ssh/known_hosts
|
|
970
|
+
|
|
971
|
+
// shellExec(`sudo sed -i -e "s@#PasswordAuthentication yes@PasswordAuthentication no@g" /etc/ssh/sshd_config`);
|
|
972
|
+
// shellExec(`sudo sed -i -e "s@#UsePAM no@UsePAM yes@g" /etc/ssh/sshd_config`);
|
|
973
|
+
|
|
974
|
+
// Include /etc/ssh/sshd_config.d/*.conf
|
|
975
|
+
// sudo tee /etc/ssh/sshd_config.d/99-custom.conf
|
|
976
|
+
shellExec(`sudo tee /etc/ssh/sshd_config <<EOF
|
|
977
|
+
PasswordAuthentication no
|
|
978
|
+
ChallengeResponseAuthentication yes
|
|
979
|
+
UsePAM yes
|
|
980
|
+
PubkeyAuthentication Yes
|
|
981
|
+
RSAAuthentication Yes
|
|
982
|
+
PermitRootLogin Yes
|
|
983
|
+
X11Forwarding yes
|
|
984
|
+
X11DisplayOffset 10
|
|
985
|
+
LoginGraceTime 120
|
|
986
|
+
StrictModes yes
|
|
987
|
+
SyslogFacility AUTH
|
|
988
|
+
LogLevel INFO
|
|
989
|
+
#HostKey /etc/ssh/ssh_host_ecdsa_key
|
|
990
|
+
HostKey /etc/ssh/ssh_host_ed25519_key
|
|
991
|
+
#HostKey /etc/ssh/ssh_host_rsa_key
|
|
992
|
+
AuthorizedKeysFile ~/.ssh/authorized_keys
|
|
993
|
+
Subsystem sftp /usr/libexec/openssh/sftp-server
|
|
994
|
+
ListenAddress 0.0.0.0
|
|
995
|
+
ListenAddress ::
|
|
996
|
+
ListenAddress ${domain}
|
|
997
|
+
ListenAddress ${domain}:22
|
|
998
|
+
EOF`);
|
|
999
|
+
|
|
1000
|
+
shellExec(`sudo chmod 700 ~/.ssh/`);
|
|
1001
|
+
shellExec(`sudo chmod 600 ~/.ssh/authorized_keys`);
|
|
1002
|
+
shellExec(`sudo chmod 644 ~/.ssh/known_hosts`);
|
|
1003
|
+
shellExec(`sudo chmod 600 ~/.ssh/id_rsa`);
|
|
1004
|
+
shellExec(`sudo chmod 600 /etc/ssh/ssh_host_ed25519_key`);
|
|
1005
|
+
shellExec(`chown -R ${user}:${user} ~/.ssh`);
|
|
1006
|
+
|
|
1007
|
+
shellExec(`ufw allow ${port}/tcp`);
|
|
1008
|
+
shellExec(`ufw allow ${port}/udp`);
|
|
1009
|
+
shellExec(`ufw allow ssh`);
|
|
1010
|
+
shellExec(`ufw allow from 192.168.0.0/16 to any port 22`);
|
|
1011
|
+
|
|
1012
|
+
// active ssh-agent
|
|
1013
|
+
shellExec('eval `ssh-agent -s`' + ` && ssh-add ~/.ssh/id_rsa` + ` && ssh-add -l`);
|
|
1014
|
+
// remove all
|
|
1015
|
+
// shellExec(`ssh-add -D`);
|
|
1016
|
+
// remove single
|
|
1017
|
+
// shellExec(`ssh-add -d ~/.ssh/id_rsa`);
|
|
1018
|
+
|
|
1019
|
+
// shellExec(`echo "@${host.split(`@`)[1]} * $(cat ~/.ssh/id_rsa.pub)" > ~/.ssh/known_hosts`);
|
|
1020
|
+
shellExec('eval `ssh-agent -s`' + `&& ssh-keyscan -H -t ed25519 ${host.split(`@`)[1]} > ~/.ssh/known_hosts`);
|
|
1021
|
+
// shellExec(`sudo echo "" > ~/.ssh/known_hosts`);
|
|
1022
|
+
|
|
1023
|
+
// ssh-copy-id -i ~/.ssh/id_rsa.pub -p <port_number> <username>@<host>
|
|
1024
|
+
shellExec(`ssh-copy-id -i ~/.ssh/id_rsa.pub -p ${port} ${host}`);
|
|
1025
|
+
// debug:
|
|
1026
|
+
// shellExec(`ssh -vvv ${host}`);
|
|
1027
|
+
|
|
1028
|
+
shellExec(`sudo cp ./engine-private/deploy/id_rsa ~/.ssh/id_rsa`);
|
|
1029
|
+
shellExec(`sudo cp ./engine-private/deploy/id_rsa.pub ~/.ssh/id_rsa.pub`);
|
|
1030
|
+
|
|
1031
|
+
shellExec(`sudo echo "" > /etc/ssh/ssh_host_ecdsa_key`);
|
|
1032
|
+
shellExec(`sudo cp ./engine-private/deploy/id_rsa /etc/ssh/ssh_host_ed25519_key`);
|
|
1033
|
+
shellExec(`sudo echo "" > /etc/ssh/ssh_host_rsa_key`);
|
|
1034
|
+
|
|
1035
|
+
shellExec(`sudo echo "" > /etc/ssh/ssh_host_ecdsa_key.pub`);
|
|
1036
|
+
shellExec(`sudo cp ./engine-private/deploy/id_rsa.pub /etc/ssh/ssh_host_ed25519_key.pub`);
|
|
1037
|
+
shellExec(`sudo echo "" > /etc/ssh/ssh_host_rsa_key.pub`);
|
|
951
1038
|
|
|
952
|
-
shellExec(`sudo systemctl
|
|
1039
|
+
shellExec(`sudo systemctl enable sshd`);
|
|
1040
|
+
shellExec(`sudo systemctl restart sshd`);
|
|
953
1041
|
|
|
954
|
-
shellExec(`sudo systemctl status sshd
|
|
1042
|
+
const status = shellExec(`sudo systemctl status sshd`, { silent: true, stdout: true });
|
|
1043
|
+
console.log(
|
|
1044
|
+
status.match('running') ? status.replaceAll(`running`, `running`.green) : `ssh service not running`.red,
|
|
1045
|
+
);
|
|
1046
|
+
};
|
|
955
1047
|
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
shellExec(`sudo apt update`);
|
|
960
|
-
shellExec(`sudo apt install openssh-server -y`);
|
|
961
|
-
shellExec(`sudo apt install ssh-askpass`);
|
|
962
|
-
}
|
|
963
|
-
shellExec(`sudo systemctl enable ssh`);
|
|
964
|
-
shellExec(`sudo systemctl restart ssh`);
|
|
965
|
-
shellExec(`sudo systemctl status ssh`);
|
|
1048
|
+
if (process.argv.includes('import')) {
|
|
1049
|
+
setUpSSH();
|
|
1050
|
+
break;
|
|
966
1051
|
}
|
|
967
|
-
// sudo service ssh restart
|
|
968
|
-
shellExec(`ip a`);
|
|
969
|
-
|
|
970
|
-
// adduser newuser
|
|
971
|
-
// usermod -aG sudo newuser
|
|
972
|
-
|
|
973
|
-
// ssh -i '/path/to/keyfile' username@server
|
|
974
1052
|
|
|
975
|
-
|
|
1053
|
+
shellExec(`sudo rm -rf ./id_rsa`);
|
|
1054
|
+
shellExec(`sudo rm -rf ./id_rsa.pub`);
|
|
976
1055
|
|
|
977
|
-
|
|
1056
|
+
if (process.argv.includes('legacy'))
|
|
1057
|
+
shellExec(`ssh-keygen -t rsa -b 4096 -f id_rsa -N "${password}" -q -C "${host}"`);
|
|
1058
|
+
else shellExec(`ssh-keygen -t ed25519 -f id_rsa -N "${password}" -q -C "${host}"`);
|
|
978
1059
|
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
// chmod 600 authorized_keys
|
|
1060
|
+
shellExec(`sudo cp ./id_rsa ~/.ssh/id_rsa`);
|
|
1061
|
+
shellExec(`sudo cp ./id_rsa.pub ~/.ssh/id_rsa.pub`);
|
|
982
1062
|
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
// add public key to authorized keys
|
|
986
|
-
// cat .ssh/id_ed25519.pub | ssh [sudo username]@[host/ip] 'cat >> .ssh/authorized_keys'
|
|
987
|
-
|
|
988
|
-
// 2. Open /etc/ssh/sshd_config file
|
|
989
|
-
// nano /etc/ssh/sshd_config
|
|
990
|
-
|
|
991
|
-
// 3. add example code to last line of file
|
|
992
|
-
// Match User newuser
|
|
993
|
-
// PasswordAuthentication yes
|
|
994
|
-
|
|
995
|
-
// ssh [sudo username]@[host/ip]
|
|
996
|
-
// open port 22
|
|
997
|
-
|
|
998
|
-
// init ssh agent service
|
|
999
|
-
// eval `ssh-agent -s`
|
|
1000
|
-
|
|
1001
|
-
// list keys
|
|
1002
|
-
// ssh-add -l
|
|
1003
|
-
|
|
1004
|
-
// add key
|
|
1005
|
-
// ssh-add /root/.ssh/id_rsa
|
|
1006
|
-
|
|
1007
|
-
// remove
|
|
1008
|
-
// ssh-add -d /path/to/private/key
|
|
1009
|
-
|
|
1010
|
-
// remove all
|
|
1011
|
-
// ssh-add -D
|
|
1012
|
-
|
|
1013
|
-
// sshpass -p ${{ secrets.PSWD }} ssh -o StrictHostKeyChecking=no -p 22 ${{ secrets.USER}}@${{ secrets.VPS_IP }} 'cd /home/adam && ./deploy.sh'
|
|
1014
|
-
|
|
1015
|
-
// copies the public key of your default identity (use -i identity_file for other identities) to the remote host.
|
|
1016
|
-
// ssh-copy-id user@hostname.example.com
|
|
1017
|
-
// ssh-copy-id "user@hostname.example.com -p <port-number>"
|
|
1063
|
+
shellExec(`sudo cp ./id_rsa ./engine-private/deploy/id_rsa`);
|
|
1064
|
+
shellExec(`sudo cp ./id_rsa.pub ./engine-private/deploy/id_rsa.pub`);
|
|
1018
1065
|
|
|
1066
|
+
shellExec(`sudo rm -rf ./id_rsa`);
|
|
1067
|
+
shellExec(`sudo rm -rf ./id_rsa.pub`);
|
|
1068
|
+
setUpSSH();
|
|
1019
1069
|
break;
|
|
1020
1070
|
}
|
|
1021
1071
|
|
|
@@ -1092,8 +1142,1269 @@ ${shellExec(`git log | grep Author: | sort -u`, { stdout: true }).split(`\n`).jo
|
|
|
1092
1142
|
break;
|
|
1093
1143
|
}
|
|
1094
1144
|
|
|
1095
|
-
|
|
1145
|
+
case 'monitor': {
|
|
1146
|
+
shellExec(
|
|
1147
|
+
`node bin monitor ${process.argv[6] === 'sync' ? '--sync ' : ''}--type ${process.argv[3]} ${process.argv[4]} ${
|
|
1148
|
+
process.argv[5]
|
|
1149
|
+
}`,
|
|
1150
|
+
{
|
|
1151
|
+
async: true,
|
|
1152
|
+
},
|
|
1153
|
+
);
|
|
1154
|
+
break;
|
|
1155
|
+
}
|
|
1156
|
+
|
|
1157
|
+
case 'postgresql': {
|
|
1158
|
+
if (process.argv.includes('install')) {
|
|
1159
|
+
shellExec(`sudo dnf install -y postgresql-server postgresql`);
|
|
1160
|
+
shellExec(`sudo postgresql-setup --initdb`);
|
|
1161
|
+
shellExec(`chown postgres /var/lib/pgsql/data`);
|
|
1162
|
+
shellExec(`sudo systemctl enable postgresql.service`);
|
|
1163
|
+
shellExec(`sudo systemctl start postgresql.service`);
|
|
1164
|
+
} else {
|
|
1165
|
+
shellExec(`sudo systemctl enable postgresql.service`);
|
|
1166
|
+
shellExec(`sudo systemctl restart postgresql.service`);
|
|
1167
|
+
}
|
|
1168
|
+
|
|
1169
|
+
shellExec(`sudo systemctl status postgresql.service`);
|
|
1170
|
+
|
|
1171
|
+
// sudo systemctl stop postgresql
|
|
1172
|
+
// sudo systemctl disable postgresql
|
|
1173
|
+
|
|
1174
|
+
// psql login
|
|
1175
|
+
// psql -U <user> -h 127.0.0.1 -W <db-name>
|
|
1176
|
+
|
|
1177
|
+
// gedit /var/lib/pgsql/data/pg_hba.conf
|
|
1178
|
+
// host <db-name> <db-user> <db-host> md5
|
|
1179
|
+
// local all postgres trust
|
|
1180
|
+
// # "local" is for Unix domain socket connections only
|
|
1181
|
+
// local all all md5
|
|
1182
|
+
// # IPv4 local connections:
|
|
1183
|
+
// host all all 127.0.0.1/32 md5
|
|
1184
|
+
// # IPv6 local connections:
|
|
1185
|
+
// host all all ::1/128 md5
|
|
1186
|
+
|
|
1187
|
+
// gedit /var/lib/pgsql/data/postgresql.conf
|
|
1188
|
+
// listen_addresses = '*'
|
|
1189
|
+
|
|
1190
|
+
break;
|
|
1191
|
+
}
|
|
1192
|
+
|
|
1193
|
+
case 'postgresql-14': {
|
|
1194
|
+
shellExec(`sudo /usr/pgsql-14/bin/postgresql-14-setup initdb`);
|
|
1195
|
+
shellExec(`sudo systemctl start postgresql-14`);
|
|
1196
|
+
shellExec(`sudo systemctl enable postgresql-14`);
|
|
1197
|
+
shellExec(`sudo systemctl status postgresql-14`);
|
|
1198
|
+
// sudo dnf install postgresql14-contrib
|
|
1199
|
+
break;
|
|
1200
|
+
}
|
|
1201
|
+
|
|
1202
|
+
case 'pg-stop': {
|
|
1203
|
+
shellExec(`sudo systemctl stop postgresql-14`);
|
|
1204
|
+
shellExec(`sudo systemctl disable postgresql-14`);
|
|
1205
|
+
break;
|
|
1206
|
+
}
|
|
1207
|
+
case 'pg-start': {
|
|
1208
|
+
shellExec(`sudo systemctl enable postgresql-14`);
|
|
1209
|
+
shellExec(`sudo systemctl restart postgresql-14`);
|
|
1210
|
+
break;
|
|
1211
|
+
}
|
|
1212
|
+
|
|
1213
|
+
case 'pg-list-db': {
|
|
1214
|
+
shellExec(`sudo -i -u postgres psql -c "\\l"`);
|
|
1215
|
+
break;
|
|
1216
|
+
}
|
|
1217
|
+
|
|
1218
|
+
case 'pg-list-table': {
|
|
1219
|
+
shellExec(`sudo -i -u postgres psql -c "\\dt *.*"`);
|
|
1220
|
+
// schema_name.*
|
|
1096
1221
|
break;
|
|
1222
|
+
}
|
|
1223
|
+
case 'pg-drop-db': {
|
|
1224
|
+
shellExec(`sudo -i -u postgres psql -c "DROP DATABASE ${process.argv[3]} WITH (FORCE)"`);
|
|
1225
|
+
shellExec(`sudo -i -u postgres psql -c "DROP USER ${process.argv[4]}"`);
|
|
1226
|
+
break;
|
|
1227
|
+
}
|
|
1228
|
+
|
|
1229
|
+
case 'maas-stop': {
|
|
1230
|
+
shellExec(`sudo snap stop maas`);
|
|
1231
|
+
break;
|
|
1232
|
+
}
|
|
1233
|
+
|
|
1234
|
+
case 'maas': {
|
|
1235
|
+
dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
|
|
1236
|
+
const IP_ADDRESS = getLocalIPv4Address();
|
|
1237
|
+
const serverip = IP_ADDRESS;
|
|
1238
|
+
const tftpRoot = process.env.TFTP_ROOT;
|
|
1239
|
+
const ipaddr = process.env.RPI4_IP;
|
|
1240
|
+
const netmask = process.env.NETMASK;
|
|
1241
|
+
const gatewayip = process.env.GATEWAY_IP;
|
|
1242
|
+
|
|
1243
|
+
let resources;
|
|
1244
|
+
try {
|
|
1245
|
+
resources = JSON.parse(
|
|
1246
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-resources read`, {
|
|
1247
|
+
silent: true,
|
|
1248
|
+
stdout: true,
|
|
1249
|
+
}),
|
|
1250
|
+
).map((o) => ({
|
|
1251
|
+
id: o.id,
|
|
1252
|
+
name: o.name,
|
|
1253
|
+
architecture: o.architecture,
|
|
1254
|
+
}));
|
|
1255
|
+
} catch (error) {
|
|
1256
|
+
logger.error(error);
|
|
1257
|
+
}
|
|
1258
|
+
|
|
1259
|
+
const machineFactory = (m) => ({
|
|
1260
|
+
system_id: m.interface_set[0].system_id,
|
|
1261
|
+
mac_address: m.interface_set[0].mac_address,
|
|
1262
|
+
hostname: m.hostname,
|
|
1263
|
+
status_name: m.status_name,
|
|
1264
|
+
});
|
|
1265
|
+
|
|
1266
|
+
let machines;
|
|
1267
|
+
try {
|
|
1268
|
+
machines = JSON.parse(
|
|
1269
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machines read`, {
|
|
1270
|
+
stdout: true,
|
|
1271
|
+
silent: true,
|
|
1272
|
+
}),
|
|
1273
|
+
).map((m) => machineFactory(m));
|
|
1274
|
+
} catch (error) {
|
|
1275
|
+
logger.error(error);
|
|
1276
|
+
}
|
|
1277
|
+
|
|
1278
|
+
if (process.argv.includes('db')) {
|
|
1279
|
+
// DROP, ALTER, CREATE, WITH ENCRYPTED
|
|
1280
|
+
// sudo -u <user> -h <host> psql <db-name>
|
|
1281
|
+
shellExec(`DB_PG_MAAS_NAME=${process.env.DB_PG_MAAS_NAME}`);
|
|
1282
|
+
shellExec(`DB_PG_MAAS_PASS=${process.env.DB_PG_MAAS_PASS}`);
|
|
1283
|
+
shellExec(`DB_PG_MAAS_USER=${process.env.DB_PG_MAAS_USER}`);
|
|
1284
|
+
shellExec(`DB_PG_MAAS_HOST=${process.env.DB_PG_MAAS_HOST}`);
|
|
1285
|
+
shellExec(
|
|
1286
|
+
`sudo -i -u postgres psql -c "CREATE USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"`,
|
|
1287
|
+
);
|
|
1288
|
+
shellExec(
|
|
1289
|
+
`sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH ENCRYPTED PASSWORD '$DB_PG_MAAS_PASS'"`,
|
|
1290
|
+
);
|
|
1291
|
+
const actions = ['LOGIN', 'SUPERUSER', 'INHERIT', 'CREATEDB', 'CREATEROLE', 'REPLICATION'];
|
|
1292
|
+
shellExec(`sudo -i -u postgres psql -c "ALTER USER \"$DB_PG_MAAS_USER\" WITH ${actions.join(' ')}"`);
|
|
1293
|
+
shellExec(`sudo -i -u postgres psql -c "\\du"`);
|
|
1294
|
+
|
|
1295
|
+
shellExec(`sudo -i -u postgres createdb -O "$DB_PG_MAAS_USER" "$DB_PG_MAAS_NAME"`);
|
|
1296
|
+
|
|
1297
|
+
shellExec(`sudo -i -u postgres psql -c "\\l"`);
|
|
1298
|
+
}
|
|
1299
|
+
|
|
1300
|
+
if (process.argv.includes('ls')) {
|
|
1301
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-sources read`);
|
|
1302
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} commissioning-scripts read`);
|
|
1303
|
+
// shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-source-selections read 60`);
|
|
1304
|
+
console.table(resources);
|
|
1305
|
+
console.table(machines);
|
|
1306
|
+
process.exit(0);
|
|
1307
|
+
}
|
|
1308
|
+
|
|
1309
|
+
// TODO: - Disable maas proxy (egress forwarding to public dns)
|
|
1310
|
+
// - Configure maas dns forwarding ${process.env.MAAS_DNS}
|
|
1311
|
+
// - Enable DNSSEC validation of upstream zones: Automatic (use default root key)
|
|
1312
|
+
|
|
1313
|
+
if (process.argv.includes('clear')) {
|
|
1314
|
+
for (const machine of machines) {
|
|
1315
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine delete ${machine.system_id}`);
|
|
1316
|
+
}
|
|
1317
|
+
// machines = [];
|
|
1318
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} discoveries clear all=true`);
|
|
1319
|
+
if (process.argv.includes('force')) {
|
|
1320
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} discoveries scan force=true`);
|
|
1321
|
+
}
|
|
1322
|
+
process.exit(0);
|
|
1323
|
+
}
|
|
1324
|
+
if (process.argv.includes('grub-arm64')) {
|
|
1325
|
+
shellExec(`sudo dnf install grub2-efi-aa64-modules`);
|
|
1326
|
+
shellExec(`sudo dnf install grub2-efi-x64-modules`);
|
|
1327
|
+
// sudo grub2-mknetdir --net-directory=${tftpRoot} --subdir=/boot/grub --module-path=/usr/lib/grub/arm64-efi arm64-efi
|
|
1328
|
+
process.exit(0);
|
|
1329
|
+
}
|
|
1330
|
+
|
|
1331
|
+
if (process.argv.includes('psql')) {
|
|
1332
|
+
const cmd = `psql -U ${process.env.DB_PG_MAAS_USER} -h ${process.env.DB_PG_MAAS_HOST} -W ${process.env.DB_PG_MAAS_NAME}`;
|
|
1333
|
+
pbcopy(cmd);
|
|
1334
|
+
process.exit(0);
|
|
1335
|
+
}
|
|
1336
|
+
if (process.argv.includes('logs')) {
|
|
1337
|
+
shellExec(`maas status`);
|
|
1338
|
+
const cmd = `journalctl -f -t dhcpd -u snap.maas.pebble.service`;
|
|
1339
|
+
pbcopy(cmd);
|
|
1340
|
+
process.exit(0);
|
|
1341
|
+
}
|
|
1342
|
+
if (process.argv.includes('reset')) {
|
|
1343
|
+
// shellExec(
|
|
1344
|
+
// `maas init region+rack --database-uri "postgres://$DB_PG_MAAS_USER:$DB_PG_MAAS_PASS@$DB_PG_MAAS_HOST/$DB_PG_MAAS_NAME"` +
|
|
1345
|
+
// ` --maas-url http://${IP_ADDRESS}:5240/MAAS`,
|
|
1346
|
+
// );
|
|
1347
|
+
const cmd =
|
|
1348
|
+
`maas init region+rack --database-uri "postgres://${process.env.DB_PG_MAAS_USER}:${process.env.DB_PG_MAAS_PASS}@${process.env.DB_PG_MAAS_HOST}/${process.env.DB_PG_MAAS_NAME}"` +
|
|
1349
|
+
` --maas-url http://${IP_ADDRESS}:5240/MAAS`;
|
|
1350
|
+
pbcopy(cmd);
|
|
1351
|
+
process.exit(0);
|
|
1352
|
+
}
|
|
1353
|
+
if (process.argv.includes('dhcp')) {
|
|
1354
|
+
const snippets = JSON.parse(
|
|
1355
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} dhcpsnippets read`, {
|
|
1356
|
+
stdout: true,
|
|
1357
|
+
silent: true,
|
|
1358
|
+
disableLog: true,
|
|
1359
|
+
}),
|
|
1360
|
+
);
|
|
1361
|
+
for (const snippet of snippets) {
|
|
1362
|
+
switch (snippet.name) {
|
|
1363
|
+
case 'arm64':
|
|
1364
|
+
snippet.value = snippet.value.split(`\n`);
|
|
1365
|
+
snippet.value[1] = ` filename "http://${IP_ADDRESS}:5248/images/bootloaders/uefi/arm64/grubaa64.efi";`;
|
|
1366
|
+
snippet.value[5] = ` filename "http://${IP_ADDRESS}:5248/images/bootloaders/uefi/arm64/grubaa64.efi";`;
|
|
1367
|
+
snippet.value = snippet.value.join(`\n`);
|
|
1368
|
+
shellExec(
|
|
1369
|
+
`maas ${process.env.MAAS_ADMIN_USERNAME} dhcpsnippet update ${snippet.name} value='${snippet.value}'`,
|
|
1370
|
+
);
|
|
1371
|
+
break;
|
|
1372
|
+
|
|
1373
|
+
default:
|
|
1374
|
+
break;
|
|
1375
|
+
}
|
|
1376
|
+
}
|
|
1377
|
+
|
|
1378
|
+
console.log(snippets);
|
|
1379
|
+
|
|
1380
|
+
process.exit(0);
|
|
1381
|
+
}
|
|
1382
|
+
// shellExec(`MAAS_ADMIN_USERNAME=${process.env.MAAS_ADMIN_USERNAME}`);
|
|
1383
|
+
// shellExec(`MAAS_ADMIN_EMAIL=${process.env.MAAS_ADMIN_EMAIL}`);
|
|
1384
|
+
// shellExec(`maas createadmin --username $MAAS_ADMIN_USERNAME --email $MAAS_ADMIN_EMAIL`);
|
|
1385
|
+
|
|
1386
|
+
// MaaS admin CLI:
|
|
1387
|
+
// maas login <maas-admin-username> http://localhost:5240/MAAS
|
|
1388
|
+
// paste GUI API KEY (profile section)
|
|
1389
|
+
|
|
1390
|
+
// Import custom image
|
|
1391
|
+
// maas <maas-admin-username> boot-resources create name='custom/RockyLinuxRpi4' \
|
|
1392
|
+
// title='RockyLinuxRpi4' \
|
|
1393
|
+
// architecture='arm64/generic' \
|
|
1394
|
+
// filetype='tgz' \
|
|
1395
|
+
// content@=/home/RockyLinuxRpi_9-latest.tar.gz
|
|
1396
|
+
|
|
1397
|
+
// Image boot resource:
|
|
1398
|
+
// /var/snap/maas/current/root/snap/maas
|
|
1399
|
+
// /var/snap/maas/common/maas/tftp_root
|
|
1400
|
+
// sudo chmod 755 /var/snap/maas/common/maas/tftp_root
|
|
1401
|
+
|
|
1402
|
+
// /var/snap/maas/common/maas/dhcpd.conf
|
|
1403
|
+
// sudo snap restart maas.pebble
|
|
1404
|
+
|
|
1405
|
+
// PXE Linux files:
|
|
1406
|
+
// /var/snap/maas/common/maas/image-storage/bootloaders/pxe/i386
|
|
1407
|
+
// sudo nmcli con modify <interface-device-name-connection-id> ethtool.feature-rx on ethtool.feature-tx off
|
|
1408
|
+
// sudo nmcli connection up <interface-device-name-connection-id>
|
|
1409
|
+
|
|
1410
|
+
// man nm-settings |grep feature-tx-checksum
|
|
1411
|
+
|
|
1412
|
+
// nmcli c modify <interface-device-name-connection-id> \
|
|
1413
|
+
// ethtool.feature-tx-checksum-fcoe-crc off \
|
|
1414
|
+
// ethtool.feature-tx-checksum-ip-generic off \
|
|
1415
|
+
// ethtool.feature-tx-checksum-ipv4 off \
|
|
1416
|
+
// ethtool.feature-tx-checksum-ipv6 off \
|
|
1417
|
+
// ethtool.feature-tx-checksum-sctp off
|
|
1418
|
+
|
|
1419
|
+
// Ensure Rocky NFS server and /etc/exports configured
|
|
1420
|
+
// sudo systemctl restart nfs-server
|
|
1421
|
+
// Check mounts: showmount -e <server-ip>
|
|
1422
|
+
// Check nfs ports: rpcinfo -p
|
|
1423
|
+
// sudo chown -R root:root ${process.env.NFS_EXPORT_PATH}/rpi4mb
|
|
1424
|
+
// sudo chmod 755 ${process.env.NFS_EXPORT_PATH}/rpi4mb
|
|
1425
|
+
|
|
1426
|
+
// tftp server
|
|
1427
|
+
// sudo chown -R root:root /var/snap/maas/common/maas/tftp_root/rpi4mb
|
|
1428
|
+
|
|
1429
|
+
// tftp client
|
|
1430
|
+
// sudo dnf install tftp
|
|
1431
|
+
// tftp <server-ip> -c get <path>
|
|
1432
|
+
|
|
1433
|
+
// Check firewall-cmd
|
|
1434
|
+
// firewall-cmd --permanent --add-service=rpc-bind
|
|
1435
|
+
// firewall-cmd --reload
|
|
1436
|
+
// systemctl disable firewalld
|
|
1437
|
+
// sudo firewall-cmd --permanent --add-port=10259/tcp --zone=public
|
|
1438
|
+
|
|
1439
|
+
// Image extension transform (.img.xz to .tar.gz):
|
|
1440
|
+
// tar -cvzf image-name.tar.gz image-name.img.xz
|
|
1441
|
+
|
|
1442
|
+
// Rocky network configuration:
|
|
1443
|
+
// /etc/NetworkManager/system-connections
|
|
1444
|
+
|
|
1445
|
+
// Rocky kernel params update
|
|
1446
|
+
// sudo grubby --args="<key>=<value> <key>=<value>" --update-kernel=ALL
|
|
1447
|
+
// sudo reboot now
|
|
1448
|
+
|
|
1449
|
+
// Temporal:
|
|
1450
|
+
// sudo snap install temporal
|
|
1451
|
+
// journalctl -u snap.maas.pebble -t maas-regiond
|
|
1452
|
+
// journalctl -u snap.maas.pebble -t maas-temporal -n 100 --no-pager -f
|
|
1453
|
+
|
|
1454
|
+
// Remove:
|
|
1455
|
+
// sudo dnf remove <package> -y; sudo dnf autoremove -y; sudo dnf clean packages
|
|
1456
|
+
// check: ~
|
|
1457
|
+
// check: ~/.cache
|
|
1458
|
+
// check: ~/.config
|
|
1459
|
+
|
|
1460
|
+
// Check file logs
|
|
1461
|
+
// grep -i -E -C 1 '<key-a>|<key-b>' /example.log | tail -n 600
|
|
1462
|
+
|
|
1463
|
+
// Back into your firmware setup (UEFI or BIOS config screen).
|
|
1464
|
+
// grub> fwsetup
|
|
1465
|
+
|
|
1466
|
+
// Poweroff:
|
|
1467
|
+
// grub > halt
|
|
1468
|
+
// initramfs > poweroff
|
|
1469
|
+
|
|
1470
|
+
// Check interface
|
|
1471
|
+
// ip link show
|
|
1472
|
+
// nmcli con show
|
|
1473
|
+
|
|
1474
|
+
let firmwarePath,
|
|
1475
|
+
tftpSubDir,
|
|
1476
|
+
kernelFilesPaths,
|
|
1477
|
+
name,
|
|
1478
|
+
architecture,
|
|
1479
|
+
resource,
|
|
1480
|
+
nfsConnectStr,
|
|
1481
|
+
etcExports,
|
|
1482
|
+
nfsServerRootPath,
|
|
1483
|
+
bootConf,
|
|
1484
|
+
zipFirmwareFileName,
|
|
1485
|
+
zipFirmwareName,
|
|
1486
|
+
zipFirmwareUrl,
|
|
1487
|
+
interfaceName,
|
|
1488
|
+
nfsHost;
|
|
1489
|
+
|
|
1490
|
+
switch (process.argv[3]) {
|
|
1491
|
+
case 'rpi4mb':
|
|
1492
|
+
const resourceId = process.argv[4] ?? '39';
|
|
1493
|
+
tftpSubDir = '/rpi4mb';
|
|
1494
|
+
zipFirmwareFileName = `RPi4_UEFI_Firmware_v1.41.zip`;
|
|
1495
|
+
zipFirmwareName = zipFirmwareFileName.split('.zip')[0];
|
|
1496
|
+
zipFirmwareUrl = `https://github.com/pftf/RPi4/releases/download/v1.41/RPi4_UEFI_Firmware_v1.41.zip`;
|
|
1497
|
+
firmwarePath = `../${zipFirmwareName}`;
|
|
1498
|
+
interfaceName = process.env.RPI4_INTERFACE_NAME;
|
|
1499
|
+
nfsHost = 'rpi4mb';
|
|
1500
|
+
if (!fs.existsSync(firmwarePath)) {
|
|
1501
|
+
await Downloader(zipFirmwareUrl, `../${zipFirmwareFileName}`);
|
|
1502
|
+
shellExec(`cd .. && mkdir ${zipFirmwareName} && cd ${zipFirmwareName} && unzip ../${zipFirmwareFileName}`);
|
|
1503
|
+
}
|
|
1504
|
+
resource = resources.find((o) => o.id == resourceId);
|
|
1505
|
+
name = resource.name;
|
|
1506
|
+
architecture = resource.architecture;
|
|
1507
|
+
resource = resources.find((o) => o.name === name && o.architecture === architecture);
|
|
1508
|
+
nfsServerRootPath = `${process.env.NFS_EXPORT_PATH}/rpi4mb`;
|
|
1509
|
+
// ,anonuid=1001,anongid=100
|
|
1510
|
+
// etcExports = `${nfsServerRootPath} *(rw,all_squash,sync,no_root_squash,insecure)`;
|
|
1511
|
+
etcExports = `${nfsServerRootPath} 192.168.1.0/24(${[
|
|
1512
|
+
'rw',
|
|
1513
|
+
// 'all_squash',
|
|
1514
|
+
'sync',
|
|
1515
|
+
'no_root_squash',
|
|
1516
|
+
'no_subtree_check',
|
|
1517
|
+
'insecure',
|
|
1518
|
+
]})`;
|
|
1519
|
+
const resourceData = JSON.parse(
|
|
1520
|
+
shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} boot-resource read ${resource.id}`, {
|
|
1521
|
+
stdout: true,
|
|
1522
|
+
silent: true,
|
|
1523
|
+
disableLog: true,
|
|
1524
|
+
}),
|
|
1525
|
+
);
|
|
1526
|
+
const bootFiles = resourceData.sets[Object.keys(resourceData.sets)[0]].files;
|
|
1527
|
+
const suffix = architecture.match('xgene') ? '.xgene' : '';
|
|
1528
|
+
|
|
1529
|
+
kernelFilesPaths = {
|
|
1530
|
+
'vmlinuz-efi': bootFiles['boot-kernel' + suffix].filename_on_disk,
|
|
1531
|
+
'initrd.img': bootFiles['boot-initrd' + suffix].filename_on_disk,
|
|
1532
|
+
squashfs: bootFiles['squashfs'].filename_on_disk,
|
|
1533
|
+
};
|
|
1534
|
+
const protocol = 'tcp'; // v3 -> tcp, v4 -> udp
|
|
1535
|
+
|
|
1536
|
+
const mountOptions = [
|
|
1537
|
+
protocol,
|
|
1538
|
+
'vers=3',
|
|
1539
|
+
'nfsvers=3',
|
|
1540
|
+
'nolock',
|
|
1541
|
+
// 'protocol=tcp',
|
|
1542
|
+
// 'hard=true',
|
|
1543
|
+
'port=2049',
|
|
1544
|
+
// 'sec=none',
|
|
1545
|
+
'rw',
|
|
1546
|
+
'hard',
|
|
1547
|
+
'intr',
|
|
1548
|
+
'rsize=32768',
|
|
1549
|
+
'wsize=32768',
|
|
1550
|
+
'acregmin=0',
|
|
1551
|
+
'acregmax=0',
|
|
1552
|
+
'acdirmin=0',
|
|
1553
|
+
'acdirmax=0',
|
|
1554
|
+
'noac',
|
|
1555
|
+
// 'nodev',
|
|
1556
|
+
// 'nosuid',
|
|
1557
|
+
];
|
|
1558
|
+
const cmd = [
|
|
1559
|
+
`console=serial0,115200`,
|
|
1560
|
+
`console=tty1`,
|
|
1561
|
+
// `initrd=-1`,
|
|
1562
|
+
// `net.ifnames=0`,
|
|
1563
|
+
// `dwc_otg.lpm_enable=0`,
|
|
1564
|
+
// `elevator=deadline`,
|
|
1565
|
+
`root=/dev/nfs`,
|
|
1566
|
+
`nfsroot=${serverip}:${process.env.NFS_EXPORT_PATH}/rpi4mb,${mountOptions}`,
|
|
1567
|
+
// `nfsroot=${serverip}:${process.env.NFS_EXPORT_PATH}/rpi4mb`,
|
|
1568
|
+
`ip=${ipaddr}:${serverip}:${gatewayip}:${netmask}:${nfsHost}:${interfaceName}:static`,
|
|
1569
|
+
`rootfstype=nfs`,
|
|
1570
|
+
`rw`,
|
|
1571
|
+
`rootwait`,
|
|
1572
|
+
`fixrtc`,
|
|
1573
|
+
'initrd=initrd.img',
|
|
1574
|
+
// 'boot=casper',
|
|
1575
|
+
// 'ro',
|
|
1576
|
+
'netboot=nfs',
|
|
1577
|
+
`cloud-config-url=/dev/null`,
|
|
1578
|
+
// 'ip=dhcp',
|
|
1579
|
+
// 'ip=dfcp',
|
|
1580
|
+
// 'autoinstall',
|
|
1581
|
+
// 'rd.break',
|
|
1582
|
+
];
|
|
1583
|
+
|
|
1584
|
+
nfsConnectStr = cmd.join(' ');
|
|
1585
|
+
bootConf = `[all]
|
|
1586
|
+
MAC_ADDRESS=00:00:00:00:00:00
|
|
1587
|
+
MAC_ADDRESS_OTP=0,1
|
|
1588
|
+
BOOT_UART=0
|
|
1589
|
+
WAKE_ON_GPIO=1
|
|
1590
|
+
POWER_OFF_ON_HALT=0
|
|
1591
|
+
ENABLE_SELF_UPDATE=1
|
|
1592
|
+
DISABLE_HDMI=0
|
|
1593
|
+
TFTP_IP=${serverip}
|
|
1594
|
+
TFTP_PREFIX=1
|
|
1595
|
+
TFTP_PREFIX_STR=${tftpSubDir.slice(1)}/
|
|
1596
|
+
NET_INSTALL_ENABLED=1
|
|
1597
|
+
DHCP_TIMEOUT=45000
|
|
1598
|
+
DHCP_REQ_TIMEOUT=4000
|
|
1599
|
+
TFTP_FILE_TIMEOUT=30000
|
|
1600
|
+
BOOT_ORDER=0x21`;
|
|
1601
|
+
|
|
1602
|
+
break;
|
|
1603
|
+
|
|
1604
|
+
default:
|
|
1605
|
+
break;
|
|
1606
|
+
}
|
|
1607
|
+
shellExec(`sudo chmod 755 ${process.env.NFS_EXPORT_PATH}/${nfsHost}`);
|
|
1608
|
+
|
|
1609
|
+
shellExec(`sudo rm -rf ${tftpRoot}${tftpSubDir}`);
|
|
1610
|
+
shellExec(`sudo cp -a ${firmwarePath} ${tftpRoot}${tftpSubDir}`);
|
|
1611
|
+
shellExec(`mkdir -p ${tftpRoot}${tftpSubDir}/pxe`);
|
|
1612
|
+
|
|
1613
|
+
fs.writeFileSync(`/etc/exports`, etcExports, 'utf8');
|
|
1614
|
+
if (bootConf) fs.writeFileSync(`${tftpRoot}${tftpSubDir}/boot.conf`, bootConf, 'utf8');
|
|
1615
|
+
|
|
1616
|
+
shellExec(`node bin/deploy nfs`);
|
|
1617
|
+
|
|
1618
|
+
if (process.argv.includes('restart')) {
  // Restart MAAS (managed by the snap's pebble supervisor) and poll
  // `maas status` once per second until its output contains exactly one
  // 'inactive' token.
  // NOTE(review): assumes the post-restart steady state reports exactly one
  // inactive service — confirm against the `maas status` output format in use.
  shellExec(`sudo snap restart maas.pebble`);
  let secs = 0;
  while (
    !(
      shellExec(`maas status`, { silent: true, disableLog: true, stdout: true })
        .split(' ')
        .filter((l) => l.match('inactive')).length === 1
    )
  ) {
    await timer(1000);
    console.log(`Waiting... (${++secs}s)`);
  }
}
|
|
1632
|
+
|
|
1633
|
+
switch (process.argv[3]) {
|
|
1634
|
+
case 'rpi4mb':
|
|
1635
|
+
{
|
|
1636
|
+
// subnet DHCP snippets
|
|
1637
|
+
// # UEFI ARM64
|
|
1638
|
+
// if option arch = 00:0B {
|
|
1639
|
+
// filename "rpi4mb/pxe/grubaa64.efi";
|
|
1640
|
+
// }
|
|
1641
|
+
// elsif option arch = 00:13 {
|
|
1642
|
+
// filename "http://<IP_ADDRESS>:5248/images/bootloaders/uefi/arm64/grubaa64.efi";
|
|
1643
|
+
// option vendor-class-identifier "HTTPClient";
|
|
1644
|
+
// }
|
|
1645
|
+
for (const file of ['bootaa64.efi', 'grubaa64.efi']) {
|
|
1646
|
+
shellExec(
|
|
1647
|
+
`sudo cp -a /var/snap/maas/common/maas/image-storage/bootloaders/uefi/arm64/${file} ${tftpRoot}${tftpSubDir}/pxe/${file}`,
|
|
1648
|
+
);
|
|
1649
|
+
}
|
|
1650
|
+
// const file = 'bcm2711-rpi-4-b.dtb';
|
|
1651
|
+
// shellExec(
|
|
1652
|
+
// `sudo cp -a ${firmwarePath}/${file} /var/snap/maas/common/maas/image-storage/bootloaders/uefi/arm64/${file}`,
|
|
1653
|
+
// );
|
|
1654
|
+
|
|
1655
|
+
// const ipxeSrc = fs
|
|
1656
|
+
// .readFileSync(`${tftpRoot}/ipxe.cfg`, 'utf8')
|
|
1657
|
+
// .replaceAll('amd64', 'arm64')
|
|
1658
|
+
// .replaceAll('${next-server}', IP_ADDRESS);
|
|
1659
|
+
// fs.writeFileSync(`${tftpRoot}/ipxe.cfg`, ipxeSrc, 'utf8');
|
|
1660
|
+
|
|
1661
|
+
{
|
|
1662
|
+
for (const file of Object.keys(kernelFilesPaths)) {
|
|
1663
|
+
shellExec(
|
|
1664
|
+
`sudo cp -a /var/snap/maas/common/maas/image-storage/${kernelFilesPaths[file]} ${tftpRoot}${tftpSubDir}/pxe/${file}`,
|
|
1665
|
+
);
|
|
1666
|
+
}
|
|
1667
|
+
// const configTxtSrc = fs.readFileSync(`${firmwarePath}/config.txt`, 'utf8');
|
|
1668
|
+
// fs.writeFileSync(
|
|
1669
|
+
// `${tftpRoot}${tftpSubDir}/config.txt`,
|
|
1670
|
+
// configTxtSrc
|
|
1671
|
+
// .replace(`kernel=kernel8.img`, `kernel=vmlinuz`)
|
|
1672
|
+
// .replace(`# max_framebuffers=2`, `max_framebuffers=2`)
|
|
1673
|
+
// .replace(`initramfs initramfs8 followkernel`, `initramfs initrd.img followkernel`),
|
|
1674
|
+
// 'utf8',
|
|
1675
|
+
// );
|
|
1676
|
+
|
|
1677
|
+
// grub:
|
|
1678
|
+
// set root=(pxe)
|
|
1679
|
+
|
|
1680
|
+
// UNDERPOST.NET UEFI/GRUB/MAAS RPi4 commissioning (ARM64)
|
|
1681
|
+
const menuentryStr = 'underpost.net rpi4mb maas commissioning (ARM64)';
|
|
1682
|
+
const grubCfgPath = `${tftpRoot}/grub/grub.cfg`;
|
|
1683
|
+
fs.writeFileSync(
|
|
1684
|
+
grubCfgPath,
|
|
1685
|
+
`
|
|
1686
|
+
insmod gzio
|
|
1687
|
+
insmod http
|
|
1688
|
+
insmod nfs
|
|
1689
|
+
set timeout=5
|
|
1690
|
+
set default=0
|
|
1691
|
+
|
|
1692
|
+
menuentry '${menuentryStr}' {
|
|
1693
|
+
set root=(tftp,${serverip})
|
|
1694
|
+
linux ${tftpSubDir}/pxe/vmlinuz-efi ${nfsConnectStr}
|
|
1695
|
+
initrd ${tftpSubDir}/pxe/initrd.img
|
|
1696
|
+
boot
|
|
1697
|
+
}
|
|
1698
|
+
|
|
1699
|
+
`,
|
|
1700
|
+
'utf8',
|
|
1701
|
+
);
|
|
1702
|
+
}
|
|
1703
|
+
const arm64EfiPath = `${tftpRoot}/grub/arm64-efi`;
|
|
1704
|
+
if (fs.existsSync(arm64EfiPath)) shellExec(`sudo rm -rf ${arm64EfiPath}`);
|
|
1705
|
+
shellExec(`sudo cp -a /usr/lib/grub/arm64-efi ${arm64EfiPath}`);
|
|
1706
|
+
}
|
|
1707
|
+
|
|
1708
|
+
break;
|
|
1709
|
+
|
|
1710
|
+
default:
|
|
1711
|
+
break;
|
|
1712
|
+
}
|
|
1713
|
+
|
|
1714
|
+
// Log a summary of the completed MAAS deploy (resources, paths, NFS/boot
// parameters) for auditing. Fix: 'succes' -> 'success' in the log message.
logger.info('success maas deploy', {
  resource,
  kernelFilesPaths,
  tftpRoot,
  tftpSubDir,
  firmwarePath,
  etcExports,
  nfsServerRootPath,
  nfsConnectStr,
});
if (process.argv.includes('restart')) {
  // Fix: the existence check previously tested the literal path
  // 'node engine-private/r.js' (command prefix included), so it could never
  // match and the private runner was never invoked.
  if (fs.existsSync(`engine-private/r.js`)) shellExec(`node engine-private/r`);
  shellExec(`node bin/deploy maas dhcp`);
  // Ensure the TFTP root is owned by root and world-readable/executable.
  shellExec(`sudo chown -R root:root ${tftpRoot}`);
  shellExec(`sudo chmod 755 ${tftpRoot}`); // fix: duplicated 'sudo sudo'
}
|
|
1730
|
+
// for (const machine of machines) {
|
|
1731
|
+
// // shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine delete ${machine.system_id}`);
|
|
1732
|
+
// shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine commission ${machine.system_id}`, {
|
|
1733
|
+
// silent: true,
|
|
1734
|
+
// });
|
|
1735
|
+
// }
|
|
1736
|
+
// machines = [];
|
|
1737
|
+
|
|
1738
|
+
const monitor = async () => {
  // Poll the MAAS discovery feed and auto-register every newly observed
  // device as a 'manual' power-type machine, then trigger commissioning.
  // MAAS CLI: `discoveries` queries observed discoveries; `discovery`
  // reads or deletes a single observed discovery.

  // Exclude this host itself, the gateway, and any device already registered
  // (matched by MAC address) in the outer-scope `machines` list.
  const discoveries = JSON.parse(
    shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} discoveries read`, {
      silent: true,
      stdout: true,
    }),
  ).filter(
    (o) => o.ip !== IP_ADDRESS && o.ip !== gatewayip && !machines.find((_o) => _o.mac_address === o.mac_address),
  );

  // Example discovery payload:
  // {
  //   "discovery_id": "",
  //   "ip": "192.168.1.189",
  //   "mac_address": "00:00:00:00:00:00",
  //   "last_seen": "2025-05-05T14:17:37.354",
  //   "hostname": null,
  //   "fabric_name": "",
  //   "vid": null,
  //   "mac_organization": "",
  //   "observer": { "system_id": "", "hostname": "", "interface_id": 1, "interface_name": "" },
  //   "resource_uri": "/MAAS/api/2.0/discovery/MTkyLjE2OC4xLjE4OSwwMDowMDowMDowMDowMDowMA==/"
  // }

  for (const discovery of discoveries) {
    const machine = {
      architecture: architecture.match('amd') ? 'amd64/generic' : 'arm64/generic',
      mac_address: discovery.mac_address,
      // NOTE(review): `??` only skips null/undefined — an empty-string
      // mac_organization would be selected here and yield an empty hostname
      // after the replaceAll below; confirm intended fallback order.
      hostname: discovery.hostname ?? discovery.mac_organization ?? discovery.domain ?? `generic-host-${s4()}`,
      // https://maas.io/docs/reference-power-drivers
      power_type: 'manual', // manual
      mac_addresses: discovery.mac_address,
    };
    // MAAS hostnames must not contain spaces or dots.
    machine.hostname = machine.hostname.replaceAll(' ', '').replaceAll('.', '');

    try {
      let newMachine = shellExec(
        `maas ${process.env.MAAS_ADMIN_USERNAME} machines create ${Object.keys(machine)
          .map((k) => `${k}="${machine[k]}"`)
          .join(' ')}`,
        {
          silent: true,
          stdout: true,
        },
      );
      newMachine = machineFactory(JSON.parse(newMachine));
      machines.push(newMachine);
      console.log(newMachine);
      shellExec(`maas ${process.env.MAAS_ADMIN_USERNAME} machine commission ${newMachine.system_id}`, {
        silent: true,
      });
    } catch (error) {
      // Failure to register one discovery must not abort the polling loop.
      logger.error(error, error.stack);
    }
  }
  // Re-poll after one second. The recursive call is intentionally not awaited
  // (fire-and-forget polling loop).
  await timer(1000);
  monitor();
};
// shellExec(`node bin/deploy open-virtual-root ${architecture.match('amd') ? 'amd64' : 'arm64'} ${nfsHost}`);
// Reset the registration cache, clear MAAS state, and start the polling loop.
machines = [];
shellExec(`node bin/deploy maas clear`);
monitor();
|
|
1817
|
+
break;
|
|
1818
|
+
}
|
|
1819
|
+
|
|
1820
|
+
case 'nfs': {
  // Configure this host as an NFSv3 server: pin the floating RPC daemon
  // ports in /etc/nfs.conf (so the firewall can be configured statically),
  // re-export /etc/exports, and restart nfs-server.
  //
  // Daemon RPC NFSv3 ports:
  // 2049  (TCP/UDP) – nfsd standard port.
  // 111   (TCP/UDP) – rpcbind/portmapper.
  // 20048 (TCP/UDP) – rpc.mountd.
  // 32765 (TCP/UDP) – rpc.statd.
  // 32766 (TCP/UDP) – lockd (NLM).
  //
  // Exports and permissions: /etc/exports
  // Port configuration:      /etc/nfs.conf

  fs.writeFileSync(
    `/etc/nfs.conf`,
    `
[mountd]
port = 20048

[statd]
port = 32765
outgoing-port = 32765

[nfsd]
rdma=y
rdma-port=20049

[lockd]
port = 32766
udp-port = 32766
`,
    'utf8',
  );

  // /etc/exports option examples:
  // read-only, clients squashed to anonymous on the server:
  //   /share ip-client(ro,all_squash)
  // read-write, client UIDs preserved, only root squashed:
  //   /share ip-client(rw)
  // client1 read-write, client2 read-only (UIDs kept, root squashed):
  //   /share ip-client1(rw) ip-client2(ro)
  // read-write, all clients mapped to UID 1001 / GID 100:
  //   /share ip-client(rw,all_squash,anonuid=1001,anongid=100)

  // Prerequisites:
  // sudo dnf install nfs-utils
  // sudo systemctl enable --now rpcbind     // RPC map service
  // sudo systemctl enable --now nfs-server  // nfs domains nfsd

  // Alternative export refresh:
  // shellExec(`sudo exportfs -a -r`);
  // shellExec(`sudo exportfs -v`);

  // Show current exports, then re-export everything declared in /etc/exports.
  shellExec(`sudo exportfs -s`);

  shellExec(`sudo exportfs -rav`);

  // Rocky: allow VMs to use NFS:
  // sudo setsebool -P virt_use_nfs 1

  // Disable a share:
  // sudo exportfs -u <client-ip>:${process.env.NFS_EXPORT_PATH}/rpi4mb

  // NFS client usage:
  // mount -t nfs <server-ip>:/server-mnt /mnt
  // umount /mnt

  shellExec(`sudo systemctl restart nfs-server`);
  break;
}
|
|
1895
|
+
case 'update-virtual-root': {
|
|
1896
|
+
dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
|
|
1897
|
+
const IP_ADDRESS = getLocalIPv4Address();
|
|
1898
|
+
const architecture = process.argv[3];
|
|
1899
|
+
const host = process.argv[4];
|
|
1900
|
+
const nfsHostPath = `${process.env.NFS_EXPORT_PATH}/${host}`;
|
|
1901
|
+
const ipaddr = process.env.RPI4_IP;
|
|
1902
|
+
await updateVirtualRoot({
|
|
1903
|
+
IP_ADDRESS,
|
|
1904
|
+
architecture,
|
|
1905
|
+
host,
|
|
1906
|
+
nfsHostPath,
|
|
1907
|
+
ipaddr,
|
|
1908
|
+
});
|
|
1909
|
+
break;
|
|
1910
|
+
}
|
|
1911
|
+
case 'open-virtual-root': {
|
|
1912
|
+
dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
|
|
1913
|
+
const IP_ADDRESS = getLocalIPv4Address();
|
|
1914
|
+
const architecture = process.argv[3];
|
|
1915
|
+
const host = process.argv[4];
|
|
1916
|
+
const nfsHostPath = `${process.env.NFS_EXPORT_PATH}/${host}`;
|
|
1917
|
+
shellExec(`sudo dnf install -y iptables-legacy`);
|
|
1918
|
+
shellExec(`sudo dnf install -y debootstrap`);
|
|
1919
|
+
shellExec(`sudo dnf install kernel-modules-extra-$(uname -r)`);
|
|
1920
|
+
switch (architecture) {
|
|
1921
|
+
case 'arm64':
|
|
1922
|
+
shellExec(`sudo podman run --rm --privileged multiarch/qemu-user-static --reset -p yes`);
|
|
1923
|
+
|
|
1924
|
+
break;
|
|
1925
|
+
|
|
1926
|
+
default:
|
|
1927
|
+
break;
|
|
1928
|
+
}
|
|
1929
|
+
|
|
1930
|
+
shellExec(`sudo modprobe binfmt_misc`);
|
|
1931
|
+
shellExec(`sudo mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc`);
|
|
1932
|
+
|
|
1933
|
+
if (process.argv.includes('build')) {
|
|
1934
|
+
// shellExec(`depmod -a`);
|
|
1935
|
+
shellExec(`mkdir -p ${nfsHostPath}`);
|
|
1936
|
+
let cmd;
|
|
1937
|
+
switch (host) {
|
|
1938
|
+
case 'rpi4mb':
|
|
1939
|
+
shellExec(`sudo rm -rf ${nfsHostPath}/*`);
|
|
1940
|
+
shellExec(`sudo chown -R root:root ${nfsHostPath}`);
|
|
1941
|
+
cmd = [
|
|
1942
|
+
`sudo debootstrap`,
|
|
1943
|
+
`--arch=arm64`,
|
|
1944
|
+
`--variant=minbase`,
|
|
1945
|
+
`--foreign`, // arm64 on amd64
|
|
1946
|
+
`noble`,
|
|
1947
|
+
nfsHostPath,
|
|
1948
|
+
`http://ports.ubuntu.com/ubuntu-ports/`,
|
|
1949
|
+
];
|
|
1950
|
+
break;
|
|
1951
|
+
|
|
1952
|
+
default:
|
|
1953
|
+
break;
|
|
1954
|
+
}
|
|
1955
|
+
shellExec(cmd.join(' '));
|
|
1956
|
+
|
|
1957
|
+
shellExec(`sudo podman create --name extract multiarch/qemu-user-static`);
|
|
1958
|
+
shellExec(`podman ps -a`);
|
|
1959
|
+
shellExec(`sudo podman cp extract:/usr/bin/qemu-aarch64-static ${nfsHostPath}/usr/bin/`);
|
|
1960
|
+
shellExec(`sudo podman rm extract`);
|
|
1961
|
+
shellExec(`podman ps -a`);
|
|
1962
|
+
|
|
1963
|
+
switch (host) {
|
|
1964
|
+
case 'rpi4mb':
|
|
1965
|
+
shellExec(`file ${nfsHostPath}/bin/bash`); // expected: ELF 64-bit LSB pie executable, ARM aarch64 …
|
|
1966
|
+
break;
|
|
1967
|
+
|
|
1968
|
+
default:
|
|
1969
|
+
break;
|
|
1970
|
+
}
|
|
1971
|
+
|
|
1972
|
+
shellExec(`sudo chroot ${nfsHostPath} /usr/bin/qemu-aarch64-static /bin/bash <<'EOF'
|
|
1973
|
+
/debootstrap/debootstrap --second-stage
|
|
1974
|
+
EOF`);
|
|
1975
|
+
}
|
|
1976
|
+
if (process.argv.includes('mount')) {
|
|
1977
|
+
shellExec(`sudo mount --bind /proc ${nfsHostPath}/proc`);
|
|
1978
|
+
shellExec(`sudo mount --bind /sys ${nfsHostPath}/sys`);
|
|
1979
|
+
shellExec(`sudo mount --rbind /dev ${nfsHostPath}/dev`);
|
|
1980
|
+
}
|
|
1981
|
+
|
|
1982
|
+
if (process.argv.includes('build')) {
|
|
1983
|
+
switch (host) {
|
|
1984
|
+
case 'rpi4mb':
|
|
1985
|
+
const ipaddr = process.env.RPI4_IP;
|
|
1986
|
+
|
|
1987
|
+
await updateVirtualRoot({
|
|
1988
|
+
IP_ADDRESS,
|
|
1989
|
+
architecture,
|
|
1990
|
+
host,
|
|
1991
|
+
nfsHostPath,
|
|
1992
|
+
ipaddr,
|
|
1993
|
+
});
|
|
1994
|
+
|
|
1995
|
+
break;
|
|
1996
|
+
|
|
1997
|
+
default:
|
|
1998
|
+
break;
|
|
1999
|
+
}
|
|
2000
|
+
}
|
|
2001
|
+
// if (process.argv.includes('mount')) {
|
|
2002
|
+
// shellExec(`sudo mount --bind /lib/modules ${nfsHostPath}/lib/modules`);
|
|
2003
|
+
// }
|
|
2004
|
+
|
|
2005
|
+
break;
|
|
2006
|
+
}
|
|
2007
|
+
|
|
2008
|
+
case 'close-virtual-root': {
  // Tear down the pseudo-filesystem bind mounts created by
  // `open-virtual-root … mount` for the NFS-exported chroot of <host>.
  // argv: [3] = architecture (unused here), [4] = host.
  const architecture = process.argv[3];
  const host = process.argv[4];
  const nfsHostPath = `${process.env.NFS_EXPORT_PATH}/${host}`;
  shellExec(`sudo umount ${nfsHostPath}/proc`);
  shellExec(`sudo umount ${nfsHostPath}/sys`);
  // NOTE(review): /dev was mounted with `--rbind`; a plain `umount` may leave
  // nested submounts behind — confirm whether `umount -R` is required here.
  shellExec(`sudo umount ${nfsHostPath}/dev`);
  // shellExec(`sudo umount ${nfsHostPath}/lib/modules`);
  break;
}
|
|
2018
|
+
|
|
2019
|
+
case 'mount': {
|
|
2020
|
+
const mounts = shellExec(`mount`).split(`\n`);
|
|
2021
|
+
console.table(
|
|
2022
|
+
mounts
|
|
2023
|
+
.filter((l) => l.trim())
|
|
2024
|
+
.map(
|
|
2025
|
+
(o) => (
|
|
2026
|
+
(o = o.split(' ')),
|
|
2027
|
+
{
|
|
2028
|
+
path: o[2],
|
|
2029
|
+
type: o[4],
|
|
2030
|
+
permissions: o[5],
|
|
2031
|
+
}
|
|
2032
|
+
),
|
|
2033
|
+
),
|
|
2034
|
+
);
|
|
2035
|
+
break;
|
|
2036
|
+
}
|
|
2037
|
+
|
|
2038
|
+
case 'create-ports': {
|
|
2039
|
+
const cmd = [];
|
|
2040
|
+
const ipaddr = getLocalIPv4Address();
|
|
2041
|
+
for (const port of ['5240']) {
|
|
2042
|
+
const name = 'maas';
|
|
2043
|
+
cmd.push(`${name}:${port}-${port}:${ipaddr}`);
|
|
2044
|
+
}
|
|
2045
|
+
pbcopy(`node engine-private/r create-port ${cmd}`);
|
|
2046
|
+
break;
|
|
2047
|
+
}
|
|
2048
|
+
|
|
2049
|
+
case 'maas-ports': {
|
|
2050
|
+
// Configure firewall:
|
|
2051
|
+
|
|
2052
|
+
// systemctl stop firewalld
|
|
2053
|
+
// systemctl mask firewalld
|
|
2054
|
+
|
|
2055
|
+
// ufw disable
|
|
2056
|
+
// ufw enable
|
|
2057
|
+
|
|
2058
|
+
// sudo snap install ufw
|
|
2059
|
+
// const ports = ['80', '443', '22', '3000-3100'];
|
|
2060
|
+
const ports = [
|
|
2061
|
+
'43',
|
|
2062
|
+
'53',
|
|
2063
|
+
'60',
|
|
2064
|
+
'66',
|
|
2065
|
+
'67',
|
|
2066
|
+
'69',
|
|
2067
|
+
'4011',
|
|
2068
|
+
'111',
|
|
2069
|
+
'2049',
|
|
2070
|
+
'20048',
|
|
2071
|
+
'20049',
|
|
2072
|
+
'32765',
|
|
2073
|
+
'32766',
|
|
2074
|
+
'5248',
|
|
2075
|
+
'5240',
|
|
2076
|
+
];
|
|
2077
|
+
for (const port of ports) {
|
|
2078
|
+
shellExec(`ufw allow ${port}/tcp`);
|
|
2079
|
+
shellExec(`ufw allow ${port}/udp`);
|
|
2080
|
+
}
|
|
2081
|
+
|
|
2082
|
+
shellExec(`sudo systemctl mask firewalld`);
|
|
2083
|
+
|
|
2084
|
+
break;
|
|
2085
|
+
}
|
|
2086
|
+
|
|
2087
|
+
case 'iptables': {
  // Enable nftables and install a minimal drop-by-default ruleset that only
  // accepts inbound SSH (tcp/22), then load it immediately.
  shellExec(`sudo systemctl enable nftables`);
  shellExec(`sudo systemctl restart nftables`);

  shellExec(`sudo tee /etc/nftables.conf <<EOF
table inet filter {
chain input {
type filter hook input priority 0;
policy drop;
tcp dport 22 accept
}
}
EOF`);
  shellExec(`sudo nft -f /etc/nftables.conf`);

  // To roll back:
  // sudo systemctl stop nftables
  // sudo systemctl disable nftables

  break;
}
|
|
2107
|
+
|
|
2108
|
+
case 'rpi4': {
  // Raspberry Pi 4 bootloader setup — manual checklist (no-op case, kept for
  // operator reference):
  //
  // 1) Create boot.conf
  //
  // 2) Flash lite RPiOS with rpi-imager,
  //    placing the boot.conf files in the root disk path
  //
  // 3) cd /boot/firmware && sudo rpi-eeprom-config --apply boot.conf
  //
  // 4) sudo reboot
  //
  // 5) check: 'vcgencmd bootloader_version'
  // 6) check: 'vcgencmd bootloader_config'
  //
  // 7) Shut down and restart without the sd card
  //
  // sudo apt update
  // sudo apt install git

  break;
}
|
|
2130
|
+
|
|
2131
|
+
case 'blue': {
  // Bluetooth troubleshooting checklist (no-op case, kept for operator
  // reference):
  // lsusb | grep blue -i
  // rfkill list
  // sudo service bluetooth start
  // bluetoothctl show
  // sudo rfkill unblock bluetooth
  // dmesg | grep -i bluetooth
  // journalctl -u bluetooth -f
  // sudo dnf update bluez bluez-libs bluez-utils
  // sudo rmmod btusb
  // sudo modprobe btusb
  break;
}
|
|
2144
|
+
|
|
2145
|
+
case 'fastapi-models': {
  // Seed the FastAPI template database by running its initial_data.sh script.
  shellExec(`chmod +x ../full-stack-fastapi-template/backend/initial_data.sh`);
  // NOTE(review): the script is run twice — possibly a deliberate retry for
  // first-boot DB readiness; confirm before removing the second invocation.
  shellExec(`../full-stack-fastapi-template/backend/initial_data.sh`);
  shellExec(`../full-stack-fastapi-template/backend/initial_data.sh`);
  break;
}
|
|
2151
|
+
|
|
2152
|
+
case 'fastapi': {
  // Provision the full-stack FastAPI template: clone/reset the repo, patch
  // its env/config files, build backend/frontend container images, create
  // k8s secrets, and apply the deployment/service manifests.
  // Usage:
  // node bin/deploy fastapi reset
  // node bin/deploy fastapi reset build-back build-front secret run-back run-front
  // https://github.com/NonsoEchendu/full-stack-fastapi-project
  // https://github.com/fastapi/full-stack-fastapi-template
  const path = `../full-stack-fastapi-template`;
  const VITE_API_URL = `http://localhost:8000`;

  if (process.argv.includes('reset')) shellExec(`sudo rm -rf ${path}`);

  if (!fs.existsSync(path))
    shellExec(`cd .. && git clone https://github.com/fastapi/full-stack-fastapi-template.git`);

  // Discard any local modifications so the patches below apply to a clean tree.
  shellExec(`cd ${path} && git checkout . && git clean -f -d`);
  const password = fs.readFileSync(`/home/dd/engine/engine-private/postgresql-password`, 'utf8');

  // Patch the template's root .env: superuser, secrets, and DB credentials.
  fs.writeFileSync(
    `${path}/.env`,
    fs
      .readFileSync(`${path}/.env`, 'utf8')
      .replace(`FIRST_SUPERUSER=admin@example.com`, `FIRST_SUPERUSER=development@underpost.net`)
      .replace(`FIRST_SUPERUSER_PASSWORD=changethis`, `FIRST_SUPERUSER_PASSWORD=${password}`)
      .replace(`SECRET_KEY=changethis`, `SECRET_KEY=${password}`)
      .replace(`POSTGRES_DB=app`, `POSTGRES_DB=postgresdb`)
      .replace(`POSTGRES_USER=postgres`, `POSTGRES_USER=admin`)
      .replace(`POSTGRES_PASSWORD=changethis`, `POSTGRES_PASSWORD=${password}`),
    'utf8',
  );
  // Uncomment the SQLModel table-creation lines so tables are created at boot.
  fs.writeFileSync(
    `${path}/backend/app/core/db.py`,
    fs
      .readFileSync(`${path}/backend/app/core/db.py`, 'utf8')
      .replace(` # from sqlmodel import SQLModel`, ` from sqlmodel import SQLModel`)
      .replace(` # SQLModel.metadata.create_all(engine)`, ` SQLModel.metadata.create_all(engine)`),

    'utf8',
  );

  fs.copySync(`./manifests/deployment/fastapi/initial_data.sh`, `${path}/backend/initial_data.sh`);

  // Replace the Dockerfile's build-arg indirection with the concrete URL.
  // (The single-quoted search string matches the literal text `${VITE_API_URL}`
  // in the Dockerfile; it is not interpolated here.)
  fs.writeFileSync(
    `${path}/frontend/Dockerfile`,
    fs
      .readFileSync(`${path}/frontend/Dockerfile`, 'utf8')
      .replace('ARG VITE_API_URL=${VITE_API_URL}', `ARG VITE_API_URL='${VITE_API_URL}'`),
    'utf8',
  );

  // Patch the frontend .env: API URL and mailcatcher port.
  fs.writeFileSync(
    `${path}/frontend/.env`,
    fs
      .readFileSync(`${path}/frontend/.env`, 'utf8')
      .replace(`VITE_API_URL=http://localhost:8000`, `VITE_API_URL=${VITE_API_URL}`)
      .replace(`MAILCATCHER_HOST=http://localhost:1080`, `MAILCATCHER_HOST=http://localhost:1081`),

    'utf8',
  );

  // 'models' is a terminal sub-command: seed the DB and stop here.
  if (process.argv.includes('models')) {
    shellExec(`node bin/deploy fastapi-models`);
    break;
  }

  if (process.argv.includes('build-back')) {
    const imageName = `fastapi-backend:latest`;
    shellExec(`sudo podman pull docker.io/library/python:3.10`);
    shellExec(`sudo podman pull ghcr.io/astral-sh/uv:0.5.11`);
    shellExec(`sudo rm -rf ${path}/${imageName.replace(':', '_')}.tar`);
    const args = [
      `node bin dockerfile-image-build --path ${path}/backend/`,
      `--image-name=${imageName} --image-path=${path}`,
      `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --no-cache`,
    ];
    shellExec(args.join(' '));
  }
  if (process.argv.includes('build-front')) {
    const imageName = `fastapi-frontend:latest`;
    shellExec(`sudo podman pull docker.io/library/node:20`);
    shellExec(`sudo podman pull docker.io/library/nginx:1`);
    shellExec(`sudo rm -rf ${path}/${imageName.replace(':', '_')}.tar`);
    const args = [
      `node bin dockerfile-image-build --path ${path}/frontend/`,
      `--image-name=${imageName} --image-path=${path}`,
      `--podman-save --${process.argv.includes('kubeadm') ? 'kubeadm' : 'kind'}-load --no-cache`,
    ];
    shellExec(args.join(' '));
  }
  if (process.argv.includes('secret')) {
    // Recreate (delete + create) the k8s secrets consumed by the manifests.
    {
      const secretSelector = `fastapi-postgres-credentials`;
      shellExec(`sudo kubectl delete secret ${secretSelector}`);
      shellExec(
        `sudo kubectl create secret generic ${secretSelector}` +
          ` --from-literal=POSTGRES_DB=postgresdb` +
          ` --from-literal=POSTGRES_USER=admin` +
          ` --from-file=POSTGRES_PASSWORD=/home/dd/engine/engine-private/postgresql-password`,
      );
    }
    {
      const secretSelector = `fastapi-backend-config-secret`;
      shellExec(`sudo kubectl delete secret ${secretSelector}`);
      shellExec(
        `sudo kubectl create secret generic ${secretSelector}` +
          ` --from-file=SECRET_KEY=/home/dd/engine/engine-private/postgresql-password` +
          ` --from-literal=FIRST_SUPERUSER=development@underpost.net` +
          ` --from-file=FIRST_SUPERUSER_PASSWORD=/home/dd/engine/engine-private/postgresql-password`,
      );
    }
  }
  if (process.argv.includes('run-back')) {
    shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/backend-deployment.yml`);
    shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/backend-service.yml`);
  }
  if (process.argv.includes('run-front')) {
    shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/frontend-deployment.yml`);
    shellExec(`sudo kubectl apply -f ./manifests/deployment/fastapi/frontend-service.yml`);
  }
  break;
}
|
|
2271
|
+
|
|
2272
|
+
case 'conda': {
  // Initialize conda from the root miniconda install, disable auto-activation
  // of the base environment, then list the available environments.
  // Shell reference for activating a specific environment:
  // set -e
  // ENV_NAME="${1:-cuda_env}"
  // eval "$(conda shell.bash hook)"
  // conda activate "${ENV_NAME}"
  shellExec(
    `export PATH="/root/miniconda3/bin:$PATH" && conda init && conda config --set auto_activate_base false`,
  );
  shellExec(`conda env list`);
  break;
}
|
|
2283
|
+
|
|
2284
|
+
case 'kafka': {
|
|
2285
|
+
// https://medium.com/@martin.hodges/deploying-kafka-on-a-kind-kubernetes-cluster-for-development-and-testing-purposes-ed7adefe03cb
|
|
2286
|
+
const imageName = `doughgle/kafka-kraft`;
|
|
2287
|
+
shellExec(`docker pull ${imageName}`);
|
|
2288
|
+
if (!process.argv.includes('kubeadm'))
|
|
2289
|
+
shellExec(
|
|
2290
|
+
`${process.argv.includes('kubeadm') ? `ctr -n k8s.io images import` : `kind load docker-image`} ${imageName}`,
|
|
2291
|
+
);
|
|
2292
|
+
shellExec(`kubectl create namespace kafka`);
|
|
2293
|
+
shellExec(`kubectl apply -f ./manifests/deployment/kafka/deployment.yaml`);
|
|
2294
|
+
// kubectl logs kafka-0 -n kafka | grep STARTED
|
|
2295
|
+
// kubectl logs kafka-1 -n kafka | grep STARTED
|
|
2296
|
+
// kubectl logs kafka-2 -n kafka | grep STARTED
|
|
2297
|
+
|
|
2298
|
+
// kafka-topics.sh --create --topic my-topic --bootstrap-server kafka-svc:9092
|
|
2299
|
+
// kafka-topics.sh --list --topic my-topic --bootstrap-server kafka-svc:9092
|
|
2300
|
+
// kafka-topics.sh --delete --topic my-topic --bootstrap-server kafka-svc:9092
|
|
2301
|
+
|
|
2302
|
+
// kafka-console-producer.sh --bootstrap-server kafka-svc:9092 --topic my-topic
|
|
2303
|
+
// kafka-console-consumer.sh --bootstrap-server kafka-svc:9092 --topic my-topic
|
|
2304
|
+
break;
|
|
2305
|
+
}
|
|
2306
|
+
|
|
2307
|
+
case 'nvidia-gpu-operator': {
|
|
2308
|
+
// https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
|
|
2309
|
+
shellExec(`curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \
|
|
2310
|
+
sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo`);
|
|
2311
|
+
|
|
2312
|
+
const NVIDIA_CONTAINER_TOOLKIT_VERSION = '1.17.8-1';
|
|
2313
|
+
|
|
2314
|
+
shellExec(`sudo dnf install -y \
|
|
2315
|
+
nvidia-container-toolkit-${NVIDIA_CONTAINER_TOOLKIT_VERSION} \
|
|
2316
|
+
nvidia-container-toolkit-base-${NVIDIA_CONTAINER_TOOLKIT_VERSION} \
|
|
2317
|
+
libnvidia-container-tools-${NVIDIA_CONTAINER_TOOLKIT_VERSION} \
|
|
2318
|
+
libnvidia-container1-${NVIDIA_CONTAINER_TOOLKIT_VERSION}`);
|
|
2319
|
+
|
|
2320
|
+
// https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/getting-started.html
|
|
2321
|
+
|
|
2322
|
+
shellExec(`kubectl create ns gpu-operator`);
|
|
2323
|
+
shellExec(`kubectl label --overwrite ns gpu-operator pod-security.kubernetes.io/enforce=privileged`);
|
|
2324
|
+
|
|
2325
|
+
shellExec(`helm repo add nvidia https://helm.ngc.nvidia.com/nvidia \
|
|
2326
|
+
&& helm repo update`);
|
|
2327
|
+
|
|
2328
|
+
// shellExec(`helm install --wait --generate-name \
|
|
2329
|
+
// -n gpu-operator --create-namespace \
|
|
2330
|
+
// nvidia/gpu-operator \
|
|
2331
|
+
// --version=v25.3.1 \
|
|
2332
|
+
// --set toolkit.version=v1.16.1-ubi8`);
|
|
2333
|
+
|
|
2334
|
+
shellExec(`helm install --wait --generate-name \
|
|
2335
|
+
-n gpu-operator --create-namespace \
|
|
2336
|
+
nvidia/gpu-operator \
|
|
2337
|
+
--version=v25.3.1 \
|
|
2338
|
+
--set driver.enabled=false \
|
|
2339
|
+
--set driver.repository=nvcr.io/nvidia \
|
|
2340
|
+
--set cdi.enabled=true \
|
|
2341
|
+
--set cdi.default=true \
|
|
2342
|
+
--set toolkit.env[0].name=CONTAINERD_CONFIG \
|
|
2343
|
+
--set toolkit.env[0].value=/etc/containerd/config.toml \
|
|
2344
|
+
--set toolkit.env[1].name=CONTAINERD_SOCKET \
|
|
2345
|
+
--set toolkit.env[1].value=/run/containerd/containerd.sock \
|
|
2346
|
+
--set toolkit.env[2].name=CONTAINERD_RUNTIME_CLASS \
|
|
2347
|
+
--set toolkit.env[2].value=nvidia \
|
|
2348
|
+
--set-string toolkit.env[3].name=CONTAINERD_SET_AS_DEFAULT \
|
|
2349
|
+
--set-string toolkit.env[3].value=true`);
|
|
2350
|
+
|
|
2351
|
+
// Check gpu drivers
|
|
2352
|
+
shellExec(
|
|
2353
|
+
`break;kubectl get nodes -o json | jq '.items[].metadata.labels | keys | any(startswith("feature.node.kubernetes.io"))'`,
|
|
2354
|
+
);
|
|
2355
|
+
break;
|
|
2356
|
+
}
|
|
2357
|
+
|
|
2358
|
+
case 'kubeflow-spark-operator': {
|
|
2359
|
+
// Use case:
|
|
2360
|
+
// Data Processing Pipelines: Used for ETL tasks where Spark can handle large data volumes efficiently.
|
|
2361
|
+
// Real-Time Analytics: Processing data from streaming sources (e.g., Kafka) for real-time analytics.
|
|
2362
|
+
// Machine Learning and Data Science: Training and deploying machine learning models at scale using Spark MLlib.
|
|
2363
|
+
|
|
2364
|
+
shellExec(`helm repo add spark-operator https://kubeflow.github.io/spark-operator`);
|
|
2365
|
+
shellExec(`helm install spark-operator spark-operator/spark-operator \
|
|
2366
|
+
--namespace spark-operator \
|
|
2367
|
+
--create-namespace \
|
|
2368
|
+
--wait`);
|
|
2369
|
+
|
|
2370
|
+
const image = `spark:3.5.5`;
|
|
2371
|
+
shellExec(`sudo docker pull ${image}`);
|
|
2372
|
+
if (!process.argv.includes('kubeadm'))
|
|
2373
|
+
shellExec(
|
|
2374
|
+
`sudo ${
|
|
2375
|
+
process.argv.includes('kubeadm') ? `ctr -n k8s.io images import` : `kind load docker-image`
|
|
2376
|
+
} ${image}`,
|
|
2377
|
+
);
|
|
2378
|
+
shellExec(`kubectl apply -f ./manifests/deployment/spark/spark-pi-py.yaml`);
|
|
2379
|
+
|
|
2380
|
+
// Check the status of the Spark job:
|
|
2381
|
+
// kubectl get sparkapplications.sparkoperator.k8s.io -n default
|
|
2382
|
+
// kubectl get sparkapplication
|
|
2383
|
+
|
|
2384
|
+
// Check case log:
|
|
2385
|
+
// kubectl logs -f spark-pi-python-driver
|
|
2386
|
+
// kubectl logs -f spark-pi-python-driver | grep Pi
|
|
2387
|
+
// kubectl describe sparkapplication spark-gpu-test
|
|
2388
|
+
|
|
2389
|
+
// Uninstall:
|
|
2390
|
+
// kubectl delete sparkapplications.sparkoperator.k8s.io spark-pi-python -n default
|
|
2391
|
+
// helm delete spark-operator -n spark-operator
|
|
2392
|
+
|
|
2393
|
+
// Gpu plugins:
|
|
2394
|
+
// https://github.com/NVIDIA/spark-rapids
|
|
2395
|
+
// RAPIDS Accelerator
|
|
2396
|
+
break;
|
|
2397
|
+
}
|
|
2398
|
+
|
|
2399
|
+
case 'sbt': {
|
|
2400
|
+
// https://www.scala-sbt.org/1.x/docs/Installing-sbt-on-Linux.html
|
|
2401
|
+
|
|
2402
|
+
// sudo rm -f /etc/yum.repos.d/bintray-rpm.repo
|
|
2403
|
+
// curl -L https://www.scala-sbt.org/sbt-rpm.repo > sbt-rpm.repo
|
|
2404
|
+
// sudo mv sbt-rpm.repo /etc/yum.repos.d/
|
|
2405
|
+
// sudo yum install sbt
|
|
2406
|
+
break;
|
|
2407
|
+
}
|
|
1097
2408
|
}
|
|
1098
2409
|
} catch (error) {
|
|
1099
2410
|
logger.error(error, error.stack);
|