underpost 2.8.79 → 2.8.84

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/.github/workflows/ghpkg.yml +22 -20
  2. package/.github/workflows/npmpkg.yml +15 -10
  3. package/.github/workflows/pwa-microservices-template.page.yml +12 -3
  4. package/.github/workflows/pwa-microservices-template.test.yml +20 -17
  5. package/.vscode/extensions.json +2 -3
  6. package/.vscode/settings.json +2 -42
  7. package/Dockerfile +14 -33
  8. package/README.md +43 -25
  9. package/bin/db.js +1 -0
  10. package/bin/deploy.js +104 -797
  11. package/bin/file.js +18 -1
  12. package/bin/vs.js +18 -3
  13. package/cli.md +367 -207
  14. package/conf.js +4 -0
  15. package/docker-compose.yml +1 -1
  16. package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
  17. package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
  18. package/manifests/deployment/tensorflow/tf-gpu-test.yaml +65 -0
  19. package/manifests/lxd/lxd-admin-profile.yaml +1 -0
  20. package/manifests/lxd/lxd-preseed.yaml +9 -37
  21. package/manifests/lxd/underpost-setup.sh +98 -81
  22. package/manifests/maas/device-scan.sh +43 -0
  23. package/manifests/maas/gpu-diag.sh +19 -0
  24. package/manifests/maas/lxd-preseed.yaml +32 -0
  25. package/manifests/maas/maas-setup.sh +120 -0
  26. package/manifests/maas/nat-iptables.sh +26 -0
  27. package/manifests/maas/snap-clean.sh +26 -0
  28. package/manifests/mariadb/statefulset.yaml +2 -1
  29. package/manifests/mariadb/storage-class.yaml +10 -0
  30. package/manifests/mongodb-4.4/service-deployment.yaml +2 -2
  31. package/manifests/valkey/service.yaml +3 -9
  32. package/manifests/valkey/statefulset.yaml +10 -12
  33. package/package.json +1 -1
  34. package/src/cli/baremetal.js +1280 -0
  35. package/src/cli/cloud-init.js +537 -0
  36. package/src/cli/cluster.js +506 -243
  37. package/src/cli/deploy.js +41 -3
  38. package/src/cli/env.js +2 -2
  39. package/src/cli/image.js +57 -9
  40. package/src/cli/index.js +271 -232
  41. package/src/cli/lxd.js +314 -81
  42. package/src/cli/repository.js +7 -4
  43. package/src/cli/run.js +262 -0
  44. package/src/cli/test.js +1 -1
  45. package/src/index.js +28 -1
  46. package/src/runtime/lampp/Dockerfile +41 -47
  47. package/src/server/conf.js +61 -0
  48. package/src/server/logger.js +3 -3
  49. package/src/server/process.js +16 -19
  50. package/src/server/runtime.js +1 -6
  51. package/src/server/ssl.js +1 -12
  52. package/src/server/valkey.js +3 -3
  53. package/supervisord-openssh-server.conf +0 -5
package/conf.js CHANGED
@@ -164,6 +164,10 @@ const DefaultConf = /**/ {
         auth: { user: 'noreply@default.net', pass: '' },
       },
     },
+    valkey: {
+      port: 6379,
+      host: '127.0.0.1',
+    },
   },
 },
 'www.default.net': {
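The new valkey block gives every deployment a default Valkey client target. A quick connectivity check against these defaults (a sketch, assuming a Valkey instance is already listening locally; valkey-cli ships with Valkey, and redis-cli is wire-compatible):

    valkey-cli -h 127.0.0.1 -p 6379 ping
    # expected reply: PONG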
package/docker-compose.yml CHANGED
@@ -58,7 +58,7 @@ services:
       cpus: '0.25'
       memory: 20M
     labels: # labels in Compose file instead of Dockerfile
-      engine.version: '2.8.79'
+      engine.version: '2.8.84'
     networks:
       - load-balancer
 
package/manifests/deployment/dd-template-development/deployment.yaml ADDED
@@ -0,0 +1,167 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dd-template-development-blue
+  labels:
+    app: dd-template-development-blue
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: dd-template-development-blue
+  template:
+    metadata:
+      labels:
+        app: dd-template-development-blue
+    spec:
+      containers:
+        - name: dd-template-development-blue
+          image: localhost/rockylinux9-underpost:v2.8.84
+          # resources:
+          #   requests:
+          #     memory: "124Ki"
+          #     cpu: "100m"
+          #   limits:
+          #     memory: "1992Ki"
+          #     cpu: "1600m"
+          command:
+            - /bin/sh
+            - -c
+            - >
+              npm install -g npm@11.2.0 &&
+              npm install -g underpost &&
+              cd $(underpost root)/underpost &&
+              node bin/deploy update-default-conf template &&
+              mkdir -p /home/dd &&
+              cd /home/dd &&
+              underpost new engine
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: dd-template-development-blue-service
+spec:
+  selector:
+    app: dd-template-development-blue
+  ports:
+    - name: 'tcp-4001'
+      protocol: TCP
+      port: 4001
+      targetPort: 4001
+    - name: 'udp-4001'
+      protocol: UDP
+      port: 4001
+      targetPort: 4001
+
+    - name: 'tcp-4002'
+      protocol: TCP
+      port: 4002
+      targetPort: 4002
+    - name: 'udp-4002'
+      protocol: UDP
+      port: 4002
+      targetPort: 4002
+
+    - name: 'tcp-4003'
+      protocol: TCP
+      port: 4003
+      targetPort: 4003
+    - name: 'udp-4003'
+      protocol: UDP
+      port: 4003
+      targetPort: 4003
+
+    - name: 'tcp-4004'
+      protocol: TCP
+      port: 4004
+      targetPort: 4004
+    - name: 'udp-4004'
+      protocol: UDP
+      port: 4004
+      targetPort: 4004
+  type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dd-template-development-green
+  labels:
+    app: dd-template-development-green
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: dd-template-development-green
+  template:
+    metadata:
+      labels:
+        app: dd-template-development-green
+    spec:
+      containers:
+        - name: dd-template-development-green
+          image: localhost/rockylinux9-underpost:v2.8.84
+          # resources:
+          #   requests:
+          #     memory: "124Ki"
+          #     cpu: "100m"
+          #   limits:
+          #     memory: "1992Ki"
+          #     cpu: "1600m"
+          command:
+            - /bin/sh
+            - -c
+            - >
+              npm install -g npm@11.2.0 &&
+              npm install -g underpost &&
+              cd $(underpost root)/underpost &&
+              node bin/deploy update-default-conf template &&
+              mkdir -p /home/dd &&
+              cd /home/dd &&
+              underpost new engine
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: dd-template-development-green-service
+spec:
+  selector:
+    app: dd-template-development-green
+  ports:
+    - name: 'tcp-4001'
+      protocol: TCP
+      port: 4001
+      targetPort: 4001
+    - name: 'udp-4001'
+      protocol: UDP
+      port: 4001
+      targetPort: 4001
+
+    - name: 'tcp-4002'
+      protocol: TCP
+      port: 4002
+      targetPort: 4002
+    - name: 'udp-4002'
+      protocol: UDP
+      port: 4002
+      targetPort: 4002
+
+    - name: 'tcp-4003'
+      protocol: TCP
+      port: 4003
+      targetPort: 4003
+    - name: 'udp-4003'
+      protocol: UDP
+      port: 4003
+      targetPort: 4003
+
+    - name: 'tcp-4004'
+      protocol: TCP
+      port: 4004
+      targetPort: 4004
+    - name: 'udp-4004'
+      protocol: UDP
+      port: 4004
+      targetPort: 4004
+  type: LoadBalancer
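A minimal smoke test for the new blue/green manifests (a sketch, assuming a cluster that can resolve the localhost/rockylinux9-underpost:v2.8.84 image and provides a LoadBalancer implementation):

    kubectl apply -f manifests/deployment/dd-template-development/deployment.yaml
    kubectl get pods -l app=dd-template-development-blue
    kubectl get svc dd-template-development-blue-service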
package/manifests/deployment/dd-template-development/proxy.yaml ADDED
@@ -0,0 +1,46 @@
+# "http://default.net:4001/socket.io": "http://localhost:4001/socket.io",
+# "http://default.net:4002/peer": "http://localhost:4002/peer",
+# "http://default.net:4001/": "http://localhost:4001/",
+# "http://www.default.net:4003/": "http://localhost:4003/"
+
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+  name: default.net
+spec:
+  virtualhost:
+    fqdn: default.net
+  routes:
+    - conditions:
+        - prefix: /
+      enableWebsockets: true
+      services:
+        - name: dd-template-development-blue-service
+          port: 4001
+          weight: 100
+
+    - conditions:
+        - prefix: /peer
+      enableWebsockets: true
+      services:
+        - name: dd-template-development-blue-service
+          port: 4002
+          weight: 100
+
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+  name: www.default.net
+spec:
+  virtualhost:
+    fqdn: www.default.net
+  routes:
+    - conditions:
+        - prefix: /
+      enableWebsockets: true
+      services:
+        - name: dd-template-development-blue-service
+          port: 4003
+          weight: 100
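Both HTTPProxy resources currently send 100% of traffic to the blue service. One way to cut the root route over to green without editing the file (a sketch; the JSON-patch path assumes the route and service ordering shown above):

    kubectl patch httpproxy default.net --type=json \
      -p='[{"op":"replace","path":"/spec/routes/0/services/0/name","value":"dd-template-development-green-service"}]'

Alternatively, list both services under a route with split weights for a gradual shift; Contour normalizes the weights across the listed services.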
package/manifests/deployment/tensorflow/tf-gpu-test.yaml ADDED
@@ -0,0 +1,65 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: tf-gpu-test-script
+  namespace: default
+data:
+  main_tf_gpu_test.py: |
+    import os
+    import tensorflow as tf
+
+    print("--- Starting GPU and Library Check ---")
+
+    gpus = tf.config.list_physical_devices("GPU")
+    if gpus:
+        try:
+            tf.config.set_visible_devices(gpus[0], "GPU")
+            logical_gpus = tf.config.list_logical_devices("GPU")
+            print(
+                f"TensorFlow detected {len(gpus)} Physical GPUs, {len(logical_gpus)} Logical GPUs. Using: {gpus[0].name}"
+            )
+        except RuntimeError as e:
+            print(f"RuntimeError during GPU configuration: {e}")
+    else:
+        print("TensorFlow did not detect any GPU devices. Running on CPU.")
+
+    print(f"XLA_FLAGS environment variable: {os.environ.get('XLA_FLAGS')}")
+    print(f"TF_XLA_FLAGS environment variable: {os.environ.get('TF_XLA_FLAGS')}")
+
+    print(f"TensorFlow version: {tf.__version__}")
+    print(f"Built with CUDA: {tf.test.is_built_with_cuda()}")
+    print(f"Is GPU available: {tf.config.list_physical_devices('GPU') != []}")
+
+    print("--- GPU and Library Check Complete ---")
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: tf-gpu-test-pod
+  namespace: default
+spec:
+  restartPolicy: Never
+  runtimeClassName: nvidia
+  containers:
+    - name: tensorflow-gpu-tester
+      image: nvcr.io/nvidia/tensorflow:24.04-tf2-py3
+      imagePullPolicy: IfNotPresent
+      command: ['python']
+      args: ['/app/main_tf_gpu_test.py']
+      resources:
+        limits:
+          nvidia.com/gpu: '1'
+      env:
+        - name: NVIDIA_VISIBLE_DEVICES
+          value: all
+      volumeMounts:
+        - name: tf-script-volume
+          mountPath: /app
+  volumes:
+    - name: tf-script-volume
+      configMap:
+        name: tf-gpu-test-script
+        items:
+          - key: main_tf_gpu_test.py
+            path: main_tf_gpu_test.py
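To run the check once the manifest is in place (a sketch, assuming the NVIDIA device plugin and the nvidia RuntimeClass are already installed in the cluster):

    kubectl apply -f manifests/deployment/tensorflow/tf-gpu-test.yaml
    kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/tf-gpu-test-pod --timeout=10m
    kubectl logs tf-gpu-test-pod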
package/manifests/lxd/lxd-admin-profile.yaml CHANGED
@@ -7,6 +7,7 @@ devices:
     name: eth0
     network: lxdbr0
     type: nic
+    ipv4.address: 10.250.250.100
   root:
     path: /
     pool: local # lxc storage list
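The new ipv4.address pins instances that use this profile to a fixed lease on lxdbr0. Applying the updated profile and verifying it (a sketch; the profile name admin-profile matches the manifest's intent but is an assumption about how it is registered):

    lxc profile edit admin-profile < manifests/lxd/lxd-admin-profile.yaml
    lxc profile show admin-profile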
package/manifests/lxd/lxd-preseed.yaml CHANGED
@@ -1,51 +1,23 @@
 config:
   core.https_address: 127.0.0.1:8443
-
-networks:
-  - name: lxdbr0
-    type: bridge
-    config:
-      ipv4.address: 10.250.250.1/24
-      ipv4.nat: "true"
-      ipv4.dhcp: "true"
-      ipv4.dhcp.ranges: 10.250.250.2-10.250.250.254
-      ipv4.firewall: "false"
-      ipv6.address: none
-
+networks: []
 storage_pools:
-  - name: local
-    driver: zfs
-    config:
+  - config:
       size: 100GiB
-
+    description: ""
+    name: local
+    driver: zfs
+storage_volumes: []
 profiles:
-  - name: default
-    config: {}
-    description: "default profile"
-    devices:
-      root:
-        path: /
-        pool: local
-        type: disk
-
-  - name: admin-profile
-    description: "vm nat network admin profile"
-    config:
-      limits.cpu: "2"
-      limits.memory: 4GB
+  - config: {}
+    description: ""
     devices:
-      eth0:
-        name: eth0
-        network: lxdbr0
-        type: nic
       root:
         path: /
        pool: local
-        size: 100GB
         type: disk
-
+    name: default
 projects: []
-
 cluster:
   server_name: lxd-node1
   enabled: true
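Note that the preseed no longer creates lxdbr0 (networks: []), so the bridge referenced by lxd-admin-profile.yaml must exist before instances launch. Applying the preseed on a fresh LXD install (a sketch):

    lxd init --preseed < manifests/lxd/lxd-preseed.yaml
    lxc network create lxdbr0   # bridge is no longer part of the preseed
    lxc storage list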
package/manifests/lxd/underpost-setup.sh CHANGED
@@ -1,20 +1,25 @@
 #!/bin/bash
 
+# Exit immediately if a command exits with a non-zero status.
 set -e
 
-# Expand /dev/sda2 partition and resize filesystem automatically
+echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm/K3s Use Case)..."
+
+# --- Disk Partition Resizing (Keep as is, seems functional) ---
+echo "Expanding /dev/sda2 partition and resizing filesystem..."
 
 # Check if parted is installed
 if ! command -v parted &>/dev/null; then
   echo "parted not found, installing..."
-  dnf install -y parted
+  sudo dnf install -y parted
 fi
 
 # Get start sector of /dev/sda2
-START_SECTOR=$(parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
+START_SECTOR=$(sudo parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
 
 # Resize the partition
-parted /dev/sda ---pretend-input-tty <<EOF
+# Using 'sudo' for parted commands
+sudo parted /dev/sda ---pretend-input-tty <<EOF
 unit s
 resizepart 2 100%
 Yes
@@ -22,21 +27,28 @@ quit
 EOF
 
 # Resize the filesystem
-resize2fs /dev/sda2
+sudo resize2fs /dev/sda2
 
 echo "Disk and filesystem resized successfully."
-sudo dnf install -y tar
-sudo dnf install -y bzip2
-sudo dnf install -y git
+
+# --- Essential System Package Installation ---
+echo "Installing essential system packages..."
+sudo dnf install -y tar bzip2 git epel-release
+
+# Perform a system update to ensure all packages are up-to-date
 sudo dnf -y update
-sudo dnf -y install epel-release
-sudo dnf install -y ufw
-sudo systemctl enable --now ufw
+
+# --- NVM and Node.js Installation ---
+echo "Installing NVM and Node.js v23.8.0..."
 curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
-NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
+
+# Load nvm for the current session
+export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
 [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
+
 nvm install 23.8.0
 nvm use 23.8.0
+
 echo "
 ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
 ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
@@ -45,22 +57,37 @@ echo "
 ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
 ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
 
-Installing underpost k8s node ...
-
+Installing underpost k8s node...
 "
+
+# Install underpost globally
 npm install -g underpost
-chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost
+
+# Ensure underpost executable is in PATH and has execute permissions
+# Adjusting this for global npm install which usually handles permissions
+# If you still face issues, ensure /root/.nvm/versions/node/v23.8.0/bin is in your PATH
+# For global installs, it's usually handled automatically.
+# chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost # This might not be necessary for global npm installs
+
+# --- Kernel Module for Bridge Filtering ---
+# This is crucial for Kubernetes networking (CNI)
+echo "Loading br_netfilter kernel module..."
 sudo modprobe br_netfilter
-mkdir -p /home/dd
-cd $(underpost root)/underpost
+
+# --- Initial Host Setup for Kubernetes Prerequisites ---
+# This calls the initHost method in cluster.js to install Docker, Podman, Kind, Kubeadm, Helm.
+echo "Running initial host setup for Kubernetes prerequisites..."
+# Ensure the current directory is where 'underpost' expects its root, or use absolute paths.
+# Assuming 'underpost root' correctly points to the base directory of your project.
+cd "$(underpost root)/underpost"
 underpost cluster --init-host
 
-# Default flags
+# --- Argument Parsing for Kubeadm/Kind/K3s/Worker ---
 USE_KUBEADM=false
-USE_KIND=false
+USE_KIND=false # Not the primary focus for this request, but keeping the logic
+USE_K3S=false  # New K3s option
 USE_WORKER=false
 
-# Loop through arguments
 for arg in "$@"; do
   case "$arg" in
     --kubeadm)
@@ -69,6 +96,9 @@ for arg in "$@"; do
     --kind)
       USE_KIND=true
      ;;
+    --k3s) # New K3s argument
+      USE_K3S=true
+      ;;
     --worker)
       USE_WORKER=true
       ;;
@@ -76,71 +106,58 @@ done
 done
 
 echo "USE_KUBEADM = $USE_KUBEADM"
-echo "USE_KIND = $USE_KIND"
-echo "USE_WORKER = $USE_WORKER"
-
-underpost cluster --kubeadm
-underpost cluster --reset
-
-PORTS=(
-  22    # SSH
-  80    # HTTP
-  443   # HTTPS
-  53    # DNS (TCP/UDP)
-  66    # TFTP
-  67    # DHCP
-  69    # TFTP
-  111   # rpcbind
-  179   # Calico BGP
-  2049  # NFS
-  20048 # NFS mountd
-  4011  # PXE boot
-  5240  # snapd API
-  5248  # Juju controller
-  6443  # Kubernetes API
-  9153  # CoreDNS metrics
-  10250 # Kubelet API
-  10251 # kube-scheduler
-  10252 # kube-controller-manager
-  10255 # Kubelet read-only (deprecated)
-  10257 # controller-manager (v1.23+)
-  10259 # scheduler (v1.23+)
-)
-
-PORT_RANGES=(
-  2379:2380   # etcd
-  # 30000:32767 # NodePort range
-  # 3000:3100   # App node ports
-  32765:32766 # Ephemeral ports
-  6783:6784   # Weave Net
-)
-
-# Open individual ports
-for PORT in "${PORTS[@]}"; do
-  ufw allow ${PORT}/tcp
-  ufw allow ${PORT}/udp
-done
+echo "USE_KIND = $USE_KIND"
+echo "USE_K3S = $USE_K3S" # Display K3s flag status
+echo "USE_WORKER = $USE_WORKER"
 
-# Open port ranges
-for RANGE in "${PORT_RANGES[@]}"; do
-  ufw allow ${RANGE}/tcp
-  ufw allow ${RANGE}/udp
-done
+# --- Kubernetes Cluster Initialization Logic ---
 
-# Behavior based on flags
-if $USE_KUBEADM; then
-  echo "Running control node with kubeadm..."
-  underpost cluster --kubeadm
-  # kubectl get pods --all-namespaces -o wide -w
-fi
+# Apply host configuration (SELinux, Containerd, Sysctl, and now firewalld disabling)
+echo "Applying Kubernetes host configuration (SELinux, Containerd, Sysctl, Firewalld)..."
+underpost cluster --config
 
-if $USE_KIND; then
+if $USE_KUBEADM; then
+  if $USE_WORKER; then
+    echo "Running worker node setup for kubeadm..."
+    # For worker nodes, the 'underpost cluster --worker' command will handle joining
+    # the cluster. The join command itself needs to be provided from the control plane.
+    # This script assumes the join command will be executed separately or passed in.
+    # Example: underpost cluster --worker --join-command "kubeadm join ..."
+    # For now, this just runs the worker-specific config.
+    underpost cluster --worker
+    underpost cluster --chown
+    echo "Worker node setup initiated. You will need to manually join this worker to your control plane."
+    echo "On your control plane, run 'kubeadm token create --print-join-command' and execute the output here."
+  else
+    echo "Running control plane setup with kubeadm..."
+    # This will initialize the kubeadm control plane and install Calico
+    underpost cluster --kubeadm
+    echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
+  fi
+elif $USE_K3S; then # New K3s initialization block
+  if $USE_WORKER; then
+    echo "Running worker node setup for K3s..."
+    # For K3s worker nodes, the 'underpost cluster --worker' command will handle joining
+    # the cluster. The K3s join command (k3s agent --server ...) needs to be provided.
+    underpost cluster --worker --k3s
+    underpost cluster --chown
+    echo "K3s Worker node setup initiated. You will need to manually join this worker to your control plane."
+    echo "On your K3s control plane, get the K3S_TOKEN from /var/lib/rancher/k3s/server/node-token"
+    echo "and the K3S_URL (e.g., https://<control-plane-ip>:6443)."
+    echo "Then execute: K3S_URL=${K3S_URL} K3S_TOKEN=${K3S_TOKEN} curl -sfL https://get.k3s.io | sh -"
+  else
+    echo "Running control plane setup with K3s..."
+    underpost cluster --k3s
+    echo "K3s control plane initialized. Check cluster status with 'kubectl get nodes'."
+  fi
+elif $USE_KIND; then
   echo "Running control node with kind..."
   underpost cluster
-  # kubectl get pods --all-namespaces -o wide -w
+  echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
+else
  echo "No specific cluster role (--kubeadm, --kind, --k3s, --worker) specified. Please provide one."
+  exit 1
 fi
 
-if $USE_WORKER; then
-  echo "Running worker..."
-  underpost cluster --worker --config
-fi
+echo "Underpost Kubernetes Node Setup completed."
+echo "Remember to verify cluster health with 'kubectl get nodes' and 'kubectl get pods --all-namespaces'."
package/manifests/maas/device-scan.sh ADDED
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+for iface_path in /sys/class/net/*; do
+  name=$(basename "$iface_path")
+  mac=$(< "$iface_path/address")
+  ip=$(ip -4 addr show dev "$name" \
+    | grep -oP '(?<=inet\s)\d+(\.\d+){3}' || echo "—")
+  operstate=$(< "$iface_path/operstate")
+  mtu=$(< "$iface_path/mtu")
+
+  # Driver
+  if [ -L "$iface_path/device/driver" ]; then
+    driver=$(basename "$(readlink -f "$iface_path/device/driver")")
+  else
+    driver="—"
+  fi
+
+  # PCI vendor/device ID
+  pci_dev="$iface_path/device"
+  if [ -f "$pci_dev/vendor" ] && [ -f "$pci_dev/device" ]; then
+    vendor_id=$(< "$pci_dev/vendor")
+    device_id=$(< "$pci_dev/device")
+    # strip the 0x prefix: 0x8086 -> 8086, etc.
+    vendor_id=${vendor_id#0x}
+    device_id=${device_id#0x}
+    pci="${vendor_id}:${device_id}"
+  else
+    pci="—"
+  fi
+
+  # Link speed
+  speed=$(cat "$iface_path/speed" 2>/dev/null || echo "—")
+
+  echo "Interface: $name"
+  echo "  MAC: $mac"
+  echo "  IPv4: $ip"
+  echo "  State: $operstate"
+  echo "  MTU: $mtu"
+  echo "  Driver: $driver"
+  echo "  PCI Vendor:Device ID: $pci"
+  echo "  Link Speed: ${speed}Mb/s"
+  echo
+done
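The scan only reads /sys and the output of ip, so it runs without root:

    bash manifests/maas/device-scan.sh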
package/manifests/maas/gpu-diag.sh ADDED
@@ -0,0 +1,19 @@
+# GPUs and drivers in use
+sudo lspci -nnk | egrep -i 'vga|3d' -A3
+
+# relevant loaded kernel modules
+lsmod | egrep 'nvidia|nouveau|amdgpu' || true
+
+# if the nvidia tool exists
+nvidia-smi 2>/dev/null || echo "nvidia-smi not available / driver not loaded"
+
+# kernel-related errors
+sudo dmesg | egrep -i 'nvidia|nouveau|amdgpu' --color=auto
+
+# recent system errors / gdm / mutter / X
+sudo journalctl -b -p err --no-pager | head -n 200
+journalctl -b _COMM=gdm --no-pager | tail -n 200
+journalctl -b _COMM=Xorg --no-pager | tail -n 200
+
+# X log (if present)
+sudo grep -E "(EE|WW|NVIDIA|nouveau|amdgpu)" /var/log/Xorg.0.log || true
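Since the value of this script is the combined output, one way to run it is to capture everything into a log for later inspection (a sketch; the script's own sudo calls handle privileges):

    bash manifests/maas/gpu-diag.sh 2>&1 | tee gpu-diag.log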