underpost 2.8.6 → 2.8.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/.vscode/extensions.json +36 -3
  2. package/.vscode/settings.json +2 -0
  3. package/CHANGELOG.md +24 -4
  4. package/Dockerfile +9 -10
  5. package/README.md +41 -2
  6. package/bin/build.js +2 -2
  7. package/bin/db.js +1 -0
  8. package/bin/deploy.js +1521 -130
  9. package/bin/file.js +8 -0
  10. package/bin/index.js +1 -218
  11. package/cli.md +530 -0
  12. package/conf.js +4 -0
  13. package/docker-compose.yml +1 -1
  14. package/jsdoc.json +1 -1
  15. package/manifests/deployment/adminer/deployment.yaml +32 -0
  16. package/manifests/deployment/adminer/kustomization.yaml +7 -0
  17. package/manifests/deployment/adminer/service.yaml +13 -0
  18. package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
  19. package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
  20. package/manifests/deployment/fastapi/backend-deployment.yml +120 -0
  21. package/manifests/deployment/fastapi/backend-service.yml +19 -0
  22. package/manifests/deployment/fastapi/frontend-deployment.yml +54 -0
  23. package/manifests/deployment/fastapi/frontend-service.yml +15 -0
  24. package/manifests/deployment/fastapi/initial_data.sh +56 -0
  25. package/manifests/deployment/kafka/deployment.yaml +69 -0
  26. package/manifests/deployment/spark/spark-pi-py.yaml +21 -0
  27. package/manifests/envoy-service-nodeport.yaml +23 -0
  28. package/manifests/kubeadm-calico-config.yaml +119 -0
  29. package/manifests/kubelet-config.yaml +65 -0
  30. package/manifests/lxd/lxd-admin-profile.yaml +17 -0
  31. package/manifests/lxd/lxd-preseed.yaml +30 -0
  32. package/manifests/lxd/underpost-setup.sh +163 -0
  33. package/manifests/maas/lxd-preseed.yaml +32 -0
  34. package/manifests/maas/maas-setup.sh +82 -0
  35. package/manifests/mariadb/statefulset.yaml +2 -1
  36. package/manifests/mariadb/storage-class.yaml +10 -0
  37. package/manifests/mongodb/kustomization.yaml +1 -1
  38. package/manifests/mongodb/statefulset.yaml +12 -11
  39. package/manifests/mongodb/storage-class.yaml +9 -0
  40. package/manifests/mongodb-4.4/service-deployment.yaml +3 -3
  41. package/manifests/mysql/kustomization.yaml +7 -0
  42. package/manifests/mysql/pv-pvc.yaml +27 -0
  43. package/manifests/mysql/statefulset.yaml +55 -0
  44. package/manifests/postgresql/configmap.yaml +9 -0
  45. package/manifests/postgresql/kustomization.yaml +10 -0
  46. package/manifests/postgresql/pv.yaml +15 -0
  47. package/manifests/postgresql/pvc.yaml +13 -0
  48. package/manifests/postgresql/service.yaml +10 -0
  49. package/manifests/postgresql/statefulset.yaml +37 -0
  50. package/manifests/valkey/service.yaml +3 -9
  51. package/manifests/valkey/statefulset.yaml +12 -13
  52. package/package.json +3 -9
  53. package/src/api/default/default.service.js +1 -1
  54. package/src/api/user/user.service.js +14 -11
  55. package/src/cli/baremetal.js +60 -0
  56. package/src/cli/cluster.js +551 -65
  57. package/src/cli/cron.js +39 -8
  58. package/src/cli/db.js +20 -10
  59. package/src/cli/deploy.js +288 -86
  60. package/src/cli/env.js +10 -4
  61. package/src/cli/fs.js +21 -9
  62. package/src/cli/image.js +116 -124
  63. package/src/cli/index.js +319 -0
  64. package/src/cli/lxd.js +395 -0
  65. package/src/cli/monitor.js +236 -0
  66. package/src/cli/repository.js +14 -8
  67. package/src/client/components/core/Account.js +28 -24
  68. package/src/client/components/core/Blockchain.js +1 -1
  69. package/src/client/components/core/CalendarCore.js +14 -84
  70. package/src/client/components/core/CommonJs.js +2 -1
  71. package/src/client/components/core/Css.js +0 -1
  72. package/src/client/components/core/CssCore.js +10 -2
  73. package/src/client/components/core/Docs.js +1 -1
  74. package/src/client/components/core/EventsUI.js +3 -3
  75. package/src/client/components/core/FileExplorer.js +86 -78
  76. package/src/client/components/core/JoyStick.js +2 -2
  77. package/src/client/components/core/LoadingAnimation.js +1 -17
  78. package/src/client/components/core/LogIn.js +3 -3
  79. package/src/client/components/core/LogOut.js +1 -1
  80. package/src/client/components/core/Modal.js +14 -8
  81. package/src/client/components/core/Panel.js +19 -61
  82. package/src/client/components/core/PanelForm.js +13 -22
  83. package/src/client/components/core/Recover.js +3 -3
  84. package/src/client/components/core/RichText.js +1 -11
  85. package/src/client/components/core/Router.js +3 -1
  86. package/src/client/components/core/SignUp.js +2 -2
  87. package/src/client/components/default/RoutesDefault.js +3 -2
  88. package/src/client/services/default/default.management.js +45 -38
  89. package/src/client/ssr/Render.js +2 -0
  90. package/src/index.js +34 -2
  91. package/src/mailer/MailerProvider.js +3 -0
  92. package/src/runtime/lampp/Dockerfile +65 -0
  93. package/src/server/client-build.js +13 -0
  94. package/src/server/conf.js +151 -1
  95. package/src/server/dns.js +56 -18
  96. package/src/server/json-schema.js +77 -0
  97. package/src/server/logger.js +3 -3
  98. package/src/server/network.js +7 -122
  99. package/src/server/peer.js +2 -2
  100. package/src/server/proxy.js +4 -4
  101. package/src/server/runtime.js +24 -11
  102. package/src/server/start.js +122 -0
  103. package/src/server/valkey.js +27 -13
package/manifests/kubeadm-calico-config.yaml
@@ -0,0 +1,119 @@
+# This consolidated YAML file contains configurations for:
+# 1. Calico Installation (Installation and APIServer resources)
+# 2. A permissive Egress NetworkPolicy for the 'default' namespace
+#
+# These are standard Kubernetes resources that can be applied directly using 'kubectl apply'.
+# The kubeadm-specific ClusterConfiguration and InitConfiguration have been removed
+# as they are only processed by the 'kubeadm init' command, not 'kubectl apply'.
+
+# --- Calico Installation: Base configuration for Calico ---
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
+apiVersion: operator.tigera.io/v1
+kind: Installation
+metadata:
+  name: default
+spec:
+  # Configures Calico networking.
+  calicoNetwork:
+    # Note: The ipPools section cannot be modified post-install.
+    ipPools:
+      - blockSize: 26
+        cidr: 192.168.0.0/16
+        encapsulation: VXLANCrossSubnet
+        natOutgoing: Enabled
+        nodeSelector: all()
+
+---
+# This section configures the Calico API server.
+# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
+apiVersion: operator.tigera.io/v1
+kind: APIServer
+metadata:
+  name: default
+spec: {}
+
+---
+# This consolidated NetworkPolicy file ensures that all pods in the specified namespaces
+# have unrestricted egress (outbound) access.
+# This is useful for troubleshooting or for environments where strict egress control
+# is not immediately required for these system/default namespaces.
+
+---
+# Policy for the 'default' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-default-namespace
+  namespace: default # This policy applies to the 'default' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'kube-system' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-kube-system-namespace
+  namespace: kube-system # This policy applies to the 'kube-system' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'kube-node-lease' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-kube-node-lease-namespace
+  namespace: kube-node-lease # This policy applies to the 'kube-node-lease' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'kube-public' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-kube-public-namespace
+  namespace: kube-public # This policy applies to the 'kube-public' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
+
+---
+# Policy for the 'tigera-operator' namespace
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-all-egress-tigera-operator-namespace
+  namespace: tigera-operator # This policy applies to the 'tigera-operator' namespace
+spec:
+  podSelector: {} # Selects all pods in this namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0 # Allows traffic to any IPv4 address
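Per the header comments, this manifest is applied directly once the tigera-operator is running. A minimal verification sketch, assuming the package path from the file list above:

kubectl apply -f package/manifests/kubeadm-calico-config.yaml
kubectl get pods -n calico-system   # operator-managed Calico pods come up
kubectl get networkpolicy -A        # the five allow-all-egress policies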
package/manifests/kubelet-config.yaml
@@ -0,0 +1,65 @@
+apiVersion: v1
+data:
+  kubelet: |
+    apiVersion: kubelet.config.k8s.io/v1beta1
+    authentication:
+      anonymous:
+        enabled: false
+      webhook:
+        cacheTTL: 0s
+        enabled: true
+      x509:
+        clientCAFile: /etc/kubernetes/pki/ca.crt
+    authorization:
+      mode: Webhook
+      webhook:
+        cacheAuthorizedTTL: 0s
+        cacheUnauthorizedTTL: 0s
+    cgroupDriver: systemd
+    clusterDNS:
+      - 10.96.0.10
+    clusterDomain: cluster.local
+    containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
+    cpuManagerReconcilePeriod: 0s
+    crashLoopBackOff: {}
+    evictionHard:
+      imagefs.available: "5%" # Adjusted for more tolerance
+      memory.available: "100Mi"
+      nodefs.available: "5%" # Adjusted for more tolerance
+      nodefs.inodesFree: "5%"
+    evictionPressureTransitionPeriod: 0s
+    fileCheckFrequency: 0s
+    healthzBindAddress: 127.0.0.1
+    healthzPort: 10248
+    httpCheckFrequency: 0s
+    imageMaximumGCAge: 0s
+    imageMinimumGCAge: 0s
+    kind: KubeletConfiguration
+    logging:
+      flushFrequency: 0
+      options:
+        json:
+          infoBufferSize: "0"
+        text:
+          infoBufferSize: "0"
+      verbosity: 0
+    memorySwap: {}
+    nodeStatusReportFrequency: 0s
+    nodeStatusUpdateFrequency: 0s
+    rotateCertificates: true
+    runtimeRequestTimeout: 0s
+    shutdownGracePeriod: 0s
+    shutdownGracePeriodCriticalPods: 0s
+    staticPodPath: /etc/kubernetes/manifests
+    streamingConnectionIdleTimeout: 0s
+    syncFrequency: 0s
+    volumeStatsAggPeriod: 0s
+kind: ConfigMap
+metadata:
+  annotations:
+    kubeadm.kubernetes.io/component-config.hash: sha256:26488e9fc7c5cb5fdda9996cda2e6651a9af5febce07ea02de11bd3ef3f49e9c
+  creationTimestamp: "2025-06-30T12:42:00Z"
+  name: kubelet-config
+  namespace: kube-system
+  resourceVersion: "204"
+  uid: a85321a8-f3e0-40fa-8e4e-9d33b8842e7a
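This is the ConfigMap that kubeadm publishes in kube-system and that each kubelet pulls on join or upgrade. A sketch for inspecting it and re-syncing a node's local /var/lib/kubelet/config.yaml from it, using standard kubeadm phases:

kubectl -n kube-system get configmap kubelet-config -o yaml
sudo kubeadm upgrade node phase kubelet-config
sudo systemctl restart kubelet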
package/manifests/lxd/lxd-admin-profile.yaml
@@ -0,0 +1,17 @@
+config:
+  limits.cpu: "2"
+  limits.memory: 4GB
+description: vm nat network
+devices:
+  eth0:
+    name: eth0
+    network: lxdbr0
+    type: nic
+    ipv4.address: 10.250.250.100
+  root:
+    path: /
+    pool: local # lxc storage list
+    size: 100GB
+    type: disk
+name: admin-profile
+used_by: []
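A hedged usage sketch for this profile; the lxc subcommands are standard, but the image alias and instance name are placeholders:

lxc profile create admin-profile
lxc profile edit admin-profile < package/manifests/lxd/lxd-admin-profile.yaml
lxc launch images:rockylinux/9 k8s-admin --vm -p admin-profile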
package/manifests/lxd/lxd-preseed.yaml
@@ -0,0 +1,30 @@
+config:
+  core.https_address: 127.0.0.1:8443
+networks: []
+storage_pools:
+  - config:
+      size: 100GiB
+    description: ""
+    name: local
+    driver: zfs
+storage_volumes: []
+profiles:
+  - config: {}
+    description: ""
+    devices:
+      root:
+        path: /
+        pool: local
+        type: disk
+    name: default
+projects: []
+cluster:
+  server_name: lxd-node1
+  enabled: true
+  member_config: []
+  cluster_address: ""
+  cluster_certificate: ""
+  server_address: ""
+  cluster_password: ""
+  cluster_token: ""
+  cluster_certificate_path: ""
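The preseed is consumed non-interactively, the same pattern maas-setup.sh below uses for its own preseed. A sketch, assuming it runs from the package root:

lxd init --preseed < package/manifests/lxd/lxd-preseed.yaml
lxc storage list   # should list the 'local' zfs pool
lxc cluster list   # 'lxd-node1' as the first cluster member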
package/manifests/lxd/underpost-setup.sh
@@ -0,0 +1,163 @@
+#!/bin/bash
+
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
+echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm/K3s Use Case)..."
+
+# --- Disk Partition Resizing (Keep as is, seems functional) ---
+echo "Expanding /dev/sda2 partition and resizing filesystem..."
+
+# Check if parted is installed
+if ! command -v parted &>/dev/null; then
+  echo "parted not found, installing..."
+  sudo dnf install -y parted
+fi
+
+# Get start sector of /dev/sda2
+START_SECTOR=$(sudo parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
+
+# Resize the partition
+# Using 'sudo' for parted commands
+sudo parted /dev/sda ---pretend-input-tty <<EOF
+unit s
+resizepart 2 100%
+Yes
+quit
+EOF
+
+# Resize the filesystem
+sudo resize2fs /dev/sda2
+
+echo "Disk and filesystem resized successfully."
+
+# --- Essential System Package Installation ---
+echo "Installing essential system packages..."
+sudo dnf install -y tar bzip2 git epel-release
+
+# Perform a system update to ensure all packages are up-to-date
+sudo dnf -y update
+
+# --- NVM and Node.js Installation ---
+echo "Installing NVM and Node.js v23.8.0..."
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
+
+# Load nvm for the current session
+export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
+[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
+
+nvm install 23.8.0
+nvm use 23.8.0
+
+echo "
+██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
+██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
+██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
+██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔═╝░░░██║░░██║░╚═══██╗░░░██║░░░
+╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
+░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
+
+Installing underpost k8s node...
+"
+
+# Install underpost globally
+npm install -g underpost
+
+# Ensure underpost executable is in PATH and has execute permissions
+# Adjusting this for global npm install which usually handles permissions
+# If you still face issues, ensure /root/.nvm/versions/node/v23.8.0/bin is in your PATH
+# For global installs, it's usually handled automatically.
+# chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost # This might not be necessary for global npm installs
+
+# --- Kernel Module for Bridge Filtering ---
+# This is crucial for Kubernetes networking (CNI)
+echo "Loading br_netfilter kernel module..."
+sudo modprobe br_netfilter
+
+# --- Initial Host Setup for Kubernetes Prerequisites ---
+# This calls the initHost method in cluster.js to install Docker, Podman, Kind, Kubeadm, Helm.
+echo "Running initial host setup for Kubernetes prerequisites..."
+# Ensure the current directory is where 'underpost' expects its root, or use absolute paths.
+# Assuming 'underpost root' correctly points to the base directory of your project.
+cd "$(underpost root)/underpost"
+underpost cluster --init-host
+
+# --- Argument Parsing for Kubeadm/Kind/K3s/Worker ---
+USE_KUBEADM=false
+USE_KIND=false # Not the primary focus for this request, but keeping the logic
+USE_K3S=false # New K3s option
+USE_WORKER=false
+
+for arg in "$@"; do
+  case "$arg" in
+    --kubeadm)
+      USE_KUBEADM=true
+      ;;
+    --kind)
+      USE_KIND=true
+      ;;
+    --k3s) # New K3s argument
+      USE_K3S=true
+      ;;
+    --worker)
+      USE_WORKER=true
+      ;;
+  esac
+done
+
+echo "USE_KUBEADM = $USE_KUBEADM"
+echo "USE_KIND = $USE_KIND"
+echo "USE_K3S = $USE_K3S" # Display K3s flag status
+echo "USE_WORKER = $USE_WORKER"
+
+# --- Kubernetes Cluster Initialization Logic ---
+
+# Apply host configuration (SELinux, Containerd, Sysctl, and now firewalld disabling)
+echo "Applying Kubernetes host configuration (SELinux, Containerd, Sysctl, Firewalld)..."
+underpost cluster --config
+
+if $USE_KUBEADM; then
+  if $USE_WORKER; then
+    echo "Running worker node setup for kubeadm..."
+    # For worker nodes, the 'underpost cluster --worker' command will handle joining
+    # the cluster. The join command itself needs to be provided from the control plane.
+    # This script assumes the join command will be executed separately or passed in.
+    # Example: underpost cluster --worker --join-command "kubeadm join ..."
+    # For now, this just runs the worker-specific config.
+    underpost cluster --worker
+    underpost cluster --chown
+    echo "Worker node setup initiated. You will need to manually join this worker to your control plane."
+    echo "On your control plane, run 'kubeadm token create --print-join-command' and execute the output here."
+  else
+    echo "Running control plane setup with kubeadm..."
+    # This will initialize the kubeadm control plane and install Calico
+    underpost cluster --kubeadm
+    echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
+  fi
+elif $USE_K3S; then # New K3s initialization block
+  if $USE_WORKER; then
+    echo "Running worker node setup for K3s..."
+    # For K3s worker nodes, the 'underpost cluster --worker' command will handle joining
+    # the cluster. The K3s join command (k3s agent --server ...) needs to be provided.
+    underpost cluster --worker --k3s
+    underpost cluster --chown
+    echo "K3s Worker node setup initiated. You will need to manually join this worker to your control plane."
+    echo "On your K3s control plane, get the K3S_TOKEN from /var/lib/rancher/k3s/server/node-token"
+    echo "and the K3S_URL (e.g., https://<control-plane-ip>:6443)."
+    echo "Then execute: K3S_URL=${K3S_URL} K3S_TOKEN=${K3S_TOKEN} curl -sfL https://get.k3s.io | sh -"
+  else
+    echo "Running control plane setup with K3s..."
+    underpost cluster --k3s
+    echo "K3s control plane initialized. Check cluster status with 'kubectl get nodes'."
+  fi
+elif $USE_KIND; then
+  echo "Running control node with kind..."
+  underpost cluster
+  echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
+else
+  echo "No specific cluster role (--kubeadm, --kind, --k3s, --worker) specified. Please provide one."
+  exit 1
+fi
+
+echo "Underpost Kubernetes Node Setup completed."
+echo "Remember to verify cluster health with 'kubectl get nodes' and 'kubectl get pods --all-namespaces'."
package/manifests/maas/lxd-preseed.yaml
@@ -0,0 +1,32 @@
+config:
+  core.https_address: "[::]:8443"
+  # core.trust_password: password
+networks:
+  - config:
+      ipv4.address: 10.10.10.1/24
+      ipv6.address: none
+    description: ""
+    name: lxdbr0
+    type: ""
+    project: default
+storage_pools:
+  - config:
+      size: 500GB
+    description: ""
+    name: default
+    driver: zfs
+profiles:
+  - config: {}
+    description: ""
+    devices:
+      eth0:
+        name: eth0
+        network: lxdbr0
+        type: nic
+      root:
+        path: /
+        pool: default
+        type: disk
+    name: default
+projects: []
+cluster: null
package/manifests/maas/maas-setup.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+set -euo pipefail
+
+# Update LXD and install dependencies
+sudo snap install --channel=latest/stable lxd
+sudo snap refresh --channel=latest/stable lxd
+sudo snap install jq
+sudo snap install maas
+
+# Get default interface and IP address
+INTERFACE=$(ip route | grep default | awk '{print $5}')
+IP_ADDRESS=$(ip -4 addr show dev "$INTERFACE" | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
+
+# Install and persist iptables NAT rules (Rocky Linux compatible)
+sudo dnf install -y iptables-services
+sudo systemctl enable --now iptables
+
+# Enable IP forwarding and configure NAT
+sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
+sudo sysctl -p
+sudo iptables -t nat -A POSTROUTING -o "$INTERFACE" -j SNAT --to "$IP_ADDRESS"
+sudo service iptables save
+
+# LXD preseed
+cd /home/dd/engine
+lxd init --preseed <manifests/maas/lxd-preseed.yaml
+
+# Wait for LXD to be ready
+lxd waitready
+
+# Load secrets
+underpost secret underpost --create-from-file /home/dd/engine/engine-private/conf/dd-cron/.env.production
+
+# Extract config values
+DB_PG_MAAS_USER=$(node bin config get --plain DB_PG_MAAS_USER)
+DB_PG_MAAS_PASS=$(node bin config get --plain DB_PG_MAAS_PASS)
+DB_PG_MAAS_HOST=$(node bin config get --plain DB_PG_MAAS_HOST)
+DB_PG_MAAS_NAME=$(node bin config get --plain DB_PG_MAAS_NAME)
+
+MAAS_ADMIN_USERNAME=$(node bin config get --plain MAAS_ADMIN_USERNAME)
+MAAS_ADMIN_EMAIL=$(node bin config get --plain MAAS_ADMIN_EMAIL)
+MAAS_ADMIN_PASS=$(node bin config get --plain MAAS_ADMIN_PASS)
+
+# Initialize MAAS
+maas init region+rack \
+  --database-uri "postgres://${DB_PG_MAAS_USER}:${DB_PG_MAAS_PASS}@${DB_PG_MAAS_HOST}/${DB_PG_MAAS_NAME}" \
+  --maas-url http://${IP_ADDRESS}:5240/MAAS
+
+# Let MAAS initialize
+sleep 30
+
+# Create admin and get API key
+maas createadmin \
+  --username "$MAAS_ADMIN_USERNAME" \
+  --password "$MAAS_ADMIN_PASS" \
+  --email "$MAAS_ADMIN_EMAIL"
+
+APIKEY=$(maas apikey --username "$MAAS_ADMIN_USERNAME")
+
+# Login to MAAS
+maas login "$MAAS_ADMIN_USERNAME" "http://localhost:5240/MAAS/" "$APIKEY"
+
+# Configure MAAS networking
+SUBNET=10.10.10.0/24
+FABRIC_ID=$(maas "$MAAS_ADMIN_USERNAME" subnet read "$SUBNET" | jq -r ".vlan.fabric_id")
+VLAN_TAG=$(maas "$MAAS_ADMIN_USERNAME" subnet read "$SUBNET" | jq -r ".vlan.vid")
+PRIMARY_RACK=$(maas "$MAAS_ADMIN_USERNAME" rack-controllers read | jq -r ".[] | .system_id")
+
+maas "$MAAS_ADMIN_USERNAME" subnet update "$SUBNET" gateway_ip=10.10.10.1
+maas "$MAAS_ADMIN_USERNAME" ipranges create type=dynamic start_ip=10.10.10.200 end_ip=10.10.10.254
+maas "$MAAS_ADMIN_USERNAME" vlan update "$FABRIC_ID" "$VLAN_TAG" dhcp_on=True primary_rack="$PRIMARY_RACK"
+maas "$MAAS_ADMIN_USERNAME" maas set-config name=upstream_dns value=8.8.8.8
+
+# Register LXD as VM host
+VM_HOST_ID=$(maas "$MAAS_ADMIN_USERNAME" vm-hosts create \
+  password=password \
+  type=lxd \
+  power_address="https://${IP_ADDRESS}:8443" \
+  project=maas | jq '.id')
+
+# Set VM host CPU oversubscription
+maas "$MAAS_ADMIN_USERNAME" vm-host update "$VM_HOST_ID" cpu_over_commit_ratio=4
package/manifests/mariadb/statefulset.yaml
@@ -49,7 +49,8 @@ spec:
     - metadata:
         name: mariadb-storage
       spec:
-        accessModes: ['ReadWriteOnce']
+        accessModes: ["ReadWriteOnce"]
+        storageClassName: mariadb-storage-class
         resources:
           requests:
             storage: 1Gi
package/manifests/mariadb/storage-class.yaml
@@ -0,0 +1,10 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: mariadb-storage-class # Renamed for clarity
+  annotations:
+    # Set this to "true" if you want this to be the default StorageClass
+    # storageclass.kubernetes.io/is-default-class: "true"
+provisioner: rancher.io/local-path # Ensure this provisioner is installed in your cluster
+reclaimPolicy: Retain # Or Delete, depending on your data retention policy
+volumeBindingMode: WaitForFirstConsumer
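Because volumeBindingMode is WaitForFirstConsumer, a claim created from this class stays Pending until the MariaDB pod is scheduled; that is expected, not an error. A quick check, assuming the names above:

kubectl get storageclass mariadb-storage-class
kubectl get pvc   # mariadb-storage-* binds once the StatefulSet pod schedules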
package/manifests/mongodb/kustomization.yaml
@@ -6,6 +6,6 @@ resources:
   - pv-pvc.yaml
   - headless-service.yaml
   - statefulset.yaml
-  - backup-pv-pvc.yaml
+  # - backup-pv-pvc.yaml
   # - backup-cronjob.yaml
   # - backup-access.yaml
package/manifests/mongodb/statefulset.yaml
@@ -3,7 +3,7 @@ kind: StatefulSet
 metadata:
   name: mongodb # Specifies the name of the statefulset
 spec:
-  serviceName: 'mongodb-service' # Specifies the service to use
+  serviceName: "mongodb-service" # Specifies the service to use
   replicas: 2
   selector:
     matchLabels:
@@ -18,8 +18,8 @@ spec:
           image: docker.io/library/mongo:latest
           command:
             - mongod
-            - '--replSet'
-            - 'rs0'
+            - "--replSet"
+            - "rs0"
             # - '--config'
             # - '-f'
             # - '/etc/mongod.conf'
@@ -35,9 +35,9 @@ spec:
             # - '--setParameter'
             # - 'authenticationMechanisms=SCRAM-SHA-1'
             # - '--fork'
-            - '--logpath'
-            - '/var/log/mongodb/mongod.log'
-            - '--bind_ip_all'
+            - "--logpath"
+            - "/var/log/mongodb/mongod.log"
+            - "--bind_ip_all"
             # command: ['sh', '-c']
             # args:
             # - |
@@ -99,11 +99,11 @@ spec:
                   key: password
           resources:
             requests:
-              cpu: '100m'
-              memory: '256Mi'
+              cpu: "100m"
+              memory: "256Mi"
             limits:
-              cpu: '500m'
-              memory: '512Mi'
+              cpu: "500m"
+              memory: "512Mi"
       volumes:
         - name: keyfile
           secret:
@@ -119,7 +119,8 @@ spec:
     - metadata:
         name: mongodb-storage
       spec:
-        accessModes: ['ReadWriteOnce']
+        accessModes: ["ReadWriteOnce"]
+        storageClassName: mongodb-storage-class
         resources:
           requests:
             storage: 5Gi
package/manifests/mongodb/storage-class.yaml
@@ -0,0 +1,9 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: mongodb-storage-class
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "false"
+provisioner: rancher.io/local-path
+reclaimPolicy: Retain
+volumeBindingMode: WaitForFirstConsumer
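Both new StorageClasses reference the rancher.io/local-path provisioner, which is not built into Kubernetes. A sketch of installing the upstream local-path-provisioner (pin a release tag rather than master for production use):

kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
kubectl get pods -n local-path-storage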
package/manifests/mongodb-4.4/service-deployment.yaml
@@ -13,11 +13,11 @@ spec:
       labels:
         app: mongodb
     spec:
-      hostname: mongo
+      hostname: mongodb-service
      containers:
        - name: mongodb
-          image: docker.io/library/mongo:4.4
-          command: ['mongod', '--replSet', 'rs0', '--bind_ip_all']
+          image: mongo:4.4
+          command: ["mongod", "--replSet", "rs0", "--bind_ip_all"]
          # -- bash
          # mongo
          # use admin
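Since mongod still starts with --replSet rs0, the replica set must be initiated once, as the trailing comments hint. A hypothetical one-liner, assuming the Deployment is named mongodb and using the mongo shell bundled in the 4.4 image:

kubectl exec deploy/mongodb -- mongo --eval 'rs.initiate()'
kubectl exec deploy/mongodb -- mongo --eval 'rs.status().ok'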
package/manifests/mysql/kustomization.yaml
@@ -0,0 +1,7 @@
+---
+# kubectl apply -k core/.
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - pv-pvc.yaml
+  - statefulset.yaml
package/manifests/mysql/pv-pvc.yaml
@@ -0,0 +1,27 @@
+# pv-pvc.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: mysql-pv
+  labels:
+    type: local
+spec:
+  storageClassName: manual
+  capacity:
+    storage: 20Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/data"
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mysql-pv-claim
+spec:
+  storageClassName: manual
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 20Gi
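The kustomization above ties these resources together. A sketch of applying them and verifying the static binding; with storageClassName: manual there is no provisioner involved, so the claim binds directly to mysql-pv:

kubectl apply -k package/manifests/mysql/
kubectl get pv mysql-pv
kubectl get pvc mysql-pv-claim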