underpost 2.8.7 → 2.8.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.vscode/extensions.json +34 -2
  2. package/README.md +7 -5
  3. package/bin/db.js +1 -0
  4. package/bin/deploy.js +259 -74
  5. package/cli.md +88 -9
  6. package/conf.js +4 -0
  7. package/docker-compose.yml +1 -1
  8. package/manifests/deployment/adminer/service.yaml +1 -1
  9. package/manifests/deployment/dd-template-development/deployment.yaml +167 -0
  10. package/manifests/deployment/dd-template-development/proxy.yaml +46 -0
  11. package/manifests/deployment/fastapi/initial_data.sh +56 -0
  12. package/manifests/deployment/spark/spark-pi-py.yaml +21 -0
  13. package/manifests/envoy-service-nodeport.yaml +23 -0
  14. package/manifests/kubelet-config.yaml +65 -0
  15. package/manifests/lxd/lxd-admin-profile.yaml +17 -0
  16. package/manifests/lxd/lxd-preseed.yaml +30 -0
  17. package/manifests/lxd/underpost-setup.sh +163 -0
  18. package/manifests/maas/lxd-preseed.yaml +32 -0
  19. package/manifests/maas/maas-setup.sh +82 -0
  20. package/manifests/mariadb/statefulset.yaml +2 -1
  21. package/manifests/mariadb/storage-class.yaml +10 -0
  22. package/manifests/mongodb/kustomization.yaml +1 -1
  23. package/manifests/mongodb/statefulset.yaml +12 -11
  24. package/manifests/mongodb/storage-class.yaml +9 -0
  25. package/manifests/mongodb-4.4/service-deployment.yaml +2 -2
  26. package/manifests/mysql/kustomization.yaml +7 -0
  27. package/manifests/mysql/pv-pvc.yaml +27 -0
  28. package/manifests/mysql/statefulset.yaml +55 -0
  29. package/manifests/postgresql/statefulset.yaml +1 -1
  30. package/manifests/valkey/service.yaml +3 -9
  31. package/manifests/valkey/statefulset.yaml +12 -15
  32. package/package.json +1 -1
  33. package/src/cli/baremetal.js +60 -0
  34. package/src/cli/cluster.js +506 -207
  35. package/src/cli/deploy.js +47 -14
  36. package/src/cli/env.js +2 -2
  37. package/src/cli/image.js +83 -9
  38. package/src/cli/index.js +68 -61
  39. package/src/cli/lxd.js +395 -0
  40. package/src/cli/repository.js +9 -6
  41. package/src/index.js +17 -1
  42. package/src/runtime/lampp/Dockerfile +1 -1
  43. package/src/server/conf.js +58 -0
  44. package/src/server/logger.js +3 -3
  45. package/src/server/runtime.js +1 -1
  46. package/src/server/valkey.js +3 -3
  47. package/manifests/calico-custom-resources.yaml +0 -25
@@ -0,0 +1,163 @@
1
+ #!/bin/bash
2
+
3
+ # Exit immediately if a command exits with a non-zero status.
4
+ set -e
5
+
6
+ echo "Starting Underpost Kubernetes Node Setup for Production (Kubeadm/K3s Use Case)..."
7
+
8
+ # --- Disk Partition Resizing (Keep as is, seems functional) ---
9
+ echo "Expanding /dev/sda2 partition and resizing filesystem..."
10
+
11
+ # Check if parted is installed
12
+ if ! command -v parted &>/dev/null; then
13
+ echo "parted not found, installing..."
14
+ sudo dnf install -y parted
15
+ fi
16
+
17
+ # Get start sector of /dev/sda2
18
+ START_SECTOR=$(sudo parted /dev/sda -ms unit s print | awk -F: '/^2:/{print $2}' | sed 's/s//')
19
+
20
+ # Resize the partition
21
+ # Using 'sudo' for parted commands
22
+ sudo parted /dev/sda ---pretend-input-tty <<EOF
23
+ unit s
24
+ resizepart 2 100%
25
+ Yes
26
+ quit
27
+ EOF
28
+
29
+ # Resize the filesystem
30
+ sudo resize2fs /dev/sda2
31
+
32
+ echo "Disk and filesystem resized successfully."
33
+
34
+ # --- Essential System Package Installation ---
35
+ echo "Installing essential system packages..."
36
+ sudo dnf install -y tar bzip2 git epel-release
37
+
38
+ # Perform a system update to ensure all packages are up-to-date
39
+ sudo dnf -y update
40
+
41
+ # --- NVM and Node.js Installation ---
42
+ echo "Installing NVM and Node.js v23.8.0..."
43
+ curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
44
+
45
+ # Load nvm for the current session
46
+ export NVM_DIR="$([ -z "${XDG_CONFIG_HOME-}" ] && printf %s "${HOME}/.nvm" || printf %s "${XDG_CONFIG_HOME}/nvm")"
47
+ [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
48
+
49
+ nvm install 23.8.0
50
+ nvm use 23.8.0
51
+
52
+ echo "
53
+ ██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░██████╗░░█████╗░░██████╗████████╗
54
+ ██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝
55
+ ██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝██████╔╝██║░░██║╚█████╗░░░░██║░░░
56
+ ██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗██╔═╝░░░██║░░██║░╚═══██╗░░░██║░░░
57
+ ╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║██║░░░░░╚█████╔╝██████╔╝░░░██║░░░
58
+ ░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝╚═╝░░░░░░╚════╝░╚═════╝░░░░╚═╝░░░
59
+
60
+ Installing underpost k8s node...
61
+ "
62
+
63
+ # Install underpost globally
64
+ npm install -g underpost
65
+
66
+ # Ensure underpost executable is in PATH and has execute permissions
67
+ # Adjusting this for global npm install which usually handles permissions
68
+ # If you still face issues, ensure /root/.nvm/versions/node/v23.8.0/bin is in your PATH
69
+ # For global installs, it's usually handled automatically.
70
+ # chmod +x /root/.nvm/versions/node/v23.8.0/bin/underpost # This might not be necessary for global npm installs
71
+
72
+ # --- Kernel Module for Bridge Filtering ---
73
+ # This is crucial for Kubernetes networking (CNI)
74
+ echo "Loading br_netfilter kernel module..."
75
+ sudo modprobe br_netfilter
76
+
77
+ # --- Initial Host Setup for Kubernetes Prerequisites ---
78
+ # This calls the initHost method in cluster.js to install Docker, Podman, Kind, Kubeadm, Helm.
79
+ echo "Running initial host setup for Kubernetes prerequisites..."
80
+ # Ensure the current directory is where 'underpost' expects its root, or use absolute paths.
81
+ # Assuming 'underpost root' correctly points to the base directory of your project.
82
+ cd "$(underpost root)/underpost"
83
+ underpost cluster --init-host
84
+
85
+ # --- Argument Parsing for Kubeadm/Kind/K3s/Worker ---
86
+ USE_KUBEADM=false
87
+ USE_KIND=false # Not the primary focus for this request, but keeping the logic
88
+ USE_K3S=false # New K3s option
89
+ USE_WORKER=false
90
+
91
+ for arg in "$@"; do
92
+ case "$arg" in
93
+ --kubeadm)
94
+ USE_KUBEADM=true
95
+ ;;
96
+ --kind)
97
+ USE_KIND=true
98
+ ;;
99
+ --k3s) # New K3s argument
100
+ USE_K3S=true
101
+ ;;
102
+ --worker)
103
+ USE_WORKER=true
104
+ ;;
105
+ esac
106
+ done
107
+
108
+ echo "USE_KUBEADM = $USE_KUBEADM"
109
+ echo "USE_KIND = $USE_KIND"
110
+ echo "USE_K3S = $USE_K3S" # Display K3s flag status
111
+ echo "USE_WORKER = $USE_WORKER"
112
+
113
+ # --- Kubernetes Cluster Initialization Logic ---
114
+
115
+ # Apply host configuration (SELinux, Containerd, Sysctl, and now firewalld disabling)
116
+ echo "Applying Kubernetes host configuration (SELinux, Containerd, Sysctl, Firewalld)..."
117
+ underpost cluster --config
118
+
119
+ if $USE_KUBEADM; then
120
+ if $USE_WORKER; then
121
+ echo "Running worker node setup for kubeadm..."
122
+ # For worker nodes, the 'underpost cluster --worker' command will handle joining
123
+ # the cluster. The join command itself needs to be provided from the control plane.
124
+ # This script assumes the join command will be executed separately or passed in.
125
+ # Example: underpost cluster --worker --join-command "kubeadm join ..."
126
+ # For now, this just runs the worker-specific config.
127
+ underpost cluster --worker
128
+ underpost cluster --chown
129
+ echo "Worker node setup initiated. You will need to manually join this worker to your control plane."
130
+ echo "On your control plane, run 'kubeadm token create --print-join-command' and execute the output here."
131
+ else
132
+ echo "Running control plane setup with kubeadm..."
133
+ # This will initialize the kubeadm control plane and install Calico
134
+ underpost cluster --kubeadm
135
+ echo "Kubeadm control plane initialized. Check cluster status with 'kubectl get nodes'."
136
+ fi
137
+ elif $USE_K3S; then # New K3s initialization block
138
+ if $USE_WORKER; then
139
+ echo "Running worker node setup for K3s..."
140
+ # For K3s worker nodes, the 'underpost cluster --worker' command will handle joining
141
+ # the cluster. The K3s join command (k3s agent --server ...) needs to be provided.
142
+ underpost cluster --worker --k3s
143
+ underpost cluster --chown
144
+ echo "K3s Worker node setup initiated. You will need to manually join this worker to your control plane."
145
+ echo "On your K3s control plane, get the K3S_TOKEN from /var/lib/rancher/k3s/server/node-token"
146
+ echo "and the K3S_URL (e.g., https://<control-plane-ip>:6443)."
147
+ echo "Then execute: K3S_URL=${K3S_URL} K3S_TOKEN=${K3S_TOKEN} curl -sfL https://get.k3s.io | sh -"
148
+ else
149
+ echo "Running control plane setup with K3s..."
150
+ underpost cluster --k3s
151
+ echo "K3s control plane initialized. Check cluster status with 'kubectl get nodes'."
152
+ fi
153
+ elif $USE_KIND; then
154
+ echo "Running control node with kind..."
155
+ underpost cluster
156
+ echo "Kind cluster initialized. Check cluster status with 'kubectl get nodes'."
157
+ else
158
+ echo "No specific cluster role (--kubeadm, --kind, --k3s, --worker) specified. Please provide one."
159
+ exit 1
160
+ fi
161
+
162
+ echo "Underpost Kubernetes Node Setup completed."
163
+ echo "Remember to verify cluster health with 'kubectl get nodes' and 'kubectl get pods --all-namespaces'."
@@ -0,0 +1,32 @@
1
+ config:
2
+ core.https_address: "[::]:8443"
3
+ # core.trust_password: password
4
+ networks:
5
+ - config:
6
+ ipv4.address: 10.10.10.1/24
7
+ ipv6.address: none
8
+ description: ""
9
+ name: lxdbr0
10
+ type: ""
11
+ project: default
12
+ storage_pools:
13
+ - config:
14
+ size: 500GB
15
+ description: ""
16
+ name: default
17
+ driver: zfs
18
+ profiles:
19
+ - config: {}
20
+ description: ""
21
+ devices:
22
+ eth0:
23
+ name: eth0
24
+ network: lxdbr0
25
+ type: nic
26
+ root:
27
+ path: /
28
+ pool: default
29
+ type: disk
30
+ name: default
31
+ projects: []
32
+ cluster: null
@@ -0,0 +1,82 @@
1
+ #!/bin/bash
2
+ set -euo pipefail
3
+
4
+ # Update LXD and install dependencies
5
+ sudo snap install --channel=latest/stable lxd
6
+ sudo snap refresh --channel=latest/stable lxd
7
+ sudo snap install jq
8
+ sudo snap install maas
9
+
10
+ # Get default interface and IP address
11
+ INTERFACE=$(ip route | grep default | awk '{print $5}')
12
+ IP_ADDRESS=$(ip -4 addr show dev "$INTERFACE" | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
13
+
14
+ # Install and persist iptables NAT rules (Rocky Linux compatible)
15
+ sudo dnf install -y iptables-services
16
+ sudo systemctl enable --now iptables
17
+
18
+ # Enable IP forwarding and configure NAT
19
+ sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
20
+ sudo sysctl -p
21
+ sudo iptables -t nat -A POSTROUTING -o "$INTERFACE" -j SNAT --to "$IP_ADDRESS"
22
+ sudo service iptables save
23
+
24
+ # LXD preseed
25
+ cd /home/dd/engine
26
+ lxd init --preseed <manifests/maas/lxd-preseed.yaml
27
+
28
+ # Wait for LXD to be ready
29
+ lxd waitready
30
+
31
+ # Load secrets
32
+ underpost secret underpost --create-from-file /home/dd/engine/engine-private/conf/dd-cron/.env.production
33
+
34
+ # Extract config values
35
+ DB_PG_MAAS_USER=$(node bin config get --plain DB_PG_MAAS_USER)
36
+ DB_PG_MAAS_PASS=$(node bin config get --plain DB_PG_MAAS_PASS)
37
+ DB_PG_MAAS_HOST=$(node bin config get --plain DB_PG_MAAS_HOST)
38
+ DB_PG_MAAS_NAME=$(node bin config get --plain DB_PG_MAAS_NAME)
39
+
40
+ MAAS_ADMIN_USERNAME=$(node bin config get --plain MAAS_ADMIN_USERNAME)
41
+ MAAS_ADMIN_EMAIL=$(node bin config get --plain MAAS_ADMIN_EMAIL)
42
+ MAAS_ADMIN_PASS=$(node bin config get --plain MAAS_ADMIN_PASS)
43
+
44
+ # Initialize MAAS
45
+ maas init region+rack \
46
+ --database-uri "postgres://${DB_PG_MAAS_USER}:${DB_PG_MAAS_PASS}@${DB_PG_MAAS_HOST}/${DB_PG_MAAS_NAME}" \
47
+ --maas-url http://${IP_ADDRESS}:5240/MAAS
48
+
49
+ # Let MAAS initialize
50
+ sleep 30
51
+
52
+ # Create admin and get API key
53
+ maas createadmin \
54
+ --username "$MAAS_ADMIN_USERNAME" \
55
+ --password "$MAAS_ADMIN_PASS" \
56
+ --email "$MAAS_ADMIN_EMAIL"
57
+
58
+ APIKEY=$(maas apikey --username "$MAAS_ADMIN_USERNAME")
59
+
60
+ # Login to MAAS
61
+ maas login "$MAAS_ADMIN_USERNAME" "http://localhost:5240/MAAS/" "$APIKEY"
62
+
63
+ # Configure MAAS networking
64
+ SUBNET=10.10.10.0/24
65
+ FABRIC_ID=$(maas "$MAAS_ADMIN_USERNAME" subnet read "$SUBNET" | jq -r ".vlan.fabric_id")
66
+ VLAN_TAG=$(maas "$MAAS_ADMIN_USERNAME" subnet read "$SUBNET" | jq -r ".vlan.vid")
67
+ PRIMARY_RACK=$(maas "$MAAS_ADMIN_USERNAME" rack-controllers read | jq -r ".[] | .system_id")
68
+
69
+ maas "$MAAS_ADMIN_USERNAME" subnet update "$SUBNET" gateway_ip=10.10.10.1
70
+ maas "$MAAS_ADMIN_USERNAME" ipranges create type=dynamic start_ip=10.10.10.200 end_ip=10.10.10.254
71
+ maas "$MAAS_ADMIN_USERNAME" vlan update "$FABRIC_ID" "$VLAN_TAG" dhcp_on=True primary_rack="$PRIMARY_RACK"
72
+ maas "$MAAS_ADMIN_USERNAME" maas set-config name=upstream_dns value=8.8.8.8
73
+
74
+ # Register LXD as VM host
75
+ VM_HOST_ID=$(maas "$MAAS_ADMIN_USERNAME" vm-hosts create \
76
+ password=password \
77
+ type=lxd \
78
+ power_address="https://${IP_ADDRESS}:8443" \
79
+ project=maas | jq '.id')
80
+
81
+ # Set VM host CPU oversubscription
82
+ maas "$MAAS_ADMIN_USERNAME" vm-host update "$VM_HOST_ID" cpu_over_commit_ratio=4
@@ -49,7 +49,8 @@ spec:
49
49
  - metadata:
50
50
  name: mariadb-storage
51
51
  spec:
52
- accessModes: ['ReadWriteOnce']
52
+ accessModes: ["ReadWriteOnce"]
53
+ storageClassName: mariadb-storage-class
53
54
  resources:
54
55
  requests:
55
56
  storage: 1Gi
@@ -0,0 +1,10 @@
1
+ apiVersion: storage.k8s.io/v1
2
+ kind: StorageClass
3
+ metadata:
4
+ name: mariadb-storage-class # Renamed for clarity
5
+ annotations:
6
+ # Set this to "true" if you want this to be the default StorageClass
7
+ # storageclass.kubernetes.io/is-default-class: "true"
8
+ provisioner: rancher.io/local-path # Ensure this provisioner is installed in your cluster
9
+ reclaimPolicy: Retain # Or Delete, depending on your data retention policy
10
+ volumeBindingMode: WaitForFirstConsumer
@@ -6,6 +6,6 @@ resources:
6
6
  - pv-pvc.yaml
7
7
  - headless-service.yaml
8
8
  - statefulset.yaml
9
- - backup-pv-pvc.yaml
9
+ # - backup-pv-pvc.yaml
10
10
  # - backup-cronjob.yaml
11
11
  # - backup-access.yaml
@@ -3,7 +3,7 @@ kind: StatefulSet
3
3
  metadata:
4
4
  name: mongodb # Specifies the name of the statefulset
5
5
  spec:
6
- serviceName: 'mongodb-service' # Specifies the service to use
6
+ serviceName: "mongodb-service" # Specifies the service to use
7
7
  replicas: 2
8
8
  selector:
9
9
  matchLabels:
@@ -18,8 +18,8 @@ spec:
18
18
  image: docker.io/library/mongo:latest
19
19
  command:
20
20
  - mongod
21
- - '--replSet'
22
- - 'rs0'
21
+ - "--replSet"
22
+ - "rs0"
23
23
  # - '--config'
24
24
  # - '-f'
25
25
  # - '/etc/mongod.conf'
@@ -35,9 +35,9 @@ spec:
35
35
  # - '--setParameter'
36
36
  # - 'authenticationMechanisms=SCRAM-SHA-1'
37
37
  # - '--fork'
38
- - '--logpath'
39
- - '/var/log/mongodb/mongod.log'
40
- - '--bind_ip_all'
38
+ - "--logpath"
39
+ - "/var/log/mongodb/mongod.log"
40
+ - "--bind_ip_all"
41
41
  # command: ['sh', '-c']
42
42
  # args:
43
43
  # - |
@@ -99,11 +99,11 @@ spec:
99
99
  key: password
100
100
  resources:
101
101
  requests:
102
- cpu: '100m'
103
- memory: '256Mi'
102
+ cpu: "100m"
103
+ memory: "256Mi"
104
104
  limits:
105
- cpu: '500m'
106
- memory: '512Mi'
105
+ cpu: "500m"
106
+ memory: "512Mi"
107
107
  volumes:
108
108
  - name: keyfile
109
109
  secret:
@@ -119,7 +119,8 @@ spec:
119
119
  - metadata:
120
120
  name: mongodb-storage
121
121
  spec:
122
- accessModes: ['ReadWriteOnce']
122
+ accessModes: ["ReadWriteOnce"]
123
+ storageClassName: mongodb-storage-class
123
124
  resources:
124
125
  requests:
125
126
  storage: 5Gi
@@ -0,0 +1,9 @@
1
+ apiVersion: storage.k8s.io/v1
2
+ kind: StorageClass
3
+ metadata:
4
+ name: mongodb-storage-class
5
+ annotations:
6
+ storageclass.kubernetes.io/is-default-class: "false"
7
+ provisioner: rancher.io/local-path
8
+ reclaimPolicy: Retain
9
+ volumeBindingMode: WaitForFirstConsumer
@@ -13,11 +13,11 @@ spec:
13
13
  labels:
14
14
  app: mongodb
15
15
  spec:
16
- hostname: mongo
16
+ hostname: mongodb-service
17
17
  containers:
18
18
  - name: mongodb
19
19
  image: mongo:4.4
20
- command: ['mongod', '--replSet', 'rs0', '--bind_ip_all']
20
+ command: ["mongod", "--replSet", "rs0", "--bind_ip_all"]
21
21
  # -- bash
22
22
  # mongo
23
23
  # use admin
@@ -0,0 +1,7 @@
1
+ ---
2
+ # kubectl apply -k core/.
3
+ apiVersion: kustomize.config.k8s.io/v1beta1
4
+ kind: Kustomization
5
+ resources:
6
+ - pv-pvc.yaml
7
+ - statefulset.yaml
@@ -0,0 +1,27 @@
1
+ # pv-pvc.yaml
2
+ apiVersion: v1
3
+ kind: PersistentVolume
4
+ metadata:
5
+ name: mysql-pv
6
+ labels:
7
+ type: local
8
+ spec:
9
+ storageClassName: manual
10
+ capacity:
11
+ storage: 20Gi
12
+ accessModes:
13
+ - ReadWriteOnce
14
+ hostPath:
15
+ path: "/mnt/data"
16
+ ---
17
+ apiVersion: v1
18
+ kind: PersistentVolumeClaim
19
+ metadata:
20
+ name: mysql-pv-claim
21
+ spec:
22
+ storageClassName: manual
23
+ accessModes:
24
+ - ReadWriteOnce
25
+ resources:
26
+ requests:
27
+ storage: 20Gi
@@ -0,0 +1,55 @@
1
+ apiVersion: v1
2
+ kind: Service
3
+ metadata:
4
+ name: mysql
5
+ labels:
6
+ app: mysql
7
+ spec:
8
+ ports:
9
+ - port: 3306
10
+ name: mysql
11
+ selector:
12
+ app: mysql
13
+ clusterIP: None
14
+ ---
15
+ apiVersion: apps/v1
16
+ kind: StatefulSet
17
+ metadata:
18
+ name: mysql
19
+ spec:
20
+ serviceName: "mysql"
21
+ selector:
22
+ matchLabels:
23
+ app: mysql
24
+ replicas: 1
25
+ template:
26
+ metadata:
27
+ labels:
28
+ app: mysql
29
+ spec:
30
+ containers:
31
+ - image: mysql:9
32
+ name: mysql
33
+ env:
34
+ - name: MYSQL_ROOT_PASSWORD
35
+ valueFrom:
36
+ secretKeyRef:
37
+ name: mysql-secret
38
+ key: password
39
+ ports:
40
+ - containerPort: 3306
41
+ name: mysql
42
+ volumeMounts:
43
+ - name: mysql-persistent-storage
44
+ mountPath: /var/lib/mysql
45
+ subPath: mysql
46
+ volumeClaimTemplates:
47
+ - metadata:
48
+ name: mysql-persistent-storage
49
+ spec:
50
+ storageClassName: manual
51
+ accessModes:
52
+ - ReadWriteOnce
53
+ resources:
54
+ requests:
55
+ storage: 20Gi
@@ -4,7 +4,7 @@ metadata:
4
4
  name: postgres
5
5
  spec:
6
6
  serviceName: postgres
7
- replicas: 3
7
+ replicas: 1
8
8
  selector:
9
9
  matchLabels:
10
10
  app: postgres
@@ -1,17 +1,11 @@
1
- ---
2
1
  apiVersion: v1
3
2
  kind: Service
4
3
  metadata:
5
- name: service-valkey
4
+ name: valkey-service
6
5
  namespace: default
7
6
  spec:
7
+ selector:
8
+ app: valkey-service
8
9
  ports:
9
10
  - port: 6379
10
11
  targetPort: 6379
11
- selector:
12
- app: service-valkey
13
- ipFamilyPolicy: PreferDualStack
14
- ipFamilies:
15
- - IPv4
16
- # - IPv6
17
- type: ClusterIP
@@ -1,41 +1,38 @@
1
1
  apiVersion: apps/v1
2
2
  kind: StatefulSet
3
3
  metadata:
4
- name: service-valkey
4
+ name: valkey-service
5
5
  namespace: default
6
6
  spec:
7
- serviceName: service-valkey
7
+ serviceName: valkey-service
8
8
  replicas: 1
9
9
  selector:
10
10
  matchLabels:
11
- app: service-valkey
11
+ app: valkey-service
12
12
  template:
13
13
  metadata:
14
14
  labels:
15
- app: service-valkey
15
+ app: valkey-service
16
16
  spec:
17
- # Prevent automatic token mounting if you're not using the default ServiceAccount
18
17
  automountServiceAccountToken: false
19
-
20
18
  containers:
21
- - name: service-valkey
22
- image: valkey/valkey:latest
23
- # Ensure you pull only if not present (Never will error if missing)
24
- imagePullPolicy: Never
25
- env:
26
- - name: TZ
27
- value: Europe/Zurich
19
+ - name: valkey-service
20
+ image: docker.io/valkey/valkey:latest
21
+ imagePullPolicy: IfNotPresent
22
+ command: ["valkey-server"]
23
+ args: ["--port", "6379"]
28
24
  ports:
29
25
  - containerPort: 6379
30
26
  startupProbe:
31
27
  tcpSocket:
32
28
  port: 6379
33
- failureThreshold: 30
34
29
  periodSeconds: 5
35
30
  timeoutSeconds: 5
31
+ failureThreshold: 30
36
32
  livenessProbe:
37
33
  tcpSocket:
38
34
  port: 6379
39
- failureThreshold: 2
35
+ initialDelaySeconds: 10
40
36
  periodSeconds: 30
41
37
  timeoutSeconds: 5
38
+ failureThreshold: 2
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "type": "module",
3
3
  "main": "src/index.js",
4
4
  "name": "underpost",
5
- "version": "2.8.7",
5
+ "version": "2.8.8",
6
6
  "description": "pwa api rest template",
7
7
  "scripts": {
8
8
  "start": "env-cmd -f .env.production node --max-old-space-size=8192 src/server",
@@ -0,0 +1,60 @@
1
+ import { getNpmRootPath, getUnderpostRootPath } from '../server/conf.js';
2
+ import { shellExec } from '../server/process.js';
3
+ import dotenv from 'dotenv';
4
+ class UnderpostBaremetal {
5
+ static API = {
6
+ callback(
7
+ options = {
8
+ dev: false,
9
+ controlServerInstall: false,
10
+ controlServerInitDb: false,
11
+ controlServerInit: false,
12
+ controlServerUninstall: false,
13
+ controlServerStop: false,
14
+ controlServerStart: false,
15
+ },
16
+ ) {
17
+ dotenv.config({ path: `${getUnderpostRootPath()}/.env`, override: true });
18
+ const npmRoot = getNpmRootPath();
19
+ const underpostRoot = options?.dev === true ? '.' : `${npmRoot}/underpost`;
20
+ const dbProviderId = 'postgresql-14';
21
+ if (options.controlServerUninstall === true) {
22
+ // Stop MAAS services
23
+ shellExec(`sudo systemctl stop maas.pebble || true`);
24
+ shellExec(`sudo snap stop maas`);
25
+ shellExec(`sudo snap remove maas --purge || true`);
26
+
27
+ // Remove Snap residual data
28
+ shellExec(`sudo rm -rf /var/snap/maas`);
29
+ shellExec(`sudo rm -rf ~/snap/maas`);
30
+
31
+ // Remove MAAS config and data directories
32
+ shellExec(`sudo rm -rf /etc/maas`);
33
+ shellExec(`sudo rm -rf /var/lib/maas`);
34
+ shellExec(`sudo rm -rf /var/log/maas`);
35
+ }
36
+ if (options.controlServerStart === true) {
37
+ shellExec(`sudo snap restart maas`);
38
+ }
39
+ if (options.controlServerStop === true) {
40
+ shellExec(`sudo snap stop maas`);
41
+ }
42
+ if (options.controlServerInitDb === true) {
43
+ shellExec(`node ${underpostRoot}/bin/deploy ${dbProviderId} install`);
44
+ shellExec(
45
+ `node ${underpostRoot}/bin/deploy pg-drop-db ${process.env.DB_PG_MAAS_NAME} ${process.env.DB_PG_MAAS_USER}`,
46
+ );
47
+ shellExec(`node ${underpostRoot}/bin/deploy maas db`);
48
+ }
49
+ if (options.controlServerInstall === true) {
50
+ shellExec(`chmod +x ${underpostRoot}/manifests/maas/maas-setup.sh`);
51
+ shellExec(`${underpostRoot}/manifests/maas/maas-setup.sh`);
52
+ }
53
+ if (options.controlServerInit === true) {
54
+ shellExec(`node ${underpostRoot}/bin/deploy maas reset`);
55
+ }
56
+ },
57
+ };
58
+ }
59
+
60
+ export default UnderpostBaremetal;