cluster-builder 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cluster-builder might be problematic. Click here for more details.
- cluster_builder/config/cluster.py +30 -5
- cluster_builder/config/postgres.py +4 -1
- cluster_builder/infrastructure/executor.py +48 -48
- cluster_builder/infrastructure/templates.py +2 -2
- cluster_builder/swarmchestrate.py +261 -47
- cluster_builder/templates/aws/main.tf +109 -46
- cluster_builder/templates/copy_manifest.tf +36 -0
- cluster_builder/templates/edge/main.tf +98 -0
- cluster_builder/templates/ha_user_data.sh.tpl +32 -1
- cluster_builder/templates/master_user_data.sh.tpl +36 -5
- cluster_builder/templates/openstack/main.tf +218 -0
- cluster_builder/templates/openstack_provider.tf +70 -0
- cluster_builder/templates/worker_user_data.sh.tpl +33 -1
- cluster_builder/utils/hcl.py +91 -15
- cluster_builder-0.3.1.dist-info/METADATA +321 -0
- cluster_builder-0.3.1.dist-info/RECORD +25 -0
- {cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/WHEEL +1 -1
- cluster_builder/templates/edge/main.tf.j2 +0 -40
- cluster_builder/templates/openstack/main.tf.j2 +0 -76
- cluster_builder/templates/openstack/network_security_group.tf.j2 +0 -34
- cluster_builder-0.3.0.dist-info/METADATA +0 -264
- cluster_builder-0.3.0.dist-info/RECORD +0 -24
- {cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/top_level.txt +0 -0
|
@@ -1,2 +1,34 @@
|
|
|
1
1
|
#!/bin/bash
# K3s agent (worker) install script.
# NOTE(review): ${public_ip}, ${master_ip}, ${k3s_token} and ${node_name} are
# not shell variables — presumably they are rendered by OpenTofu's
# templatefile() before this script runs (file ships as *.sh.tpl); confirm.
set -euo pipefail

# Mirror all stdout/stderr into a persistent log for post-mortem debugging.
LOG_FILE="/var/log/k3s_agent_install.log"
exec > >(tee -a "$LOG_FILE") 2>&1
echo "=== K3s Agent Install Script Started at $(date) ==="

# Function to log messages with timestamp
log_message() {
echo "$(date) - $1"
}

# Use the provided public IP
log_message "Using provided public IP: ${public_ip}"

# Check if K3s agent is already running (makes the script idempotent on re-run)
if systemctl is-active --quiet k3s-agent; then
log_message "K3s agent is already running. Skipping installation."
else
log_message "K3s agent is not running. Proceeding with installation..."

# K3S_URL/K3S_TOKEN are read by the get.k3s.io installer to join the cluster.
export K3S_URL="https://${master_ip}:6443"
export K3S_TOKEN="${k3s_token}"

# Install the K3s agent and join the cluster
if ! curl -sfL https://get.k3s.io | sh -s - agent --node-external-ip="${public_ip}" --node-name="${node_name}"; then
log_message "ERROR: K3s agent installation failed!"
exit 1
else
log_message "K3s agent installation succeeded."
fi
fi

log_message "=== Script completed at $(date) ==="
|
cluster_builder/utils/hcl.py
CHANGED
|
@@ -1,8 +1,11 @@
|
|
|
1
|
+
import json
|
|
1
2
|
import os
|
|
2
3
|
import hcl2
|
|
3
4
|
from lark import Tree, Token
|
|
5
|
+
import logging
|
|
6
|
+
import re
|
|
4
7
|
|
|
5
|
-
|
|
8
|
+
logger = logging.getLogger("cluster_builder")
|
|
6
9
|
def add_backend_config(backend_tf_path, conn_str, schema_name):
|
|
7
10
|
"""
|
|
8
11
|
Adds a PostgreSQL backend configuration to a Terraform file.
|
|
@@ -14,7 +17,7 @@ def add_backend_config(backend_tf_path, conn_str, schema_name):
|
|
|
14
17
|
if os.path.exists(backend_tf_path):
|
|
15
18
|
with open(backend_tf_path) as f:
|
|
16
19
|
if 'backend "pg"' in f.read():
|
|
17
|
-
|
|
20
|
+
logger.debug("⚠️ Backend config already exists, skipping: %s", backend_tf_path)
|
|
18
21
|
return
|
|
19
22
|
|
|
20
23
|
# Build the backend configuration block
|
|
@@ -34,7 +37,7 @@ def add_backend_config(backend_tf_path, conn_str, schema_name):
|
|
|
34
37
|
) as f: # Use "w" instead of "a" to create/overwrite the file
|
|
35
38
|
f.write("\n".join(lines) + "\n")
|
|
36
39
|
|
|
37
|
-
|
|
40
|
+
logger.debug("✅ Added PostgreSQL backend config to %s", backend_tf_path)
|
|
38
41
|
|
|
39
42
|
|
|
40
43
|
def add_module_block(main_tf_path, module_name, config):
|
|
@@ -48,7 +51,7 @@ def add_module_block(main_tf_path, module_name, config):
|
|
|
48
51
|
if os.path.exists(main_tf_path):
|
|
49
52
|
with open(main_tf_path) as f:
|
|
50
53
|
if f'module "{module_name}"' in f.read():
|
|
51
|
-
|
|
54
|
+
logger.warning("⚠️ Module '%s' already exists, skipping in %s", module_name, main_tf_path)
|
|
52
55
|
return
|
|
53
56
|
|
|
54
57
|
# Build the module block
|
|
@@ -60,6 +63,8 @@ def add_module_block(main_tf_path, module_name, config):
|
|
|
60
63
|
v_str = "true" if v else "false"
|
|
61
64
|
elif isinstance(v, (int, float)):
|
|
62
65
|
v_str = str(v)
|
|
66
|
+
elif isinstance(v, (list, dict)):
|
|
67
|
+
v_str = json.dumps(v)
|
|
63
68
|
elif v is None:
|
|
64
69
|
continue
|
|
65
70
|
else:
|
|
@@ -71,40 +76,51 @@ def add_module_block(main_tf_path, module_name, config):
|
|
|
71
76
|
with open(main_tf_path, "a") as f:
|
|
72
77
|
f.write("\n\n" + "\n".join(lines) + "\n")
|
|
73
78
|
|
|
74
|
-
|
|
79
|
+
logger.debug("✅ Added module '%s' to %s", module_name, main_tf_path)
|
|
75
80
|
|
|
76
81
|
|
|
77
82
|
def is_target_module_block(tree: Tree, module_name: str) -> bool:
    """
    Check whether *tree* is an HCL ``block`` node for ``module "<module_name>"``.

    Expected lark shape::

        block
        ├── Tree("identifier") → Token(value == "module")
        ├── Token(value == '"<module_name>"')
        └── body ...

    Args:
        tree: Parsed lark ``Tree`` node to inspect.
        module_name: Module label to match (without surrounding quotes).

    Returns:
        True only when every structural check passes.
    """
    # This predicate runs for every node of the parse tree, so the verbose
    # children summary is built lazily and only at DEBUG level (the previous
    # eager f-string INFO logs paid the comprehension cost unconditionally).
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(
            "Checking tree with data: %s, children count: %d",
            tree.data,
            len(tree.children),
        )
        logger.debug(
            "Children types and values: %s",
            [(type(c), getattr(c, "value", None)) for c in tree.children],
        )

    if tree.data != "block":
        logger.debug("Rejected: tree.data is '%s', expected 'block'", tree.data)
        return False

    # Need at least 3 children: identifier, name, body
    if len(tree.children) < 3:
        logger.debug(
            "Rejected: tree has less than 3 children (%d)", len(tree.children)
        )
        return False

    # First child should be an identifier tree
    first_child = tree.children[0]
    if not isinstance(first_child, Tree) or first_child.data != "identifier":
        logger.debug(
            "Rejected: first child is not an identifier Tree (found %s with data '%s')",
            type(first_child),
            getattr(first_child, "data", None),
        )
        return False

    # The identifier must wrap a Token whose value is the keyword 'module'
    if len(first_child.children) == 0 or not isinstance(first_child.children[0], Token):
        logger.debug("Rejected: first child has no Token children")
        return False

    first_value = first_child.children[0].value
    if first_value != "module":
        logger.debug(
            "Rejected: first child token value '%s' is not 'module'", first_value
        )
        return False

    # Second child: could be a Token or Tree with Token child for module name
    second_child = tree.children[1]
    if not isinstance(second_child, Token) or second_child.value != f'"{module_name}"':
        logger.debug(
            'Second child check failed: type=%s, value=%s expected="%s"',
            type(second_child),
            getattr(second_child, "value", None),
            module_name,
        )
        return False

    logger.debug("Module block matched for module name '%s'", module_name)
    return True
|
|
106
123
|
|
|
107
|
-
|
|
108
124
|
def simple_remove_module(tree, module_name, removed=False):
|
|
109
125
|
"""
|
|
110
126
|
A simpler function to remove module blocks that maintains the exact Tree structure
|
|
@@ -116,6 +132,9 @@ def simple_remove_module(tree, module_name, removed=False):
|
|
|
116
132
|
body_node = tree.children[0]
|
|
117
133
|
|
|
118
134
|
if isinstance(body_node, Tree) and body_node.data == "body":
|
|
135
|
+
# Debug: Log body node children
|
|
136
|
+
logger.debug("Body Node Children: %s", body_node.children)
|
|
137
|
+
|
|
119
138
|
# Create new children list for the body node
|
|
120
139
|
new_body_children = []
|
|
121
140
|
skip_next = False
|
|
@@ -133,6 +152,7 @@ def simple_remove_module(tree, module_name, removed=False):
|
|
|
133
152
|
and is_target_module_block(child, module_name)
|
|
134
153
|
):
|
|
135
154
|
removed = True
|
|
155
|
+
print(f"Module {module_name} found and removed.") # Debug log
|
|
136
156
|
|
|
137
157
|
# Check if the next node is a new_line_or_comment, and skip it as well
|
|
138
158
|
if i + 1 < len(body_node.children):
|
|
@@ -158,14 +178,16 @@ def remove_module_block(main_tf_path, module_name: str):
|
|
|
158
178
|
Removes a module block by name from main.tf for this cluster.
|
|
159
179
|
"""
|
|
160
180
|
if not os.path.exists(main_tf_path):
|
|
161
|
-
|
|
181
|
+
logger.warning("⚠️ No main.tf found at %s", main_tf_path)
|
|
162
182
|
return
|
|
163
183
|
|
|
164
184
|
try:
|
|
165
185
|
with open(main_tf_path, "r") as f:
|
|
166
186
|
tree = hcl2.parse(f)
|
|
187
|
+
# Debug: Log the parsed tree structure
|
|
188
|
+
logger.debug("Parsed Tree: %s", tree)
|
|
167
189
|
except Exception as e:
|
|
168
|
-
|
|
190
|
+
logger.error("❌ Failed to parse HCL in %s: %s", main_tf_path, e, exc_info=True)
|
|
169
191
|
return
|
|
170
192
|
|
|
171
193
|
# Process tree to remove target module block
|
|
@@ -173,8 +195,11 @@ def remove_module_block(main_tf_path, module_name: str):
|
|
|
173
195
|
|
|
174
196
|
# If no modules were removed
|
|
175
197
|
if not removed:
|
|
176
|
-
|
|
198
|
+
logger.warning("⚠️ No module named '%s' found in %s", module_name, main_tf_path)
|
|
177
199
|
return
|
|
200
|
+
|
|
201
|
+
# Debug: Log the final tree structure after removal
|
|
202
|
+
logger.debug("Final Tree after module removal: %s", new_tree)
|
|
178
203
|
|
|
179
204
|
try:
|
|
180
205
|
# Reconstruct HCL
|
|
@@ -184,9 +209,9 @@ def remove_module_block(main_tf_path, module_name: str):
|
|
|
184
209
|
with open(main_tf_path, "w") as f:
|
|
185
210
|
f.write(new_source)
|
|
186
211
|
|
|
187
|
-
|
|
212
|
+
logger.info("🗑️ Removed module '%s' from %s", module_name, main_tf_path)
|
|
188
213
|
except Exception as e:
|
|
189
|
-
|
|
214
|
+
logger.error("❌ Failed to reconstruct HCL in %s: %s", main_tf_path, e, exc_info=True)
|
|
190
215
|
# Print more detailed error information
|
|
191
216
|
import traceback
|
|
192
217
|
|
|
@@ -222,10 +247,61 @@ def extract_template_variables(template_path):
|
|
|
222
247
|
return variables
|
|
223
248
|
|
|
224
249
|
except FileNotFoundError:
|
|
225
|
-
|
|
250
|
+
logger.warning(f"⚠️ Template file not found: {template_path}")
|
|
226
251
|
return {}
|
|
227
252
|
|
|
228
253
|
except Exception as e:
|
|
229
254
|
error_msg = f"Failed to extract variables from {template_path}: {e}"
|
|
230
|
-
|
|
255
|
+
logger.error(f"❌ {error_msg}")
|
|
231
256
|
raise ValueError(error_msg)
|
|
257
|
+
|
|
258
|
+
def add_output_blocks(outputs_tf_path, module_name, output_names):
    """
    Ensure Terraform ``output`` blocks exposing *module_name*'s outputs exist
    in *outputs_tf_path*.

    Missing outputs are appended, stale definitions are replaced, and outputs
    that are already correctly defined are left untouched.

    Args:
        outputs_tf_path: Path to the outputs ``.tf`` file (created if absent).
        module_name: Name of the module whose outputs are referenced.
        output_names: Iterable of output names to expose.
    """
    existing_text = ""

    # Read existing content if the file exists
    if os.path.exists(outputs_tf_path):
        with open(outputs_tf_path, "r") as f:
            existing_text = f.read()

    lines_to_add = []   # (name, block) pairs not present in the file at all
    updated_lines = []  # (name, block) pairs present but needing replacement

    # Check and add output blocks
    for output_name in output_names:
        output_block = f'output "{output_name}" {{\n value = module.{module_name}.{output_name}\n}}'.strip()

        if f'output "{output_name}"' in existing_text:
            logger.debug(f"⚠️ Output '{output_name}' already exists in {outputs_tf_path}. Checking if it needs an update.")

            # Only update if the value is None in the current output
            if output_name in ["worker_ip", "ha_ip"] and "None" in existing_text:
                updated_lines.append((output_name, output_block))
            elif output_block not in existing_text:
                # If it's there but not the same, we need to update it
                updated_lines.append((output_name, output_block))
            else:
                logger.debug(f"Output '{output_name}' is already correctly defined in {outputs_tf_path}.")
                continue
        else:
            # If the output doesn't exist, add it
            lines_to_add.append((output_name, output_block))

    if lines_to_add or updated_lines:
        # Remove only the definitions we are about to rewrite.  Removing every
        # requested name here (as before) silently deleted blocks that were
        # already correct and were never re-added.  re.escape guards against
        # regex metacharacters in output names.
        for output_name, _ in updated_lines:
            existing_text = re.sub(
                f'output "{re.escape(output_name)}".*?}}', '', existing_text, flags=re.DOTALL
            )

        # Combine all new output blocks and updates to add
        final_output = "\n\n".join(block for _, block in lines_to_add + updated_lines)

        # Avoid a leading blank gap when the file was empty or newly created.
        merged = existing_text.strip()
        if merged:
            merged += "\n\n"

        # Append new or updated blocks
        with open(outputs_tf_path, "w") as f:
            f.write(merged + final_output + "\n")

        logger.debug(f"✅ Added/updated outputs for module '{module_name}'")
    else:
        logger.debug(f"⚠️ No new outputs to add or update in {outputs_tf_path}.")
|
|
@@ -0,0 +1,321 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cluster-builder
|
|
3
|
+
Version: 0.3.1
|
|
4
|
+
Summary: Swarmchestrate cluster builder
|
|
5
|
+
Author-email: Gunjan <G.Kotak@westminster.ac.uk>, Jay <J.Deslauriers@westminster.ac.uk>
|
|
6
|
+
License: Apache2
|
|
7
|
+
Description-Content-Type: text/markdown
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Requires-Dist: names_generator==0.2.0
|
|
10
|
+
Requires-Dist: python-hcl2==7.2
|
|
11
|
+
Requires-Dist: lark-parser==0.12.0
|
|
12
|
+
Requires-Dist: python-dotenv==1.1.1
|
|
13
|
+
Requires-Dist: psycopg2-binary==2.9.10
|
|
14
|
+
Requires-Dist: yaspin==3.1.0
|
|
15
|
+
Dynamic: license-file
|
|
16
|
+
|
|
17
|
+
# Swarmchestrate - Cluster Builder
|
|
18
|
+
|
|
19
|
+
This repository contains the codebase for **[cluster-builder]**, which builds K3s clusters for Swarmchestrate using OpenTofu.
|
|
20
|
+
|
|
21
|
+
Key features:
|
|
22
|
+
- **Create**: Provisions infrastructure using OpenTofu and installs K3s.
|
|
23
|
+
- **Add**: Add worker or HA nodes to existing clusters.
|
|
24
|
+
- **Remove**: Selectively remove nodes from existing clusters.
|
|
25
|
+
- **Delete**: Destroys the provisioned infrastructure when no longer required.
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
## Prerequisites
|
|
30
|
+
|
|
31
|
+
Before proceeding, ensure the following prerequisites are installed:
|
|
32
|
+
|
|
33
|
+
1. **Git**: For cloning the repository.
|
|
34
|
+
2. **Python**: Version 3.9 or higher.
|
|
35
|
+
3. **pip**: Python package manager.
|
|
36
|
+
4. **OpenTofu**: Version 1.6 or higher for infrastructure provisioning.
|
|
37
|
+
5. **Make**: To run the provided `Makefile`.
|
|
38
|
+
6. **PostgreSQL**: For storing OpenTofu state.
|
|
39
|
+
7. (Optional) **Docker**: To create a dev Postgres
|
|
40
|
+
8. For detailed instructions on **edge device requirements**, refer to the [Edge Device Requirements](docs/edge-requirements.md) document.
|
|
41
|
+
|
|
42
|
+
---
|
|
43
|
+
|
|
44
|
+
## Getting Started
|
|
45
|
+
|
|
46
|
+
### 1. Clone the Repository
|
|
47
|
+
|
|
48
|
+
To get started, clone this repository:
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
git clone https://github.com/Swarmchestrate/cluster-builder.git
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### 2. Navigate to the Project Directory
|
|
55
|
+
|
|
56
|
+
```bash
|
|
57
|
+
cd cluster-builder
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
### 3. Install Dependencies and Tools
|
|
61
|
+
|
|
62
|
+
Run the Makefile to install all necessary dependencies, including OpenTofu:
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
make install
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
This command will:
|
|
69
|
+
- Install Python dependencies listed in requirements.txt.
|
|
70
|
+
- Download and configure OpenTofu for infrastructure management.
|
|
71
|
+
|
|
72
|
+
```bash
|
|
73
|
+
make db
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
This command will:
|
|
77
|
+
- Spin up an empty dev Postgres DB (in Docker) for storing state
|
|
78
|
+
|
|
79
|
+
The Makefile provides default database details — update them, or use them as-is: container name `pg-db`, with `-e POSTGRES_USER=admin -e POSTGRES_PASSWORD=adminpass -e POSTGRES_DB=swarmchestrate`
|
|
80
|
+
|
|
81
|
+
For database setup as a service, refer to the [database setup as service](docs/database_setup.md) document
|
|
82
|
+
|
|
83
|
+
### 4. Populate .env file with access config
|
|
84
|
+
The .env file is used to store environment variables required by the application. It contains configuration details for connecting to your cloud providers, the PostgreSQL database, and any other necessary resources.
|
|
85
|
+
|
|
86
|
+
#### 4.1. Rename or copy the example file to **.env**
|
|
87
|
+
|
|
88
|
+
```bash
|
|
89
|
+
cp .env_example .env
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
#### 4.2. Open the **.env** file and add the necessary configuration for your cloud providers and PostgreSQL:
|
|
93
|
+
|
|
94
|
+
```ini
|
|
95
|
+
## PG Configuration
|
|
96
|
+
POSTGRES_USER=postgres
|
|
97
|
+
POSTGRES_PASSWORD=secret
|
|
98
|
+
POSTGRES_HOST=db.example.com
|
|
99
|
+
POSTGRES_DATABASE=terraform_state
|
|
100
|
+
POSTGRES_SSLMODE=prefer
|
|
101
|
+
|
|
102
|
+
## AWS Auth
|
|
103
|
+
TF_VAR_aws_region=us-west-2
|
|
104
|
+
TF_VAR_aws_access_key=AKIAXXXXXXXXXXXXXXXX
|
|
105
|
+
TF_VAR_aws_secret_key=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
|
106
|
+
|
|
107
|
+
## OpenStack Auth - AppCreds Mode
|
|
108
|
+
TF_VAR_openstack_auth_method=appcreds
|
|
109
|
+
TF_VAR_openstack_auth_url=https://openstack.example.com:5000
|
|
110
|
+
TF_VAR_openstack_application_credential_id=fdXXXXXXXXXXXXXXXX
|
|
111
|
+
TF_VAR_openstack_application_credential_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
|
112
|
+
TF_VAR_openstack_region=RegionOne
|
|
113
|
+
|
|
114
|
+
## OpenStack Auth - User/Pass Mode
|
|
115
|
+
# TF_VAR_openstack_auth_method=userpass
|
|
116
|
+
# TF_VAR_openstack_auth_url=https://openstack.example.com:5000
|
|
117
|
+
# TF_VAR_openstack_region=RegionOne
|
|
118
|
+
# TF_VAR_openstack_user_name=myuser
|
|
119
|
+
# TF_VAR_openstack_password=mypassword
|
|
120
|
+
# TF_VAR_openstack_project_id=project-id-123
|
|
121
|
+
# TF_VAR_openstack_user_domain_name=Default
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
---
|
|
125
|
+
|
|
126
|
+
## Basic Usage
|
|
127
|
+
|
|
128
|
+
### Initialisation
|
|
129
|
+
|
|
130
|
+
```python
|
|
131
|
+
from cluster_builder import Swarmchestrate
|
|
132
|
+
|
|
133
|
+
# Initialise the orchestrator
|
|
134
|
+
orchestrator = Swarmchestrate(
|
|
135
|
+
template_dir="/path/to/templates",
|
|
136
|
+
output_dir="/path/to/output"
|
|
137
|
+
)
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### Creating a New Cluster
|
|
141
|
+
|
|
142
|
+
To create a new k3s cluster, use the **add_node** method with the **master** role:
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
# Configuration for a new cluster
|
|
146
|
+
config = {
|
|
147
|
+
"cloud": "aws", # Can be 'aws', 'openstack', or 'edge'
|
|
148
|
+
"k3s_role": "master", # Role can be 'master', 'worker', or 'ha'
|
|
149
|
+
"ha": False, # Set to True for high availability (HA) deployments
|
|
150
|
+
"instance_type": "t2.small", # AWS instance type
|
|
151
|
+
"ssh_key_name": "g", # SSH key name for AWS or OpenStack
|
|
152
|
+
"ssh_user": "ec2-user", # SSH user for the instance
|
|
153
|
+
"ssh_private_key_path": "/workspaces/cluster-builder/scripts/g.pem", # Path to SSH private key
|
|
154
|
+
"ami": "ami-0c0493bbac867d427", # AMI ID for AWS (specific to region)
|
|
155
|
+
"tcp_ports": [10020], # Optional list of TCP ports to open
|
|
156
|
+
"udp_ports": [1003] # Optional list of UDP ports to open
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
# Create the cluster (returns the cluster name)
|
|
160
|
+
cluster_name = orchestrator.add_node(config)
|
|
161
|
+
print(f"Created cluster: {cluster_name}")
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
### Adding Nodes to an Existing Cluster
|
|
165
|
+
|
|
166
|
+
To add worker or high-availability nodes to an existing cluster:
|
|
167
|
+
|
|
168
|
+
```python
|
|
169
|
+
# Configuration for adding a worker node
|
|
170
|
+
worker_config = {
|
|
171
|
+
"cloud": "aws", # Cloud provider (can be 'aws', 'openstack', or 'edge')
|
|
172
|
+
"k3s_role": "worker", # Role can be 'worker' or 'ha'
|
|
173
|
+
"ha": False, # Set to True for high availability (HA) deployments
|
|
174
|
+
"instance_type": "t2.small", # AWS instance type
|
|
175
|
+
"ssh_key_name": "g", # SSH key name
|
|
176
|
+
"ssh_user": "ec2-user", # SSH user for the instance
|
|
177
|
+
"ssh_private_key_path": "/workspaces/cluster-builder/scripts/g.pem", # Path to SSH private key
|
|
178
|
+
"ami": "ami-0c0493bbac867d427", # AMI ID for AWS
|
|
179
|
+
# Optional parameters:
|
|
180
|
+
# "master_ip": "12.13.14.15", # IP address of the master node (required for worker/HA roles)
|
|
181
|
+
# "cluster_name": "elastic_mcnulty", # Name of the cluster
|
|
182
|
+
# "security_group_id": "sg-xxxxxxxxxxxxxxx", # Security group ID for AWS or OpenStack
|
|
183
|
+
# "tcp_ports": [80, 443], # List of TCP ports to open
|
|
184
|
+
# "udp_ports": [53] # List of UDP ports to open
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
# Add the worker node
|
|
188
|
+
cluster_name = orchestrator.add_node(worker_config)
|
|
189
|
+
print(f"Added worker node to cluster: {cluster_name}")
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
### Removing a Specific Node
|
|
193
|
+
|
|
194
|
+
To remove a specific node from a cluster:
|
|
195
|
+
|
|
196
|
+
```python
|
|
197
|
+
# Remove a node by its resource name
|
|
198
|
+
orchestrator.remove_node(
|
|
199
|
+
cluster_name="your-cluster-name",
|
|
200
|
+
resource_name="aws_eloquent_feynman" # The resource identifier of the node
|
|
201
|
+
)
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
The **remove_node** method:
|
|
205
|
+
1. Destroys the node's infrastructure resources
|
|
206
|
+
2. Removes the node's configuration from the cluster
|
|
207
|
+
|
|
208
|
+
---
|
|
209
|
+
|
|
210
|
+
### Destroying an Entire Cluster
|
|
211
|
+
|
|
212
|
+
To completely destroy a cluster and all its nodes:
|
|
213
|
+
|
|
214
|
+
```python
|
|
215
|
+
# Destroy the entire cluster
|
|
216
|
+
orchestrator.destroy(
|
|
217
|
+
cluster_name="your-cluster-name"
|
|
218
|
+
)
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
The **destroy** method:
|
|
222
|
+
1. Destroys all infrastructure resources associated with the cluster
|
|
223
|
+
2. Removes the cluster directory and configuration files
|
|
224
|
+
|
|
225
|
+
Note for **Edge Devices**:
|
|
226
|
+
Since the edge device is already provisioned, the `destroy` method will not remove K3s directly from the edge device. You will need to manually uninstall K3s from your edge device after the cluster is destroyed.
|
|
227
|
+
|
|
228
|
+
---
|
|
229
|
+
|
|
230
|
+
### Important Configuration Requirements
|
|
231
|
+
#### High Availability Flag (ha):
|
|
232
|
+
|
|
233
|
+
- For k3s_role="worker" or k3s_role="ha", you must specify a master_ip (the IP address of the master node).
|
|
234
|
+
|
|
235
|
+
- For k3s_role="master", you must not specify a master_ip.
|
|
236
|
+
|
|
237
|
+
- The ha flag should be set to True for high availability deployment (usually when adding a ha or worker node to an existing master).
|
|
238
|
+
|
|
239
|
+
#### SSH Credentials:
|
|
240
|
+
|
|
241
|
+
- For all roles (k3s_role="master", k3s_role="worker", k3s_role="ha"), you must specify both ssh_user and ssh_private_key_path except for edge.
|
|
242
|
+
|
|
243
|
+
- The ssh_private_key_path should be the path to your SSH private key file. Ensure that the SSH key is copied to the specified path before running the script.
|
|
244
|
+
|
|
245
|
+
- The ssh_key_name and the ssh_private_key_path are different—ensure that your SSH key is placed correctly at the provided ssh_private_key_path.
|
|
246
|
+
|
|
247
|
+
#### Ports:
|
|
248
|
+
You can specify custom ports for your nodes in the tcp_ports and udp_ports fields. However, certain ports are required for Kubernetes deployment (even if not specified explicitly):
|
|
249
|
+
|
|
250
|
+
**TCP Ports:**
|
|
251
|
+
|
|
252
|
+
- 2379-2380: For etcd communication
|
|
253
|
+
- 6443: K3s API server
|
|
254
|
+
- 10250: Kubelet metrics
|
|
255
|
+
- 51820-51821: WireGuard (for encrypted networking)
|
|
256
|
+
- 22: SSH access
|
|
257
|
+
- 80, 443: HTTP/HTTPS access
|
|
258
|
+
- 53: DNS (CoreDNS)
|
|
259
|
+
- 5432: PostgreSQL access (master node)
|
|
260
|
+
|
|
261
|
+
**UDP Ports:**
|
|
262
|
+
|
|
263
|
+
- 8472: VXLAN for Flannel
|
|
264
|
+
- 53: DNS
|
|
265
|
+
|
|
266
|
+
#### OpenStack:
|
|
267
|
+
When provisioning on OpenStack, you should provide the value for 'floating_ip_pool' from which floating IPs can be allocated for the instance. If not specified, OpenTofu will not assign floating IP.
|
|
268
|
+
|
|
269
|
+
---
|
|
270
|
+
|
|
271
|
+
## Advanced Usage
|
|
272
|
+
|
|
273
|
+
### Dry Run Mode
|
|
274
|
+
|
|
275
|
+
All operations support a **dryrun** parameter, which validates the configuration
|
|
276
|
+
without making changes. A node created with dryrun should be removed with dryrun.
|
|
277
|
+
|
|
278
|
+
```python
|
|
279
|
+
# Validate configuration without deploying
|
|
280
|
+
orchestrator.add_node(config, dryrun=True)
|
|
281
|
+
|
|
282
|
+
# Validate removal without destroying
|
|
283
|
+
orchestrator.remove_node(cluster_name, resource_name, dryrun=True)
|
|
284
|
+
|
|
285
|
+
# Validate destruction without destroying
|
|
286
|
+
orchestrator.destroy(cluster_name, dryrun=True)
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
### Custom Cluster Names
|
|
290
|
+
|
|
291
|
+
By default, cluster names are generated automatically. To specify a custom name:
|
|
292
|
+
|
|
293
|
+
```python
|
|
294
|
+
config = {
|
|
295
|
+
"cloud": "aws",
|
|
296
|
+
"k3s_role": "master",
|
|
297
|
+
"cluster_name": "production-cluster",
|
|
298
|
+
# ... other configuration ...
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
orchestrator.add_node(config)
|
|
302
|
+
```
|
|
303
|
+
|
|
304
|
+
---
|
|
305
|
+
|
|
306
|
+
## Template Structure
|
|
307
|
+
|
|
308
|
+
Templates should be organised as follows:
|
|
309
|
+
- `templates/` - Base directory for templates
|
|
310
|
+
- `templates/{cloud}/` - Terraform modules for each cloud provider
|
|
311
|
+
- `templates/{role}_user_data.sh.tpl` - Node initialisation scripts
|
|
312
|
+
- `templates/{cloud}_provider.tf.j2` - Provider configuration templates
|
|
313
|
+
|
|
314
|
+
---
|
|
315
|
+
|
|
316
|
+
## DEMO
|
|
317
|
+
Some test scripts have been created for demonstrating the functionality of the cluster builder. These scripts can be referred to for understanding how the system works and for testing various configurations.
|
|
318
|
+
|
|
319
|
+
For detailed service deployment examples and to explore the test scripts, refer to the [test scripts](docs/test-scripts.md) document
|
|
320
|
+
|
|
321
|
+
---
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
cluster_builder/__init__.py,sha256=p2Rb2BTVm-ScqCKE38436WsItY1BjVAnvx7zwmneSLs,256
|
|
2
|
+
cluster_builder/swarmchestrate.py,sha256=jopT6PVt4bpv2nBqiKp6Fszv2W7gkeNcL0IQfSunnEI,23141
|
|
3
|
+
cluster_builder/config/__init__.py,sha256=HqCua7nqa0m4RNrH-wAw-GNZ8PfmKOeYs2Ur81xGIKU,222
|
|
4
|
+
cluster_builder/config/cluster.py,sha256=igIj7HJNThaUvefwaKqes4-tRq0iCPh4T5tZFEWlgEw,4945
|
|
5
|
+
cluster_builder/config/postgres.py,sha256=nQ5QxxI00GmGAbDl_9I1uEU2eBy1D2eJWGzhsBYUFMc,3354
|
|
6
|
+
cluster_builder/infrastructure/__init__.py,sha256=e8XY3K7Y6FJS-ODr5ufB_myV7btFvYHnwA9sxkob8o8,247
|
|
7
|
+
cluster_builder/infrastructure/executor.py,sha256=oymr_ZP8xAOcNDAuGCp1v4F81-chR3VRotoD732l4q0,2874
|
|
8
|
+
cluster_builder/infrastructure/templates.py,sha256=TAdNP-012L76dOYsd7JVIQOD4K9XNobK9QWfOoYrbeU,4084
|
|
9
|
+
cluster_builder/templates/aws_provider.tf,sha256=VIRuH_-8pYtJ0Mkck38WUSszHiN3DesFOWkx75aoOIY,425
|
|
10
|
+
cluster_builder/templates/copy_manifest.tf,sha256=APVAN-xrgDcavzApwl3ZNiXAOx5V3B1EDYp1zHZ3BqM,865
|
|
11
|
+
cluster_builder/templates/ha_user_data.sh.tpl,sha256=NJOH_yelvK5RW4aMKwIEKTBhnXHaxcxHzDUEq0lzSqE,977
|
|
12
|
+
cluster_builder/templates/master_user_data.sh.tpl,sha256=b7xo2weZfdAubaEO2L7NeVtFkZoSCT6_l15UG1AVEkc,1473
|
|
13
|
+
cluster_builder/templates/openstack_provider.tf,sha256=wFUmkws5xSTOM1GW0Jd8JD__VAUBPNF4j1amo2SRyVM,2049
|
|
14
|
+
cluster_builder/templates/worker_user_data.sh.tpl,sha256=9Sp-rwLJvsjOopb85vumBCcMJfHFG7BNCWkUu6qUAeA,1030
|
|
15
|
+
cluster_builder/templates/aws/main.tf,sha256=UXH_mEldtVI-bHBGlc6q7wG5BKiYyLsSzpSpbocqysk,4907
|
|
16
|
+
cluster_builder/templates/edge/main.tf,sha256=y2X61imI-i97Cqz9FmPR-qNwGgQo2i7Aks7l2Tidq6E,2329
|
|
17
|
+
cluster_builder/templates/openstack/main.tf,sha256=fw7i3NJQn752Pct5wYtijhKI2A2cLK5h0FkRkyeVmXY,7331
|
|
18
|
+
cluster_builder/utils/__init__.py,sha256=TeronqOND-SIfi0e76lwD1HfUiPO2h2ZfYhLIwZ3Aks,145
|
|
19
|
+
cluster_builder/utils/hcl.py,sha256=VptRAt2Cy0AxowqMJBZ60KGe4Uptji3Y9WiYrDQsrqY,11534
|
|
20
|
+
cluster_builder/utils/logging.py,sha256=rwDViuqG8PMcXJWHOdtdgbGhWMnbSZ4MwfKsXHxu2B4,1242
|
|
21
|
+
cluster_builder-0.3.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
|
22
|
+
cluster_builder-0.3.1.dist-info/METADATA,sha256=62YtIg8aex7OFATqAeDE9OldH2B29tOJgYAGCuKjQOM,10350
|
|
23
|
+
cluster_builder-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
24
|
+
cluster_builder-0.3.1.dist-info/top_level.txt,sha256=fTW8EW1mcWoeWprjwxSHRWpqfXYX8iN-ByEt8HPXIcs,16
|
|
25
|
+
cluster_builder-0.3.1.dist-info/RECORD,,
|
|
@@ -1,40 +0,0 @@
|
|
|
1
|
-
locals {
|
|
2
|
-
cluster_name = "{{ cluster_name }}"
|
|
3
|
-
edge_device_ip = "{{ edge_device_ip }}"
|
|
4
|
-
k3s_token = "{{ k3s_token }}"
|
|
5
|
-
k3s_role = "{{ k3s_role }}"
|
|
6
|
-
random_name = "{{ random_name }}"
|
|
7
|
-
|
|
8
|
-
{% if k3s_role != "master" %}
|
|
9
|
-
master_ip = "{{ master_ip }}"
|
|
10
|
-
{% endif %}
|
|
11
|
-
}
|
|
12
|
-
|
|
13
|
-
resource "null_resource" "deploy_k3s_edge" {
|
|
14
|
-
|
|
15
|
-
connection {
|
|
16
|
-
type = "ssh"
|
|
17
|
-
user = "{{ ssh_user }}"
|
|
18
|
-
password = "{{ ssh_password }}"
|
|
19
|
-
host = local.edge_device_ip
|
|
20
|
-
}
|
|
21
|
-
|
|
22
|
-
provisioner "file" {
|
|
23
|
-
source = "${local.k3s_role}_user_data.sh"
|
|
24
|
-
destination = "/tmp/edge_user_data.sh"
|
|
25
|
-
}
|
|
26
|
-
|
|
27
|
-
provisioner "remote-exec" {
|
|
28
|
-
inline = [
|
|
29
|
-
"chmod +x /tmp/edge_user_data.sh",
|
|
30
|
-
"sudo K3S_TOKEN='${local.k3s_token}' {% if k3s_role != 'master' %}MASTER_IP='${local.master_ip}'{% endif %} /tmp/edge_user_data.sh"
|
|
31
|
-
]
|
|
32
|
-
}
|
|
33
|
-
|
|
34
|
-
triggers = {
|
|
35
|
-
Name = "K3s-${local.k3s_role}-${local.cluster_name}-${local.random_name}"
|
|
36
|
-
cluster_name = local.cluster_name
|
|
37
|
-
role = local.k3s_role
|
|
38
|
-
random_name = local.random_name
|
|
39
|
-
}
|
|
40
|
-
}
|