cluster-builder 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of cluster-builder might be problematic.
- cluster_builder/config/cluster.py +6 -3
- cluster_builder/swarmchestrate.py +53 -47
- cluster_builder/templates/aws/main.tf +3 -3
- cluster_builder/templates/{copy_manifest.tf → deploy_manifest.tf} +10 -3
- cluster_builder/templates/edge/main.tf +4 -4
- cluster_builder/templates/ha_user_data.sh.tpl +1 -1
- cluster_builder/templates/master_user_data.sh.tpl +2 -2
- cluster_builder/templates/openstack/main.tf +4 -4
- cluster_builder/templates/worker_user_data.sh.tpl +1 -1
- {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/METADATA +44 -26
- cluster_builder-0.3.2.dist-info/RECORD +25 -0
- cluster_builder-0.3.1.dist-info/RECORD +0 -25
- {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/WHEEL +0 -0
- {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/top_level.txt +0 -0
cluster_builder/config/cluster.py

@@ -131,9 +131,12 @@ class ClusterConfig:
         logger.debug(f"Cluster directory: {cluster_dir}")

         # Generate a resource name
-        … (3 lines not shown)
+        if "resource_name" not in prepared_config:
+            random_name = self.generate_random_name()
+            prepared_config["resource_name"] = f"{cloud}-{random_name}"
+            logger.debug(f"Resource name: {prepared_config['resource_name']}")
+        else:
+            logger.debug(f" USing provded Resource name: {prepared_config['resource_name']}")

         # Create the cluster directory
         try:
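The change above means 0.3.2 generates a resource name only when the caller has not already supplied one. A minimal self-contained sketch of that pattern, with secrets.token_hex standing in for the class's generate_random_name helper (the stand-in is an assumption):

import secrets

def prepare_resource_name(prepared_config: dict, cloud: str) -> str:
    """Return the resource name, generating one only if the caller omitted it."""
    if "resource_name" not in prepared_config:
        # secrets.token_hex(3) stands in for ClusterConfig.generate_random_name()
        prepared_config["resource_name"] = f"{cloud}-{secrets.token_hex(3)}"
    return prepared_config["resource_name"]

print(prepare_resource_name({}, "aws"))                               # e.g. aws-9f2c1a
print(prepare_resource_name({"resource_name": "aws-custom"}, "aws"))  # aws-custom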
cluster_builder/swarmchestrate.py

@@ -217,7 +217,7 @@ class Swarmchestrate:
         outputs_file = os.path.join(cluster_dir, "outputs.tf")

         # Define common output names
-        output_names = ["cluster_name", "master_ip", "worker_ip", "ha_ip", "k3s_token", "…
+        output_names = ["cluster_name", "master_ip", "worker_ip", "ha_ip", "k3s_token", "resource_name"]

         # Include additional outputs based on the cloud type
         if "aws" in cluster_dir:
@@ -234,9 +234,9 @@ class Swarmchestrate:
         try:
             self.deploy(cluster_dir, dryrun)
             cluster_name = prepared_config["cluster_name"]
-            …
+            resource_name = prepared_config["resource_name"]
             logger.info(
-                f"✅ Successfully added '{…
+                f"✅ Successfully added '{resource_name}' for cluster '{cluster_name}'"
             )
             # Run 'tofu output -json' to get outputs
             result = subprocess.run(
@@ -256,7 +256,7 @@ class Swarmchestrate:
             "k3s_token": outputs.get("k3s_token", {}).get("value"),
             "worker_ip": outputs.get("worker_ip", {}).get("value"),
             "ha_ip": outputs.get("ha_ip", {}).get("value"),
-            "…
+            "resource_name": outputs.get("resource_name", {}).get("value")
         }
         # Add cloud-specific output
         if "aws" in cluster_dir:
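The dictionary above is built from what `tofu output -json` prints: each output is wrapped in an object carrying a "value" key, hence the chained .get(...) calls. A hedged sketch of that step in isolation (the cluster directory is a placeholder):

import json
import subprocess

result = subprocess.run(
    ["tofu", "output", "-json"],
    cwd="clusters/aws-example",  # hypothetical cluster directory
    capture_output=True,
    text=True,
    check=True,
)
outputs = json.loads(result.stdout)
# Each entry looks like {"resource_name": {"value": "...", "type": "string", ...}}
resource_name = outputs.get("resource_name", {}).get("value")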
@@ -520,7 +520,7 @@ class Swarmchestrate:
         if connection:
             connection.close()

-    def …
+    def deploy_manifests(
         self,
         manifest_folder: str,
         master_ip: str,
@@ -528,7 +528,7 @@ class Swarmchestrate:
         ssh_user: str,
     ):
         """
-        Copy and apply manifests to a cluster using copy_manifest.tf in a …
+        Copy and apply manifests to a cluster using copy_manifest.tf in a temporaryfolder.

         Args:
             manifest_folder: Path to local manifest folder
@@ -539,49 +539,55 @@ class Swarmchestrate:
         # Dedicated folder for copy-manifest operations
         copy_dir = Path(self.output_dir) / "copy-manifest"
         copy_dir.mkdir(parents=True, exist_ok=True)
-        logger.info(f"Using separate copy-manifest folder: {copy_dir}")

-        …
-        tf_source_file = Path(self.template_manager.templates_dir) / "copy_manifest.tf"
-        if not tf_source_file.exists():
-            logger.error(f"copy_manifest.tf not found at: {tf_source_file}")
-            raise RuntimeError(f"copy_manifest.tf not found at: {tf_source_file}")
-        shutil.copy(tf_source_file, copy_dir)
-        logger.info(f"Copied copy_manifest.tf to {copy_dir}")
-
-        # Prepare environment for OpenTofu
-        env_vars = os.environ.copy()
-        env_vars["TF_LOG"] = os.getenv("TF_LOG", "INFO")
-        env_vars["TF_LOG_PATH"] = os.getenv("TF_LOG_PATH", "/tmp/opentofu.log")
+        logger.debug(f"Using copy-manifest folder: {copy_dir}")

         try:
-            # …
-            … (22 lines not shown)
-            logger.info("✅ Copy-manifest applied successfully on master node.")
+            # Copy copy_manifest.tf from templates
+            tf_source_file = Path(self.template_manager.templates_dir) / "deploy_manifest.tf"
+            if not tf_source_file.exists():
+                logger.debug(f"deploy_manifest.tf not found at: {tf_source_file}")
+                raise RuntimeError(f"deploy_manifest.tf not found at: {tf_source_file}")
+            shutil.copy(tf_source_file, copy_dir)
+            logger.debug(f"Copied copy_manifest.tf to {copy_dir}")
+
+            # Prepare environment for OpenTofu
+            env_vars = os.environ.copy()
+            env_vars["TF_LOG"] = os.getenv("TF_LOG", "INFO")
+            env_vars["TF_LOG_PATH"] = os.getenv("TF_LOG_PATH", "/tmp/opentofu.log")
+
+            logger.info(f"------------ Applying manifest on node: {master_ip} -------------------")
+
+            # Run tofu init with spinner
+            CommandExecutor.run_command(
+                ["tofu", "init"],
+                cwd=str(copy_dir),
+                description="OpenTofu init",
+                env=env_vars,
+            )

-            … (4 lines not shown)
+            # Run tofu apply with spinner
+            CommandExecutor.run_command(
+                [
+                    "tofu",
+                    "apply",
+                    "-auto-approve",
+                    f"-var=manifest_folder={manifest_folder}",
+                    f"-var=master_ip={master_ip}",
+                    f"-var=ssh_private_key_path={ssh_key_path}",
+                    f"-var=ssh_user={ssh_user}"
+                ],
+                cwd=str(copy_dir),
+                description="OpenTofu apply",
+                env=env_vars,
+            )
+
+            logger.info("------------ Successfully applied manifests -------------------")
+
+        except RuntimeError as e:
+            print(f"\n---------- ERROR ----------\n{e}\n")
+            raise

+        finally:
+            if copy_dir.exists():
+                shutil.rmtree(copy_dir)
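A usage sketch of the new method, mirroring the README example added further down in this diff (paths, IP, and user are placeholders):

orchestrator.deploy_manifests(
    manifest_folder="path/to/manifests",
    master_ip="MASTER_NODE_IP",
    ssh_key_path="path/to/key.pem",
    ssh_user="ec2-user",
)

The manifests are copied to the master over SSH and applied there; note the scratch copy-manifest directory is now removed in the finally block even when apply fails.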
cluster_builder/templates/aws/main.tf

@@ -91,7 +91,7 @@ resource "aws_instance" "k3s_node" {
   vpc_security_group_ids = var.security_group_id != "" ? [var.security_group_id] : [aws_security_group.k3s_sg[0].id]

   tags = {
-    Name = "${var.…
+    Name = "${var.resource_name}"
     ClusterName = var.cluster_name
     Role = var.k3s_role
   }
@@ -104,7 +104,7 @@ resource "aws_instance" "k3s_node" {
       master_ip = var.master_ip,
       cluster_name = var.cluster_name,
       public_ip = self.public_ip,
-      …
+      resource_name = "${var.resource_name}"
     })
     destination = "/tmp/k3s_user_data.sh"
   }
@@ -151,6 +151,6 @@ output "instance_status" {
   value = aws_instance.k3s_node.id
 }

-output "…
+output "resource_name" {
   value = aws_instance.k3s_node.tags["Name"]
 }
cluster_builder/templates/{copy_manifest.tf → deploy_manifest.tf}

@@ -27,10 +27,17 @@ resource "null_resource" "copy_manifests" {
     destination = "/home/${var.ssh_user}"
   }

-  # Apply…
+  # Apply namespace.yaml first
   provisioner "remote-exec" {
     inline = [
-      …
-      …
+      "sudo -E KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl apply -f /home/${var.ssh_user}/manifests/namespace.yaml"
+    ]
+  }
+
+  # Apply the rest of the manifests
+  provisioner "remote-exec" {
+    inline = [
+      "sudo -E KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl apply -R -f /home/${var.ssh_user}/manifests/ --selector='!namespace.yaml'"
+    ]
   }
 }
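The template now splits the work across two remote-exec provisioners so the Namespace object exists before any namespaced resources are applied. A rough local equivalent of that ordering (paths are placeholders; the template itself runs kubectl over SSH with the k3s kubeconfig):

import subprocess

# Create the Namespace first, then apply everything else recursively;
# 'kubectl apply' is idempotent, so re-applying namespace.yaml is harmless.
subprocess.run(["kubectl", "apply", "-f", "manifests/namespace.yaml"], check=True)
subprocess.run(["kubectl", "apply", "-R", "-f", "manifests/"], check=True)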
cluster_builder/templates/edge/main.tf

@@ -30,7 +30,7 @@ data "template_file" "user_data" {
     ha = var.ha
     public_ip = var.edge_device_ip
     master_ip = var.master_ip
-    …
+    resource_name = "${var.resource_name}"
   }
 }

@@ -63,7 +63,7 @@ resource "null_resource" "deploy_k3s_edge" {
   }

   triggers = {
-    Name = "…
+    Name = "${var.resource_name}"
     cluster_name = var.cluster_name
     role = var.k3s_role
     resource_name = var.resource_name
@@ -93,6 +93,6 @@ output "k3s_token" {
   value = var.k3s_token
 }

-output "…
-  value = "…
+output "resource_name" {
+  value = "var.resource_name}"
 }
cluster_builder/templates/ha_user_data.sh.tpl

@@ -21,7 +21,7 @@ log_message "Installing K3s HA Server and joining the cluster..."
 if ! curl -sfL https://get.k3s.io | K3S_TOKEN="${k3s_token}" sh -s - server \
     --server "https://${master_ip}:6443" \
     --node-external-ip="${public_ip}" \
-    --node-name="${…
+    --node-name="${resource_name}" \
     --flannel-backend=wireguard-native \
     --flannel-external-ip; then
     log_message "ERROR: K3s server installation failed!"
cluster_builder/templates/master_user_data.sh.tpl

@@ -25,10 +25,10 @@ else
     # Templated installation based on HA configuration
     if [[ "${ha}" == "true" ]]; then
         log_message "Installing in HA mode using cluster-init..."
-        curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--cluster-init --node-external-ip=${public_ip} --node-name="${…
+        curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--cluster-init --node-external-ip=${public_ip} --node-name="${resource_name}" --flannel-backend=wireguard-native --flannel-external-ip" K3S_TOKEN="${k3s_token}" sh -s - server
     else
         log_message "Installing in single-server mode..."
-        curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-external-ip=${public_ip} --node-name="${…
+        curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-external-ip=${public_ip} --node-name="${resource_name}" --flannel-backend=wireguard-native --flannel-external-ip" K3S_TOKEN="${k3s_token}" sh -s - server
     fi

     log_message "K3s installation completed successfully."
cluster_builder/templates/openstack/main.tf

@@ -113,7 +113,7 @@ resource "openstack_networking_port_secgroup_associate_v2" "port_2" {
 resource "openstack_compute_instance_v2" "k3s_node" {
   depends_on = [openstack_networking_port_v2.port_1]

-  name = "${var.…
+  name = "${var.resource_name}"
   flavor_name = var.openstack_flavor_id
   key_pair = var.ssh_key_name
   # Only add the image_id if block device is NOT used
@@ -136,7 +136,7 @@ resource "openstack_compute_instance_v2" "k3s_node" {
   }

   tags = [
-    "${var.…
+    "${var.resource_name}",
     "ClusterName=${var.cluster_name}",
     "Role=${var.k3s_role}"
   ]
@@ -166,7 +166,7 @@ resource "null_resource" "k3s_provision" {
       master_ip = var.master_ip,
       cluster_name = var.cluster_name,
       public_ip = openstack_networking_floatingip_v2.floatip_1.address,
-      …
+      resource_name = "${var.resource_name}"
     })
     destination = "/tmp/k3s_user_data.sh"
   }
@@ -213,6 +213,6 @@ output "instance_power_state" {
   value = openstack_compute_instance_v2.k3s_node.power_state
 }

-output "…
+output "resource_name" {
   value = openstack_compute_instance_v2.k3s_node.name
 }
cluster_builder/templates/worker_user_data.sh.tpl

@@ -23,7 +23,7 @@ else
     export K3S_TOKEN="${k3s_token}"

     # Install the K3s agent and join the cluster
-    if ! curl -sfL https://get.k3s.io | sh -s - agent --node-external-ip="${public_ip}" --node-name="${…
+    if ! curl -sfL https://get.k3s.io | sh -s - agent --node-external-ip="${public_ip}" --node-name="${resource_name}"; then
         log_message "ERROR: K3s agent installation failed!"
         exit 1
     else
{cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cluster-builder
-Version: 0.3.1
+Version: 0.3.2
 Summary: Swarmchestrate cluster builder
 Author-email: Gunjan <G.Kotak@westminster.ac.uk>, Jay <J.Deslauriers@westminster.ac.uk>
 License: Apache2
@@ -16,7 +16,7 @@ Dynamic: license-file

 # Swarmchestrate - Cluster Builder

-This repository contains the codebase for **…
+This repository contains the codebase for **cluster-builder**, which builds K3s clusters for Swarmchestrate using OpenTofu.

 Key features:
 - **Create**: Provisions infrastructure using OpenTofu and installs K3s.
@@ -33,11 +33,10 @@ Before proceeding, ensure the following prerequisites are installed:
 1. **Git**: For cloning the repository.
 2. **Python**: Version 3.9 or higher.
 3. **pip**: Python package manager.
-4. **…
-… (3 lines not shown)
-9. For detailed instructions on **edge device requirements**, refer to the [Edge Device Requirements](docs/edge-requirements.md) document.
+4. **Make**: To run the provided `Makefile`.
+5. **PostgreSQL**: For storing OpenTofu state.
+6. (Optional) **Docker**: To create a dev Postgres
+7. For detailed instructions on **edge device requirements**, refer to the [Edge Device Requirements](docs/edge-requirements.md) document.

 ---
@@ -142,16 +141,21 @@ orchestrator = Swarmchestrate(
 To create a new k3s cluster, use the **add_node** method with the **master** role:

 ```python
-# Configuration for a new cluster
+# Configuration for a new cluster using aws provider
 config = {
-     "cloud": "aws",
-     "k3s_role": "master",
+    "cloud": "aws",
+    "k3s_role": "master",
     "ha": False, # Set to True for high availability (HA) deployments
     "instance_type": "t2.small", # AWS instance type
     "ssh_key_name": "g", # SSH key name for AWS or OpenStack
     "ssh_user": "ec2-user", # SSH user for the instance
     "ssh_private_key_path": "/workspaces/cluster-builder/scripts/g.pem", # Path to SSH private key
     "ami": "ami-0c0493bbac867d427", # AMI ID for AWS (specific to region)
+    # Optional parameters
+    # If existing SG is specified, it will be used directly with no port changes
+    "security_group_id": "sg-0123456789abcdef0",
+    # No security_group_id means a new SG will be created and these ports applied as rules
+    # These ports will be used ONLY if creating a new SG
     "tcp_ports": [10020], # Optional list of TCP ports to open
     "udp_ports": [1003] # Optional list of UDP ports to open
 }
@@ -161,27 +165,33 @@ cluster_name = orchestrator.add_node(config)
 print(f"Created cluster: {cluster_name}")
 ```

+Note: Fetch the outputs from the master node and use them when adding a worker node.
+
 ### Adding Nodes to an Existing Cluster

 To add worker or high-availability nodes to an existing cluster:

 ```python
-# Configuration for adding a worker node
+# Configuration for adding a worker node using aws provider
 worker_config = {
-    "cloud": "aws",
+    "cloud": "aws",
     "k3s_role": "worker", # Role can be 'worker' or 'ha'
-    "ha": False, # Set to True for high availability (HA) deployments
     "instance_type": "t2.small", # AWS instance type
     "ssh_key_name": "g", # SSH key name
     "ssh_user": "ec2-user", # SSH user for the instance
     "ssh_private_key_path": "/workspaces/cluster-builder/scripts/g.pem", # Path to SSH private key
     "ami": "ami-0c0493bbac867d427", # AMI ID for AWS
-    # …
-    … (3 lines not shown)
-    # …
-    # …
+    # Additional parameters obtained after deploying the master node:
+    "master_ip": "12.13.14.15", # IP address of the master node (required for worker/HA roles)
+    "cluster_name": "elastic_mcnulty", # Name of the cluster
+    "k3s_token": "G4lm7wEaFuCCygeU", # Token of the cluster
+    # Optional parameters
+    # If existing SG is specified, it will be used directly with no port changes
+    "security_group_id": "sg-0123456789abcdef0",
+    # No security_group_id means a new SG will be created and these ports applied as rules
+    # These ports will be used ONLY if creating a new SG
+    "tcp_ports": [10020], # Optional list of TCP ports to open
+    "udp_ports": [1003] # Optional list of UDP ports to open
 }

 # Add the worker node
@@ -226,17 +236,25 @@ Note for **Edge Devices**:
 Since the edge device is already provisioned, the `destroy` method will not remove K3s directly from the edge device. You will need to manually uninstall K3s from your edge device after the cluster is destroyed.

 ---
+### Deploying Manifests

-…
-#### High Availability Flag (ha):
+The deploy_manifests method copies Kubernetes manifests to the target cluster node.

-…
+```python
+orchestrator.deploy_manifests(
+    manifest_folder="path/to/manifests",
+    master_ip="MASTER_NODE_IP",
+    ssh_key_path="path/to/key.pem",
+    ssh_user="USERNAME"
+)
+```

-…
+## Important Configuration Requirements
+### High Availability Flag (ha):

 - The ha flag should be set to True for high availability deployment (usually when adding a ha or worker node to an existing master).

-…
+### SSH Credentials:

 - For all roles (k3s_role="master", k3s_role="worker", k3s_role="ha"), you must specify both ssh_user and ssh_private_key_path except for edge.

@@ -244,7 +262,7 @@ Since the edge device is already provisioned, the `destroy` method will not remo

 - The ssh_key_name and the ssh_private_key_path are different—ensure that your SSH key is placed correctly at the provided ssh_private_key_path.

-…
+### Ports:
 You can specify custom ports for your nodes in the tcp_ports and udp_ports fields. However, certain ports are required for Kubernetes deployment (even if not specified explicitly):

 **TCP Ports:**
@@ -263,7 +281,7 @@ You can specify custom ports for your nodes in the tcp_ports and udp_ports field
 - 8472: VXLAN for Flannel
 - 53: DNS

-…
+### OpenStack:
 When provisioning on OpenStack, you should provide the value for 'floating_ip_pool' from which floating IPs can be allocated for the instance. If not specified, OpenTofu will not assign floating IP.

 ---
cluster_builder-0.3.2.dist-info/RECORD

@@ -0,0 +1,25 @@
+cluster_builder/__init__.py,sha256=p2Rb2BTVm-ScqCKE38436WsItY1BjVAnvx7zwmneSLs,256
+cluster_builder/swarmchestrate.py,sha256=LwIRR1HGD6y6XKhAxDU-T68vIPjtRIeaoaHX95wM_ZM,23139
+cluster_builder/config/__init__.py,sha256=HqCua7nqa0m4RNrH-wAw-GNZ8PfmKOeYs2Ur81xGIKU,222
+cluster_builder/config/cluster.py,sha256=0CASucE_npbEmGnyR3UmF0v836tcx9HthAiotCh4sSo,5116
+cluster_builder/config/postgres.py,sha256=nQ5QxxI00GmGAbDl_9I1uEU2eBy1D2eJWGzhsBYUFMc,3354
+cluster_builder/infrastructure/__init__.py,sha256=e8XY3K7Y6FJS-ODr5ufB_myV7btFvYHnwA9sxkob8o8,247
+cluster_builder/infrastructure/executor.py,sha256=oymr_ZP8xAOcNDAuGCp1v4F81-chR3VRotoD732l4q0,2874
+cluster_builder/infrastructure/templates.py,sha256=TAdNP-012L76dOYsd7JVIQOD4K9XNobK9QWfOoYrbeU,4084
+cluster_builder/templates/aws_provider.tf,sha256=VIRuH_-8pYtJ0Mkck38WUSszHiN3DesFOWkx75aoOIY,425
+cluster_builder/templates/deploy_manifest.tf,sha256=bzvcK-5iAFDMGU32YR5os6qXF1p9d7qWqlv7Kqm72Qo,1106
+cluster_builder/templates/ha_user_data.sh.tpl,sha256=njvsBRjdKBuUaYbujJ689wI2sfpoHVpr2kkbG9sKzpw,981
+cluster_builder/templates/master_user_data.sh.tpl,sha256=g_uaehoi9Pm_vCx_vJhXCUqAt7DpcqnAi_QPm5VOgWw,1481
+cluster_builder/templates/openstack_provider.tf,sha256=wFUmkws5xSTOM1GW0Jd8JD__VAUBPNF4j1amo2SRyVM,2049
+cluster_builder/templates/worker_user_data.sh.tpl,sha256=9WP6qe6DGMHgFds_loI1N7DEuMeOI6U4SA-g3GYIIIU,1034
+cluster_builder/templates/aws/main.tf,sha256=v_mR6tdH4-E1SKI8FNqfgl-gU2POrKqRfkXCXV1DGFQ,4875
+cluster_builder/templates/edge/main.tf,sha256=8sBL_ofFfhMEH2biPRmB7X4H_SG3JgYygCOEo90yDTY,2255
+cluster_builder/templates/openstack/main.tf,sha256=uMzArcNE0wbx23Y0x9B7jGIiWIgJdQUT6CPw5TjP160,7279
+cluster_builder/utils/__init__.py,sha256=TeronqOND-SIfi0e76lwD1HfUiPO2h2ZfYhLIwZ3Aks,145
+cluster_builder/utils/hcl.py,sha256=VptRAt2Cy0AxowqMJBZ60KGe4Uptji3Y9WiYrDQsrqY,11534
+cluster_builder/utils/logging.py,sha256=rwDViuqG8PMcXJWHOdtdgbGhWMnbSZ4MwfKsXHxu2B4,1242
+cluster_builder-0.3.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+cluster_builder-0.3.2.dist-info/METADATA,sha256=7M-LAWWdwURezuH4N169kDUZJPjT89ERxH6PlNJ5lOU,10937
+cluster_builder-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cluster_builder-0.3.2.dist-info/top_level.txt,sha256=fTW8EW1mcWoeWprjwxSHRWpqfXYX8iN-ByEt8HPXIcs,16
+cluster_builder-0.3.2.dist-info/RECORD,,
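Each RECORD line has the form path,sha256=<digest>,size, where the digest is an unpadded urlsafe-base64 SHA-256 as required by the wheel spec. A small sketch for spot-checking one entry against an unpacked wheel (the path is a placeholder):

import base64
import hashlib

def record_hash(path: str) -> str:
    """Compute a RECORD-style sha256 entry for a file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# Compare against the matching RECORD line above, e.g. cluster_builder/__init__.py
print(record_hash("cluster_builder/__init__.py"))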
cluster_builder-0.3.1.dist-info/RECORD

@@ -1,25 +0,0 @@
-cluster_builder/__init__.py,sha256=p2Rb2BTVm-ScqCKE38436WsItY1BjVAnvx7zwmneSLs,256
-cluster_builder/swarmchestrate.py,sha256=jopT6PVt4bpv2nBqiKp6Fszv2W7gkeNcL0IQfSunnEI,23141
-cluster_builder/config/__init__.py,sha256=HqCua7nqa0m4RNrH-wAw-GNZ8PfmKOeYs2Ur81xGIKU,222
-cluster_builder/config/cluster.py,sha256=igIj7HJNThaUvefwaKqes4-tRq0iCPh4T5tZFEWlgEw,4945
-cluster_builder/config/postgres.py,sha256=nQ5QxxI00GmGAbDl_9I1uEU2eBy1D2eJWGzhsBYUFMc,3354
-cluster_builder/infrastructure/__init__.py,sha256=e8XY3K7Y6FJS-ODr5ufB_myV7btFvYHnwA9sxkob8o8,247
-cluster_builder/infrastructure/executor.py,sha256=oymr_ZP8xAOcNDAuGCp1v4F81-chR3VRotoD732l4q0,2874
-cluster_builder/infrastructure/templates.py,sha256=TAdNP-012L76dOYsd7JVIQOD4K9XNobK9QWfOoYrbeU,4084
-cluster_builder/templates/aws_provider.tf,sha256=VIRuH_-8pYtJ0Mkck38WUSszHiN3DesFOWkx75aoOIY,425
-cluster_builder/templates/copy_manifest.tf,sha256=APVAN-xrgDcavzApwl3ZNiXAOx5V3B1EDYp1zHZ3BqM,865
-cluster_builder/templates/ha_user_data.sh.tpl,sha256=NJOH_yelvK5RW4aMKwIEKTBhnXHaxcxHzDUEq0lzSqE,977
-cluster_builder/templates/master_user_data.sh.tpl,sha256=b7xo2weZfdAubaEO2L7NeVtFkZoSCT6_l15UG1AVEkc,1473
-cluster_builder/templates/openstack_provider.tf,sha256=wFUmkws5xSTOM1GW0Jd8JD__VAUBPNF4j1amo2SRyVM,2049
-cluster_builder/templates/worker_user_data.sh.tpl,sha256=9Sp-rwLJvsjOopb85vumBCcMJfHFG7BNCWkUu6qUAeA,1030
-cluster_builder/templates/aws/main.tf,sha256=UXH_mEldtVI-bHBGlc6q7wG5BKiYyLsSzpSpbocqysk,4907
-cluster_builder/templates/edge/main.tf,sha256=y2X61imI-i97Cqz9FmPR-qNwGgQo2i7Aks7l2Tidq6E,2329
-cluster_builder/templates/openstack/main.tf,sha256=fw7i3NJQn752Pct5wYtijhKI2A2cLK5h0FkRkyeVmXY,7331
-cluster_builder/utils/__init__.py,sha256=TeronqOND-SIfi0e76lwD1HfUiPO2h2ZfYhLIwZ3Aks,145
-cluster_builder/utils/hcl.py,sha256=VptRAt2Cy0AxowqMJBZ60KGe4Uptji3Y9WiYrDQsrqY,11534
-cluster_builder/utils/logging.py,sha256=rwDViuqG8PMcXJWHOdtdgbGhWMnbSZ4MwfKsXHxu2B4,1242
-cluster_builder-0.3.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-cluster_builder-0.3.1.dist-info/METADATA,sha256=62YtIg8aex7OFATqAeDE9OldH2B29tOJgYAGCuKjQOM,10350
-cluster_builder-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-cluster_builder-0.3.1.dist-info/top_level.txt,sha256=fTW8EW1mcWoeWprjwxSHRWpqfXYX8iN-ByEt8HPXIcs,16
-cluster_builder-0.3.1.dist-info/RECORD,,
File without changes: {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/WHEEL
File without changes: {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/licenses/LICENSE
File without changes: {cluster_builder-0.3.1.dist-info → cluster_builder-0.3.2.dist-info}/top_level.txt