cluster-builder 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of cluster-builder has been flagged as potentially problematic.

@@ -1,2 +1,34 @@
  #!/bin/bash
- curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - agent --server https://${master_ip}:6443
+ set -euo pipefail
+
+ LOG_FILE="/var/log/k3s_agent_install.log"
+ exec > >(tee -a "$LOG_FILE") 2>&1
+ echo "=== K3s Agent Install Script Started at $(date) ==="
+
+ # Function to log messages with timestamp
+ log_message() {
+     echo "$(date) - $1"
+ }
+
+ # Use the provided public IP
+ log_message "Using provided public IP: ${public_ip}"
+
+ # Check if K3s agent is already running
+ if systemctl is-active --quiet k3s-agent; then
+     log_message "K3s agent is already running. Skipping installation."
+ else
+     log_message "K3s agent is not running. Proceeding with installation..."
+
+     export K3S_URL="https://${master_ip}:6443"
+     export K3S_TOKEN="${k3s_token}"
+
+     # Install the K3s agent and join the cluster
+     if ! curl -sfL https://get.k3s.io | sh -s - agent --node-external-ip="${public_ip}" --node-name="${resource_name}"; then
+         log_message "ERROR: K3s agent installation failed!"
+         exit 1
+     else
+         log_message "K3s agent installation succeeded."
+     fi
+ fi
+
+ log_message "=== Script completed at $(date) ==="
@@ -1,8 +1,11 @@
+ import json
  import os
  import hcl2
  from lark import Tree, Token
+ import logging
+ import re

-
+ logger = logging.getLogger("cluster_builder")
  def add_backend_config(backend_tf_path, conn_str, schema_name):
      """
      Adds a PostgreSQL backend configuration to a Terraform file.
@@ -14,7 +17,7 @@ def add_backend_config(backend_tf_path, conn_str, schema_name):
      if os.path.exists(backend_tf_path):
          with open(backend_tf_path) as f:
              if 'backend "pg"' in f.read():
-                 print("⚠️ Backend configuration already exists skipping.")
+                 logger.debug("⚠️ Backend config already exists, skipping: %s", backend_tf_path)
                  return

      # Build the backend configuration block
@@ -34,7 +37,7 @@ def add_backend_config(backend_tf_path, conn_str, schema_name):
      ) as f:  # Use "w" instead of "a" to create/overwrite the file
          f.write("\n".join(lines) + "\n")

-     print(f"✅ Added PostgreSQL backend configuration to {backend_tf_path}")
+     logger.debug("✅ Added PostgreSQL backend config to %s", backend_tf_path)


  def add_module_block(main_tf_path, module_name, config):
@@ -48,7 +51,7 @@ def add_module_block(main_tf_path, module_name, config):
      if os.path.exists(main_tf_path):
          with open(main_tf_path) as f:
              if f'module "{module_name}"' in f.read():
-                 print(f"⚠️ Module '{module_name}' already exists skipping.")
+                 logger.warning("⚠️ Module '%s' already exists, skipping in %s", module_name, main_tf_path)
                  return

      # Build the module block
@@ -60,6 +63,8 @@ def add_module_block(main_tf_path, module_name, config):
              v_str = "true" if v else "false"
          elif isinstance(v, (int, float)):
              v_str = str(v)
+         elif isinstance(v, (list, dict)):
+             v_str = json.dumps(v)
          elif v is None:
              continue
          else:
@@ -71,40 +76,51 @@ def add_module_block(main_tf_path, module_name, config):
      with open(main_tf_path, "a") as f:
          f.write("\n\n" + "\n".join(lines) + "\n")

-     print(f"✅ Added module '{module_name}' to {main_tf_path}")
+     logger.debug("✅ Added module '%s' to %s", module_name, main_tf_path)


  def is_target_module_block(tree: Tree, module_name: str) -> bool:
      """
      Check if the tree is a module block with the specified name.
      """
+     logger.info(f"Checking tree with data: {tree.data}, children count: {len(tree.children)}")
+     logger.info(f"Children types and values: {[(type(c), getattr(c, 'value', None)) for c in tree.children]}")
+
      if tree.data != "block":
+         logger.debug(f"Rejected: tree.data is '{tree.data}', expected 'block'")
          return False

      # Need at least 3 children: identifier, name, body
      if len(tree.children) < 3:
+         logger.debug(f"Rejected: tree has less than 3 children ({len(tree.children)})")
          return False

      # First child should be an identifier tree
      first_child = tree.children[0]
      if not isinstance(first_child, Tree) or first_child.data != "identifier":
+         logger.debug(f"Rejected: first child is not an identifier Tree (found {type(first_child)} with data '{getattr(first_child, 'data', None)}')")
          return False

      # First child should have a NAME token with 'module'
      if len(first_child.children) == 0 or not isinstance(first_child.children[0], Token):
+         logger.debug("Rejected: first child has no Token children")
          return False

-     if first_child.children[0].value != "module":
+     first_value = first_child.children[0].value
+     if first_value != "module":
+         logger.debug(f"Rejected: first child token value '{first_value}' is not 'module'")
          return False

-     # Second child should be a STRING_LIT token with module name
+     # Second child: could be a Token or Tree with Token child for module name
      second_child = tree.children[1]
+
      if not isinstance(second_child, Token) or second_child.value != f'"{module_name}"':
+         logger.debug(f"Second child check failed: type={type(second_child)}, value={getattr(second_child, 'value', None)} expected=\"{module_name}\"")
          return False

+     logger.info(f"Module block matched for module name '{module_name}'")
      return True

-
  def simple_remove_module(tree, module_name, removed=False):
      """
      A simpler function to remove module blocks that maintains the exact Tree structure
@@ -116,6 +132,9 @@ def simple_remove_module(tree, module_name, removed=False):
      body_node = tree.children[0]

      if isinstance(body_node, Tree) and body_node.data == "body":
+         # Debug: Log body node children
+         logger.debug("Body Node Children: %s", body_node.children)
+
          # Create new children list for the body node
          new_body_children = []
          skip_next = False
@@ -133,6 +152,7 @@ def simple_remove_module(tree, module_name, removed=False):
                  and is_target_module_block(child, module_name)
              ):
                  removed = True
+                 print(f"Module {module_name} found and removed.")  # Debug log

                  # Check if the next node is a new_line_or_comment, and skip it as well
                  if i + 1 < len(body_node.children):
@@ -158,14 +178,16 @@ def remove_module_block(main_tf_path, module_name: str):
      Removes a module block by name from main.tf for this cluster.
      """
      if not os.path.exists(main_tf_path):
-         print(f"⚠️ No main.tf found at {main_tf_path}")
+         logger.warning("⚠️ No main.tf found at %s", main_tf_path)
          return

      try:
          with open(main_tf_path, "r") as f:
              tree = hcl2.parse(f)
+         # Debug: Log the parsed tree structure
+         logger.debug("Parsed Tree: %s", tree)
      except Exception as e:
-         print(f"❌ Failed to parse HCL: {e}")
+         logger.error("❌ Failed to parse HCL in %s: %s", main_tf_path, e, exc_info=True)
          return

      # Process tree to remove target module block
@@ -173,8 +195,11 @@ def remove_module_block(main_tf_path, module_name: str):
      # If no modules were removed
      if not removed:
-         print(f"⚠️ No module named '{module_name}' found in {main_tf_path}")
+         logger.warning("⚠️ No module named '%s' found in %s", module_name, main_tf_path)
          return
+
+     # Debug: Log the final tree structure after removal
+     logger.debug("Final Tree after module removal: %s", new_tree)

      try:
          # Reconstruct HCL
@@ -184,9 +209,9 @@ def remove_module_block(main_tf_path, module_name: str):
          with open(main_tf_path, "w") as f:
              f.write(new_source)

-         print(f"🗑️ Removed module '{module_name}' from {main_tf_path}")
+         logger.info("🗑️ Removed module '%s' from %s", module_name, main_tf_path)
      except Exception as e:
-         print(f"❌ Failed to reconstruct HCL: {e}")
+         logger.error("❌ Failed to reconstruct HCL in %s: %s", main_tf_path, e, exc_info=True)
          # Print more detailed error information
          import traceback

@@ -222,10 +247,61 @@ def extract_template_variables(template_path):
          return variables

      except FileNotFoundError:
-         print(f"Warning: Template file not found: {template_path}")
+         logger.warning(f"⚠️ Template file not found: {template_path}")
          return {}

      except Exception as e:
          error_msg = f"Failed to extract variables from {template_path}: {e}"
-         print(f"Error: {error_msg}")
+         logger.error(f" {error_msg}")
          raise ValueError(error_msg)
+
+ def add_output_blocks(outputs_tf_path, module_name, output_names):
+     existing_text = ""
+
+     # Read existing content if the file exists
+     if os.path.exists(outputs_tf_path):
+         with open(outputs_tf_path, "r") as f:
+             existing_text = f.read()
+
+     lines_to_add = []
+     updated_lines = []
+
+     # Check and add output blocks
+     for output_name in output_names:
+         output_block = f'output "{output_name}" {{\n value = module.{module_name}.{output_name}\n}}'.strip()
+
+         if f'output "{output_name}"' in existing_text:
+             # Check if the output block already exists in the file
+             logger.debug(f"⚠️ Output '{output_name}' already exists in {outputs_tf_path}. Checking if it needs an update.")
+
+             # Only update if the value is None in the current output
+             if output_name in ["worker_ip", "ha_ip"] and "None" in existing_text:
+                 updated_lines.append(output_block)
+             elif output_block not in existing_text:
+                 # If it's there but not the same, we need to update it
+                 updated_lines.append(output_block)
+             else:
+                 logger.debug(f"Output '{output_name}' is already correctly defined in {outputs_tf_path}.")
+                 continue
+         else:
+             # If the output doesn't exist, add it
+             lines_to_add.append(output_block)
+
+     # Remove old output blocks before adding or updating new ones
+     if lines_to_add or updated_lines:
+         # Remove old output definitions for those outputs that will be replaced
+         for output_name in output_names:
+             existing_text = re.sub(
+                 f'output "{output_name}".*?}}', '', existing_text, flags=re.DOTALL
+             )
+
+         # Combine all new output blocks and updates to add
+         final_output = "\n\n".join(lines_to_add + updated_lines)
+
+         # Append new or updated blocks
+         with open(outputs_tf_path, "w") as f:
+             f.write(existing_text.strip() + "\n\n" + final_output + "\n")
+
+         logger.debug(f"✅ Added/updated outputs for module '{module_name}'")
+     else:
+         logger.debug(f"⚠️ No new outputs to add or update in {outputs_tf_path}.")
@@ -0,0 +1,339 @@
+ Metadata-Version: 2.4
+ Name: cluster-builder
+ Version: 0.3.2
+ Summary: Swarmchestrate cluster builder
+ Author-email: Gunjan <G.Kotak@westminster.ac.uk>, Jay <J.Deslauriers@westminster.ac.uk>
+ License: Apache2
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: names_generator==0.2.0
+ Requires-Dist: python-hcl2==7.2
+ Requires-Dist: lark-parser==0.12.0
+ Requires-Dist: python-dotenv==1.1.1
+ Requires-Dist: psycopg2-binary==2.9.10
+ Requires-Dist: yaspin==3.1.0
+ Dynamic: license-file
+
+ # Swarmchestrate - Cluster Builder
+
+ This repository contains the codebase for **cluster-builder**, which builds K3s clusters for Swarmchestrate using OpenTofu.
+
+ Key features:
+ - **Create**: Provisions infrastructure using OpenTofu and installs K3s.
+ - **Add**: Adds worker or HA nodes to existing clusters.
+ - **Remove**: Selectively removes nodes from existing clusters.
+ - **Delete**: Destroys the provisioned infrastructure when no longer required.
+
+ ---
+
+ ## Prerequisites
+
+ Before proceeding, ensure the following prerequisites are installed:
+
+ 1. **Git**: For cloning the repository.
+ 2. **Python**: Version 3.9 or higher.
+ 3. **pip**: Python package manager.
+ 4. **Make**: To run the provided `Makefile`.
+ 5. **PostgreSQL**: For storing OpenTofu state.
+ 6. (Optional) **Docker**: To create a dev Postgres database.
+ 7. For detailed instructions on **edge device requirements**, refer to the [Edge Device Requirements](docs/edge-requirements.md) document.
+
+ ---
+
+ ## Getting Started
+
+ ### 1. Clone the Repository
+
+ To get started, clone this repository:
+
+ ```bash
+ git clone https://github.com/Swarmchestrate/cluster-builder.git
+ ```
+
+ ### 2. Navigate to the Project Directory
+
+ ```bash
+ cd cluster-builder
+ ```
+
+ ### 3. Install Dependencies and Tools
+
+ Run the Makefile to install all necessary dependencies, including OpenTofu:
+
+ ```bash
+ make install
+ ```
+
+ This command will:
+ - Install Python dependencies listed in requirements.txt.
+ - Download and configure OpenTofu for infrastructure management.
+
+ ```bash
+ make db
+ ```
+
+ This command will:
+ - Spin up an empty dev Postgres DB (in Docker) for storing state.
+
+ The Makefile provides default database details (container name `pg-db`, with `-e POSTGRES_USER=admin -e POSTGRES_PASSWORD=adminpass -e POSTGRES_DB=swarmchestrate`); update these values or use them as they are.
+
+ For database setup as a service, refer to the [database setup as a service](docs/database_setup.md) document.
+
+ ### 4. Populate .env file with access config
+ The .env file is used to store environment variables required by the application. It contains configuration details for connecting to your cloud providers, the PostgreSQL database, and any other necessary resources.
+
+ #### 4.1. Rename or copy the example file to **.env**
+
+ ```bash
+ cp .env_example .env
+ ```
+
+ #### 4.2. Open the **.env** file and add the necessary configuration for your cloud providers and PostgreSQL:
+
+ ```ini
+ ## PG Configuration
+ POSTGRES_USER=postgres
+ POSTGRES_PASSWORD=secret
+ POSTGRES_HOST=db.example.com
+ POSTGRES_DATABASE=terraform_state
+ POSTGRES_SSLMODE=prefer
+
+ ## AWS Auth
+ TF_VAR_aws_region=us-west-2
+ TF_VAR_aws_access_key=AKIAXXXXXXXXXXXXXXXX
+ TF_VAR_aws_secret_key=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+
+ ## OpenStack Auth - AppCreds Mode
+ TF_VAR_openstack_auth_method=appcreds
+ TF_VAR_openstack_auth_url=https://openstack.example.com:5000
+ TF_VAR_openstack_application_credential_id=fdXXXXXXXXXXXXXXXX
+ TF_VAR_openstack_application_credential_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ TF_VAR_openstack_region=RegionOne
+
+ ## OpenStack Auth - User/Pass Mode
+ # TF_VAR_openstack_auth_method=userpass
+ # TF_VAR_openstack_auth_url=https://openstack.example.com:5000
+ # TF_VAR_openstack_region=RegionOne
+ # TF_VAR_openstack_user_name=myuser
+ # TF_VAR_openstack_password=mypassword
+ # TF_VAR_openstack_project_id=project-id-123
+ # TF_VAR_openstack_user_domain_name=Default
+ ```
+
+ ---
+
+ ## Basic Usage
+
+ ### Initialisation
+
+ ```python
+ from cluster_builder import Swarmchestrate
+
+ # Initialise the orchestrator
+ orchestrator = Swarmchestrate(
+     template_dir="/path/to/templates",
+     output_dir="/path/to/output"
+ )
+ ```
+
+ ### Creating a New Cluster
+
+ To create a new k3s cluster, use the **add_node** method with the **master** role:
+
+ ```python
+ # Configuration for a new cluster using the aws provider
+ config = {
+     "cloud": "aws",
+     "k3s_role": "master",
+     "ha": False,  # Set to True for high availability (HA) deployments
+     "instance_type": "t2.small",  # AWS instance type
+     "ssh_key_name": "g",  # SSH key name for AWS or OpenStack
+     "ssh_user": "ec2-user",  # SSH user for the instance
+     "ssh_private_key_path": "/workspaces/cluster-builder/scripts/g.pem",  # Path to SSH private key
+     "ami": "ami-0c0493bbac867d427",  # AMI ID for AWS (specific to region)
+     # Optional parameters
+     # If an existing SG is specified, it will be used directly with no port changes
+     "security_group_id": "sg-0123456789abcdef0",
+     # No security_group_id means a new SG will be created and these ports applied as rules
+     # These ports will be used ONLY if creating a new SG
+     "tcp_ports": [10020],  # Optional list of TCP ports to open
+     "udp_ports": [1003]  # Optional list of UDP ports to open
+ }
+
+ # Create the cluster (returns the cluster name)
+ cluster_name = orchestrator.add_node(config)
+ print(f"Created cluster: {cluster_name}")
+ ```
+
+ Note: Fetch the outputs from the master node and use them when adding a worker node; one way to do this is sketched below.
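
A sketch of fetching those outputs with the OpenTofu CLI, assuming the cluster's working directory location and the output names (neither is specified in the README):

```python
# Sketch: read the master node's outputs from OpenTofu state and feed them
# into the worker config. Directory layout and output names are assumptions.
import json
import subprocess

result = subprocess.run(
    ["tofu", "output", "-json"],
    cwd="/path/to/output/elastic_mcnulty",  # the cluster's OpenTofu directory (assumed)
    capture_output=True,
    text=True,
    check=True,
)
outputs = {name: data["value"] for name, data in json.loads(result.stdout).items()}

master_ip = outputs["master_ip"]  # assumed output names
k3s_token = outputs["k3s_token"]
```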
+
+ ### Adding Nodes to an Existing Cluster
+
+ To add worker or high-availability nodes to an existing cluster:
+
+ ```python
+ # Configuration for adding a worker node using the aws provider
+ worker_config = {
+     "cloud": "aws",
+     "k3s_role": "worker",  # Role can be 'worker' or 'ha'
+     "instance_type": "t2.small",  # AWS instance type
+     "ssh_key_name": "g",  # SSH key name
+     "ssh_user": "ec2-user",  # SSH user for the instance
+     "ssh_private_key_path": "/workspaces/cluster-builder/scripts/g.pem",  # Path to SSH private key
+     "ami": "ami-0c0493bbac867d427",  # AMI ID for AWS
+     # Additional parameters obtained after deploying the master node:
+     "master_ip": "12.13.14.15",  # IP address of the master node (required for worker/HA roles)
+     "cluster_name": "elastic_mcnulty",  # Name of the cluster
+     "k3s_token": "G4lm7wEaFuCCygeU",  # Token of the cluster
+     # Optional parameters
+     # If an existing SG is specified, it will be used directly with no port changes
+     "security_group_id": "sg-0123456789abcdef0",
+     # No security_group_id means a new SG will be created and these ports applied as rules
+     # These ports will be used ONLY if creating a new SG
+     "tcp_ports": [10020],  # Optional list of TCP ports to open
+     "udp_ports": [1003]  # Optional list of UDP ports to open
+ }
+
+ # Add the worker node
+ cluster_name = orchestrator.add_node(worker_config)
+ print(f"Added worker node to cluster: {cluster_name}")
+ ```
+
+ ### Removing a Specific Node
+
+ To remove a specific node from a cluster:
+
+ ```python
+ # Remove a node by its resource name
+ orchestrator.remove_node(
+     cluster_name="your-cluster-name",
+     resource_name="aws_eloquent_feynman"  # The resource identifier of the node
+ )
+ ```
+
+ The **remove_node** method:
+ 1. Destroys the node's infrastructure resources
+ 2. Removes the node's configuration from the cluster
+
+ ---
+
+ ### Destroying an Entire Cluster
+
+ To completely destroy a cluster and all its nodes:
+
+ ```python
+ # Destroy the entire cluster
+ orchestrator.destroy(
+     cluster_name="your-cluster-name"
+ )
+ ```
+
+ The **destroy** method:
+ 1. Destroys all infrastructure resources associated with the cluster
+ 2. Removes the cluster directory and configuration files
+
+ Note for **Edge Devices**:
+ Since the edge device is already provisioned, the `destroy` method will not remove K3s directly from the edge device. You will need to manually uninstall K3s from your edge device after the cluster is destroyed.
+
+ ---
+ ### Deploying Manifests
+
+ The deploy_manifests method copies Kubernetes manifests to the target cluster node.
+
+ ```python
+ orchestrator.deploy_manifests(
+     manifest_folder="path/to/manifests",
+     master_ip="MASTER_NODE_IP",
+     ssh_key_path="path/to/key.pem",
+     ssh_user="USERNAME"
+ )
+ ```
+
+ ## Important Configuration Requirements
+ ### High Availability Flag (ha):
+
+ - The ha flag should be set to True for high-availability deployments (usually when adding an HA or worker node to an existing master).
+
+ ### SSH Credentials:
+
+ - For all roles (k3s_role="master", k3s_role="worker", k3s_role="ha"), you must specify both ssh_user and ssh_private_key_path, except for edge devices.
+
+ - The ssh_private_key_path should be the path to your SSH private key file. Ensure that the SSH key is copied to the specified path before running the script.
+
+ - The ssh_key_name and the ssh_private_key_path are different: make sure your SSH key is placed at the provided ssh_private_key_path.
+
+ ### Ports:
+ You can specify custom ports for your nodes in the tcp_ports and udp_ports fields. However, certain ports are required for Kubernetes deployment (even if not specified explicitly); a conservative config sketch follows the lists below:
+
+ **TCP Ports:**
+
+ - 2379-2380: For etcd communication
+ - 6443: K3s API server
+ - 10250: Kubelet metrics
+ - 51820-51821: WireGuard (for encrypted networking)
+ - 22: SSH access
+ - 80, 443: HTTP/HTTPS access
+ - 53: DNS (CoreDNS)
+ - 5432: PostgreSQL access (master node)
+
+ **UDP Ports:**
+
+ - 8472: VXLAN for Flannel
+ - 53: DNS
+
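A conservative sketch that declares these required ports explicitly alongside the custom ports from the earlier examples (the README does not state whether cluster-builder opens the required ports automatically when it creates a new security group, so listing them all is an assumption):

```python
# Conservative sketch: enumerate the required ports from the lists above,
# plus the custom application ports used in the earlier examples.
config["tcp_ports"] = [
    22, 53, 80, 443,   # SSH, DNS, HTTP/HTTPS
    2379, 2380,        # etcd communication
    5432,              # PostgreSQL (master node)
    6443, 10250,       # K3s API server, kubelet metrics
    51820, 51821,      # WireGuard
    10020,             # custom application port from the examples
]
config["udp_ports"] = [53, 8472, 1003]  # DNS, Flannel VXLAN, custom port
```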
+ ### OpenStack:
+ When provisioning on OpenStack, you should provide a value for 'floating_ip_pool' from which floating IPs can be allocated for the instance. If not specified, OpenTofu will not assign a floating IP.
+
+ ---
+
+ ## Advanced Usage
+
+ ### Dry Run Mode
+
+ All operations support a **dryrun** parameter, which validates the configuration without making changes. A node created with dryrun should be removed with dryrun.
+
+ ```python
+ # Validate configuration without deploying
+ orchestrator.add_node(config, dryrun=True)
+
+ # Validate removal without destroying
+ orchestrator.remove_node(cluster_name, resource_name, dryrun=True)
+
+ # Validate destruction without destroying
+ orchestrator.destroy(cluster_name, dryrun=True)
+ ```
+
+ ### Custom Cluster Names
+
+ By default, cluster names are generated automatically. To specify a custom name:
+
+ ```python
+ config = {
+     "cloud": "aws",
+     "k3s_role": "master",
+     "cluster_name": "production-cluster",
+     # ... other configuration ...
+ }
+
+ orchestrator.add_node(config)
+ ```
+
+ ---
+
+ ## Template Structure
+
+ Templates should be organised as follows (a sketch of inspecting a template's variables follows this list):
+ - `templates/` - Base directory for templates
+ - `templates/{cloud}/` - Terraform modules for each cloud provider
+ - `templates/{role}_user_data.sh.tpl` - Node initialisation scripts
+ - `templates/{cloud}_provider.tf.j2` - Provider configuration templates
+
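To see which variables a particular template expects before assembling a config, the `extract_template_variables` helper shipped in `cluster_builder/utils/hcl.py` can be used. A sketch, with the template path taken from the package's file list and the helper's exact return shape assumed:

```python
# Sketch: inspect a role template for the ${...} variables it references.
# The worker template, for instance, uses public_ip, master_ip, k3s_token
# and resource_name. The helper's exact return shape is assumed here.
from cluster_builder.utils.hcl import extract_template_variables

variables = extract_template_variables("templates/worker_user_data.sh.tpl")
print(variables)
```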
+ ---
+
+ ## DEMO
+ Some test scripts have been created to demonstrate the functionality of the cluster builder. These scripts can be referred to for understanding how the system works and for testing various configurations.
+
+ For detailed service deployment examples and to explore the test scripts, refer to the [test scripts](docs/test-scripts.md) document.
+
+ ---
@@ -0,0 +1,25 @@
+ cluster_builder/__init__.py,sha256=p2Rb2BTVm-ScqCKE38436WsItY1BjVAnvx7zwmneSLs,256
+ cluster_builder/swarmchestrate.py,sha256=LwIRR1HGD6y6XKhAxDU-T68vIPjtRIeaoaHX95wM_ZM,23139
+ cluster_builder/config/__init__.py,sha256=HqCua7nqa0m4RNrH-wAw-GNZ8PfmKOeYs2Ur81xGIKU,222
+ cluster_builder/config/cluster.py,sha256=0CASucE_npbEmGnyR3UmF0v836tcx9HthAiotCh4sSo,5116
+ cluster_builder/config/postgres.py,sha256=nQ5QxxI00GmGAbDl_9I1uEU2eBy1D2eJWGzhsBYUFMc,3354
+ cluster_builder/infrastructure/__init__.py,sha256=e8XY3K7Y6FJS-ODr5ufB_myV7btFvYHnwA9sxkob8o8,247
+ cluster_builder/infrastructure/executor.py,sha256=oymr_ZP8xAOcNDAuGCp1v4F81-chR3VRotoD732l4q0,2874
+ cluster_builder/infrastructure/templates.py,sha256=TAdNP-012L76dOYsd7JVIQOD4K9XNobK9QWfOoYrbeU,4084
+ cluster_builder/templates/aws_provider.tf,sha256=VIRuH_-8pYtJ0Mkck38WUSszHiN3DesFOWkx75aoOIY,425
+ cluster_builder/templates/deploy_manifest.tf,sha256=bzvcK-5iAFDMGU32YR5os6qXF1p9d7qWqlv7Kqm72Qo,1106
+ cluster_builder/templates/ha_user_data.sh.tpl,sha256=njvsBRjdKBuUaYbujJ689wI2sfpoHVpr2kkbG9sKzpw,981
+ cluster_builder/templates/master_user_data.sh.tpl,sha256=g_uaehoi9Pm_vCx_vJhXCUqAt7DpcqnAi_QPm5VOgWw,1481
+ cluster_builder/templates/openstack_provider.tf,sha256=wFUmkws5xSTOM1GW0Jd8JD__VAUBPNF4j1amo2SRyVM,2049
+ cluster_builder/templates/worker_user_data.sh.tpl,sha256=9WP6qe6DGMHgFds_loI1N7DEuMeOI6U4SA-g3GYIIIU,1034
+ cluster_builder/templates/aws/main.tf,sha256=v_mR6tdH4-E1SKI8FNqfgl-gU2POrKqRfkXCXV1DGFQ,4875
+ cluster_builder/templates/edge/main.tf,sha256=8sBL_ofFfhMEH2biPRmB7X4H_SG3JgYygCOEo90yDTY,2255
+ cluster_builder/templates/openstack/main.tf,sha256=uMzArcNE0wbx23Y0x9B7jGIiWIgJdQUT6CPw5TjP160,7279
+ cluster_builder/utils/__init__.py,sha256=TeronqOND-SIfi0e76lwD1HfUiPO2h2ZfYhLIwZ3Aks,145
+ cluster_builder/utils/hcl.py,sha256=VptRAt2Cy0AxowqMJBZ60KGe4Uptji3Y9WiYrDQsrqY,11534
+ cluster_builder/utils/logging.py,sha256=rwDViuqG8PMcXJWHOdtdgbGhWMnbSZ4MwfKsXHxu2B4,1242
+ cluster_builder-0.3.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ cluster_builder-0.3.2.dist-info/METADATA,sha256=7M-LAWWdwURezuH4N169kDUZJPjT89ERxH6PlNJ5lOU,10937
+ cluster_builder-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ cluster_builder-0.3.2.dist-info/top_level.txt,sha256=fTW8EW1mcWoeWprjwxSHRWpqfXYX8iN-ByEt8HPXIcs,16
+ cluster_builder-0.3.2.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,40 +0,0 @@
- locals {
-   cluster_name   = "{{ cluster_name }}"
-   edge_device_ip = "{{ edge_device_ip }}"
-   k3s_token      = "{{ k3s_token }}"
-   k3s_role       = "{{ k3s_role }}"
-   random_name    = "{{ random_name }}"
-
-   {% if k3s_role != "master" %}
-   master_ip = "{{ master_ip }}"
-   {% endif %}
- }
-
- resource "null_resource" "deploy_k3s_edge" {
-
-   connection {
-     type     = "ssh"
-     user     = "{{ ssh_user }}"
-     password = "{{ ssh_password }}"
-     host     = local.edge_device_ip
-   }
-
-   provisioner "file" {
-     source      = "${local.k3s_role}_user_data.sh"
-     destination = "/tmp/edge_user_data.sh"
-   }
-
-   provisioner "remote-exec" {
-     inline = [
-       "chmod +x /tmp/edge_user_data.sh",
-       "sudo K3S_TOKEN='${local.k3s_token}' {% if k3s_role != 'master' %}MASTER_IP='${local.master_ip}'{% endif %} /tmp/edge_user_data.sh"
-     ]
-   }
-
-   triggers = {
-     Name         = "K3s-${local.k3s_role}-${local.cluster_name}-${local.random_name}"
-     cluster_name = local.cluster_name
-     role         = local.k3s_role
-     random_name  = local.random_name
-   }
- }