cluster-builder 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cluster-builder might be problematic.
- cluster_builder/config/cluster.py +30 -5
- cluster_builder/config/postgres.py +4 -1
- cluster_builder/infrastructure/executor.py +48 -48
- cluster_builder/infrastructure/templates.py +2 -2
- cluster_builder/swarmchestrate.py +261 -47
- cluster_builder/templates/aws/main.tf +109 -46
- cluster_builder/templates/copy_manifest.tf +36 -0
- cluster_builder/templates/edge/main.tf +98 -0
- cluster_builder/templates/ha_user_data.sh.tpl +32 -1
- cluster_builder/templates/master_user_data.sh.tpl +36 -5
- cluster_builder/templates/openstack/main.tf +218 -0
- cluster_builder/templates/openstack_provider.tf +70 -0
- cluster_builder/templates/worker_user_data.sh.tpl +33 -1
- cluster_builder/utils/hcl.py +91 -15
- cluster_builder-0.3.1.dist-info/METADATA +321 -0
- cluster_builder-0.3.1.dist-info/RECORD +25 -0
- {cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/WHEEL +1 -1
- cluster_builder/templates/edge/main.tf.j2 +0 -40
- cluster_builder/templates/openstack/main.tf.j2 +0 -76
- cluster_builder/templates/openstack/network_security_group.tf.j2 +0 -34
- cluster_builder-0.3.0.dist-info/METADATA +0 -264
- cluster_builder-0.3.0.dist-info/RECORD +0 -24
- {cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/top_level.txt +0 -0
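The per-file hunks below come from the registry's automated comparison of the two wheels. For readers who want to double-check them, here is a minimal, illustrative sketch (assuming both wheel files have already been downloaded; the local filenames are placeholders) that lists added and removed archive members and prints a unified diff for members present in both versions:

```python
# Illustrative sketch only: compare two locally downloaded wheel files.
# The filenames below are placeholders for wherever the 0.3.0 and 0.3.1 wheels were saved.
import difflib
import zipfile

OLD_WHEEL = "cluster_builder-0.3.0-py3-none-any.whl"
NEW_WHEEL = "cluster_builder-0.3.1-py3-none-any.whl"

with zipfile.ZipFile(OLD_WHEEL) as old_whl, zipfile.ZipFile(NEW_WHEEL) as new_whl:
    old_names = set(old_whl.namelist())
    new_names = set(new_whl.namelist())

    print("Added files:", sorted(new_names - old_names))
    print("Removed files:", sorted(old_names - new_names))

    # Unified diff for members present in both wheels.
    for name in sorted(old_names & new_names):
        old_lines = old_whl.read(name).decode("utf-8", errors="replace").splitlines()
        new_lines = new_whl.read(name).decode("utf-8", errors="replace").splitlines()
        for line in difflib.unified_diff(
            old_lines, new_lines, fromfile=f"0.3.0/{name}", tofile=f"0.3.1/{name}", lineterm=""
        ):
            print(line)
```

The added and removed file sets from this sketch should line up with the file list above, though the rendering of the per-file hunks will differ from the registry viewer.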
cluster_builder/templates/openstack/main.tf.j2
@@ -1,76 +0,0 @@
-provider "openstack" {
-  auth_url = "{{ auth_url }}"
-  application_credential_id = "{{ application_credential_id }}"
-  application_credential_secret = "{{ application_credential_secret }}"
-  region = "{{ region }}"
-}
-
-locals {
-  cluster_name = "{{ cluster_name }}"
-  random_name = "{{ random_name }}"
-}
-
-# Common variables for HA and worker nodes
-{% if k3s_role != "master" %}
-variable "master_ip" {
-  description = "Master node IP"
-  type = string
-}
-{% endif %}
-
-# Network common to all nodes
-resource "openstack_networking_network_v2" "cluster_network" {
-  name = "network-${local.cluster_name}"
-}
-
-# Block storage for each node role
-resource "openstack_blockstorage_volume_v3" "root_volume" {
-  name = "K3s-{{ 'Master-Node' if k3s_role == 'master' else ('HA-Node' if k3s_role == 'ha' else 'Worker-Node') }}-volume-${local.cluster_name}-${local.random_name}"
-  size = "{{ size }}"
-  volume_type = "{{ volume_type }}"
-  image_id = "{{ image_id }}"
-}
-
-# Compute instance for each role
-resource "openstack_compute_instance_v2" "k3s_node" {
-  name = "K3s-{{ 'Master-Node' if k3s_role == 'master' else ('HA-Server' if k3s_role == 'ha' else 'Worker-Node') }}-${local.cluster_name}-${local.random_name}"
-  flavor_id = "{{ flavor_id }}"
-  key_pair = "{{ ssh_key_name }}"
-  security_groups = [
-    openstack_networking_secgroup_v2.k3s_security_group.name
-  ]
-
-  block_device {
-    uuid = openstack_blockstorage_volume_v3.root_volume.id
-    source_type = "volume"
-    destination_type = "volume"
-    boot_index = 0
-    delete_on_termination = true
-  }
-
-  network {
-    uuid = openstack_networking_network_v2.cluster_network.id
-  }
-
-  user_data = templatefile(
-    "{{ 'master_user_data.sh.tpl' if k3s_role == 'master' else ('ha_user_data.sh.tpl' if k3s_role == 'ha' else 'worker_user_data.sh.tpl') }}",
-    {
-      k3s_token = "{{ k3s_token }}"{% if k3s_role != 'master' %},
-      master_ip = var.master_ip
-      {% endif %}
-    }
-  )
-}
-
-{% if k3s_role == "master" %}
-# Outputs only for master node
-output "cluster_name" {
-  description = "The unique cluster name"
-  value = local.cluster_name
-}
-
-output "master_ip" {
-  description = "The floating IP of the master node"
-  value = openstack_compute_instance_v2.k3s_node.access_ip_v4
-}
-{% endif %}
cluster_builder/templates/openstack/network_security_group.tf.j2
@@ -1,34 +0,0 @@
-resource "openstack_networking_secgroup_v2" "k3s_security_group" {
-  name = "k3s-security-group-${local.cluster_name}-${local.random_name}" # Add the cluster name here
-  description = "Security group for K3s nodes"
-
-  tags = [
-    "K3s-Security-Group-${local.cluster_name}-${local.random_name}"
-  ]
-}
-{% set ingress_rules = [
-  {"port_min": 2379, "port_max": 2380, "protocol": "tcp", "description": "Embedded etcd communication"},
-  {"port_min": 6443, "port_max": 6443, "protocol": "tcp", "description": "K3s API server access"},
-  {"port_min": 8472, "port_max": 8472, "protocol": "udp", "description": "Flannel VXLAN communication"},
-  {"port_min": 10250, "port_max": 10250, "protocol": "tcp", "description": "Kubelet metrics"},
-  {"port_min": 51820, "port_max": 51820, "protocol": "udp", "description": "Wireguard IPv4"},
-  {"port_min": 51821, "port_max": 51821, "protocol": "udp", "description": "Wireguard IPv6"},
-  {"port_min": 5001, "port_max": 5001, "protocol": "tcp", "description": "Embedded registry (Spegel)"},
-  {"port_min": 22, "port_max": 22, "protocol": "tcp", "description": "SSH access"},
-  {"port_min": 80, "port_max": 80, "protocol": "tcp", "description": "HTTP access"},
-  {"port_min": 443, "port_max": 443, "protocol": "tcp", "description": "HTTPS access"},
-  {"port_min": 53, "port_max": 53, "protocol": "udp", "description": "DNS for CoreDNS"},
-  {"port_min": 5432, "port_max": 5432, "protocol": "tcp", "description": "pg database access"}
-] %}
-
-{% for rule in ingress_rules %}
-resource "openstack_networking_secgroup_rule_v2" "{{ rule.description | replace(' ', '_') | replace('(', '') | replace(')', '') | lower }}" {
-  direction = "ingress"
-  ethertype = "IPv4"
-  protocol = "{{ rule.protocol }}"
-  port_range_min = {{ rule.port_min }}
-  port_range_max = {{ rule.port_max }}
-  remote_ip_prefix = "0.0.0.0/0"
-  security_group_id = openstack_networking_secgroup_v2.k3s_security_group.id
-}
-{% endfor %}
cluster_builder-0.3.0.dist-info/METADATA
@@ -1,264 +0,0 @@
-Metadata-Version: 2.4
-Name: cluster-builder
-Version: 0.3.0
-Summary: Swarmchestrate cluster builder
-Author-email: Gunjan <G.Kotak@westminster.ac.uk>, Jay <J.Deslauriers@westminster.ac.uk>
-License: Apache2
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: names_generator
-Requires-Dist: python-hcl2
-Requires-Dist: lark-parser
-Requires-Dist: python-dotenv
-Dynamic: license-file
-
-# Swarmchestrate - Cluster Builder
-
-This repository contains the codebase for **[cluster-builder]**, which builds K3s clusters for Swarmchestrate using OpenTofu.
-
-Key features:
-- **Create**: Provisions infrastructure using OpenTofu and installs K3s.
-- **Add**: Add worker or HA nodes to existing clusters.
-- **Remove**: Selectively remove nodes from existing clusters.
-- **Delete**: Destroys the provisioned infrastructure when no longer required.
-
----
-
-## Prerequisites
-
-Before proceeding, ensure the following prerequisites are installed:
-
-1. **Git**: For cloning the repository.
-2. **Python**: Version 3.9 or higher.
-3. **pip**: Python package manager.
-4. **OpenTofu**: Version 1.6 or higher for infrastructure provisioning.
-6. **Make**: To run the provided `Makefile`.
-7. **PostgreSQL**: For storing OpenTofu state.
-8. (Optional) **Docker**: To create a dev Postgres
----
-
-## Getting Started
-
-### 1. Clone the Repository
-
-To get started, clone this repository:
-
-```bash
-git clone https://github.com/Swarmchestrate/cluster-builder.git
-```
-
-### 2. Navigate to the Project Directory
-
-```bash
-cd cluster-builder
-```
-
-### 3. Install Dependencies and Tools
-
-Run the Makefile to install all necessary dependencies, including OpenTofu:
-
-```bash
-make install
-```
-
-This command will:
-- Install Python dependencies listed in requirements.txt.
-- Download and configure OpenTofu for infrastructure management.
-
-**Optional**
-
-```bash
-make db
-```
-
-This command will:
-- Spin up an empty dev Postgres DB (in Docker) for storing state
-
-### 4. Populate .env file with access config
-
-First, rename or copy the example file to `.env`
-
-```bash
-cp .env_example .env
-```
-
-Then populate postgres connection details and needed cloud credential data.
-
-```
-## PG Configuration
-POSTGRES_USER=postgres
-POSTGRES_PASSWORD=secret
-POSTGRES_HOST=db.example.com
-POSTGRES_DATABASE=terraform_state
-POSTGRES_SSLMODE=prefer
-
-## AWS Auth
-AWS_REGION=us-west-2
-AWS_ACCESS_KEY=AKIAXXXXXXXXXXXXXXXX
-AWS_SECRET_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-```
-
----
-
-## Basic Usage
-
-### Initialisation
-
-```python
-from cluster_builder import Swarmchestrate
-
-# Initialise the orchestrator
-orchestrator = Swarmchestrate(
-    template_dir="/path/to/templates",
-    output_dir="/path/to/output"
-)
-```
-
-### Creating a New Cluster
-
-To create a new k3s cluster, use the `add_node` method with the `master` role:
-
-```python
-# Configuration for a new cluster
-config = {
-    "cloud": "aws",
-    "k3s_role": "master",
-    "ami": "ami-0123456789abcdef",
-    "instance_type": "t3.medium",
-    "ssh_key_name": "your-ssh-key",
-    "k3s_token": "your-k3s-token"
-}
-
-# Create the cluster (returns the cluster name)
-cluster_name = orchestrator.add_node(config)
-print(f"Created cluster: {cluster_name}")
-```
-
-### Adding Nodes to an Existing Cluster
-
-To add worker or high-availability nodes to an existing cluster:
-
-```python
-# Configuration for adding a worker node
-worker_config = {
-    "cloud": "aws",
-    "k3s_role": "worker", # can be "worker" or "ha"
-    "master_ip": "1.2.3.4", # IP of the master node
-    "cluster_name": "existing-cluster-name", # specify an existing cluster
-    "ami": "ami-0123456789abcdef",
-    "instance_type": "t2.medium",
-    "ssh_key_name": "your-ssh-key",
-    "k3s_token": "k3s-cluster-token" # Token of existing cluster
-}
-
-# Add the worker node
-cluster_name = orchestrator.add_node(worker_config)
-print(f"Added worker node to cluster: {cluster_name}")
-```
-
-Important requirements:
-- For `k3s_role="worker"` or `k3s_role="ha"`, you must specify a `master_ip`
-- For `k3s_role="master"`, you must not specify a `master_ip`
-
-### Removing a Specific Node
-
-To remove a specific node from a cluster:
-
-```python
-# Remove a node by its resource name
-orchestrator.remove_node(
-    cluster_name="your-cluster-name",
-    resource_name="aws_eloquent_feynman" # The resource identifier of the node
-)
-```
-
-The `remove_node` method:
-1. Destroys the node's infrastructure resources
-2. Removes the node's configuration from the cluster
-
-### Destroying an Entire Cluster
-
-To completely destroy a cluster and all its nodes:
-
-```python
-# Destroy the entire cluster
-orchestrator.destroy(
-    cluster_name="your-cluster-name"
-)
-```
-
-The `destroy` method:
-1. Destroys all infrastructure resources associated with the cluster
-2. Removes the cluster directory and configuration files
-
-## Advanced Usage
-
-### Dry Run Mode
-
-All operations support a `dryrun` parameter, which validates the configuration
-without making changes. A node created with dryrun should be removed with dryrun.
-
-```python
-# Validate configuration without deploying
-orchestrator.add_node(config, dryrun=True)
-
-# Validate removal without destroying
-orchestrator.remove_node(cluster_name, resource_name, dryrun=True)
-
-# Validate destruction without destroying
-orchestrator.destroy(cluster_name, dryrun=True)
-```
-
-### Custom Cluster Names
-
-By default, cluster names are generated automatically. To specify a custom name:
-
-```python
-config = {
-    "cloud": "aws",
-    "k3s_role": "master",
-    "cluster_name": "production-cluster",
-    # ... other configuration ...
-}
-
-orchestrator.add_node(config)
-```
-
----
-
-## Template Structure
-
-Templates should be organised as follows:
-- `templates/` - Base directory for templates
-- `templates/{cloud}/` - Terraform modules for each cloud provider
-- `templates/{role}_user_data.sh.tpl` - Node initialisation scripts
-- `templates/{cloud}_provider.tf.j2` - Provider configuration templates
-
----
-
-## Edge Device Requirements
-
-To connect **edge devices** as part of your K3s cluster, ensure that the following **ports are open** on each edge device to enable communication within nodes:
-
-### Inbound Rules:
-
-| Port Range| Protocol| Purpose |
-|-----------|---------|-------------------------------------------------------------|
-| 2379-2380 | TCP | Internal servers communication for embedded etcd |
-| 6443 | TCP | K3s API server communication |
-| 8472 | UDP | Flannel VXLAN (network overlay) |
-| 10250 | TCP | Kubelet metrics and communication |
-| 51820 | UDP | WireGuard IPv4 (for encrypted networking) |
-| 51821 | UDP | WireGuard IPv6 (for encrypted networking) |
-| 5001 | TCP | Embedded registry (Spegel) |
-| 22 | TCP | SSH access for provisioning and management |
-| 80 | TCP | HTTP communication for web access |
-| 443 | TCP | HTTPS communication for secure access |
-| 53 | UDP | DNS (CoreDNS) for internal service discovery |
-| 5432 | TCP | PostgreSQL database access |
-
-### Outbound Rule:
-
-| Port Range| Protocol | Purpose |
-|-----------|----------|--------------------------------------------------------|
-| all | all | Allow all outbound traffic for the system's operations |
cluster_builder-0.3.0.dist-info/RECORD
@@ -1,24 +0,0 @@
-cluster_builder/__init__.py,sha256=p2Rb2BTVm-ScqCKE38436WsItY1BjVAnvx7zwmneSLs,256
-cluster_builder/swarmchestrate.py,sha256=cIlnKvKQ3jrefWCoqzAO5dPg2s2tnQA1oAzz0NBSJPA,13242
-cluster_builder/config/__init__.py,sha256=HqCua7nqa0m4RNrH-wAw-GNZ8PfmKOeYs2Ur81xGIKU,222
-cluster_builder/config/cluster.py,sha256=fKn3rH0aOMhbU-ejyv2sGDBwpmySrfeRKkN60O9kvyg,4031
-cluster_builder/config/postgres.py,sha256=unrCox0x0037T7N1NJ_GXYZSvVBaOvHb_mSasHUQtHA,2852
-cluster_builder/infrastructure/__init__.py,sha256=e8XY3K7Y6FJS-ODr5ufB_myV7btFvYHnwA9sxkob8o8,247
-cluster_builder/infrastructure/executor.py,sha256=PJuNFT-L6QlSTq37jVPgVAvsoT_GxSOuCOqYrvmjSlU,3113
-cluster_builder/infrastructure/templates.py,sha256=WG9cjnPgMl2GYzdrFpbo3VUu7DEuiAwX9RuNpldNRo8,4082
-cluster_builder/templates/aws_provider.tf,sha256=VIRuH_-8pYtJ0Mkck38WUSszHiN3DesFOWkx75aoOIY,425
-cluster_builder/templates/ha_user_data.sh.tpl,sha256=tXrsT0E1i-DyWL9YVXG_hWGg7vREeuMArvFAKti6nmY,116
-cluster_builder/templates/master_user_data.sh.tpl,sha256=UYteRuhOUgljU0LxBusYUQjFDygO49xC6rTxUlfOizw,197
-cluster_builder/templates/worker_user_data.sh.tpl,sha256=jk6NouCBCwF3PntOlMtClXPssaqxGKjrLrVHOuE6ZEU,115
-cluster_builder/templates/aws/main.tf,sha256=j6GXNIjQ-pZtEWTtPAD231z85hdb6gXgz5ummYugkto,3073
-cluster_builder/templates/edge/main.tf.j2,sha256=9TDRS-KplWZ18VuYtAAghMZMECbTk8sgyt47c6z73Jo,1069
-cluster_builder/templates/openstack/main.tf.j2,sha256=HUgeIo-117ICF_JFoiCN4qbDfF9_wde3YwAPxJ9BKd8,2391
-cluster_builder/templates/openstack/network_security_group.tf.j2,sha256=s9fxaiYY-FKESHnraCyygDDCnOd46Q3WCol4kJUq0DE,1984
-cluster_builder/utils/__init__.py,sha256=TeronqOND-SIfi0e76lwD1HfUiPO2h2ZfYhLIwZ3Aks,145
-cluster_builder/utils/hcl.py,sha256=9PeZLTdWY0XspypiBYqYOJwjRmt7L8NlyqCNB2ymXVc,7611
-cluster_builder/utils/logging.py,sha256=rwDViuqG8PMcXJWHOdtdgbGhWMnbSZ4MwfKsXHxu2B4,1242
-cluster_builder-0.3.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-cluster_builder-0.3.0.dist-info/METADATA,sha256=vpHfmBpp3k2IgBvBhSzHUb_mJne8RgOlgkfjs_DRBZc,7541
-cluster_builder-0.3.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-cluster_builder-0.3.0.dist-info/top_level.txt,sha256=fTW8EW1mcWoeWprjwxSHRWpqfXYX8iN-ByEt8HPXIcs,16
-cluster_builder-0.3.0.dist-info/RECORD,,
{cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/licenses/LICENSE
File without changes

{cluster_builder-0.3.0.dist-info → cluster_builder-0.3.1.dist-info}/top_level.txt
File without changes