cluster-builder 0.2.0__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cluster-builder might be problematic. See the package registry's advisory page for more details.

Files changed (31)
  1. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/PKG-INFO +1 -2
  2. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/config/cluster.py +1 -1
  3. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/infrastructure/__init__.py +2 -1
  4. cluster_builder-0.2.0/cluster_builder/templates/manager.py → cluster_builder-0.3.0/cluster_builder/infrastructure/templates.py +2 -4
  5. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/swarmchestrate.py +2 -2
  6. cluster_builder-0.3.0/cluster_builder/templates/aws/main.tf +93 -0
  7. cluster_builder-0.3.0/cluster_builder/templates/aws_provider.tf +22 -0
  8. cluster_builder-0.3.0/cluster_builder/templates/edge/main.tf.j2 +40 -0
  9. cluster_builder-0.3.0/cluster_builder/templates/ha_user_data.sh.tpl +2 -0
  10. cluster_builder-0.3.0/cluster_builder/templates/master_user_data.sh.tpl +6 -0
  11. cluster_builder-0.3.0/cluster_builder/templates/openstack/main.tf.j2 +76 -0
  12. cluster_builder-0.3.0/cluster_builder/templates/openstack/network_security_group.tf.j2 +34 -0
  13. cluster_builder-0.3.0/cluster_builder/templates/worker_user_data.sh.tpl +2 -0
  14. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder.egg-info/PKG-INFO +1 -2
  15. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder.egg-info/SOURCES.txt +9 -2
  16. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder.egg-info/requires.txt +0 -1
  17. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/pyproject.toml +7 -2
  18. cluster_builder-0.2.0/cluster_builder/templates/__init__.py +0 -7
  19. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/LICENSE +0 -0
  20. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/README.md +0 -0
  21. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/__init__.py +0 -0
  22. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/config/__init__.py +0 -0
  23. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/config/postgres.py +0 -0
  24. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/infrastructure/executor.py +0 -0
  25. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/utils/__init__.py +0 -0
  26. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/utils/hcl.py +0 -0
  27. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder/utils/logging.py +0 -0
  28. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder.egg-info/dependency_links.txt +0 -0
  29. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/cluster_builder.egg-info/top_level.txt +0 -0
  30. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/setup.cfg +0 -0
  31. {cluster_builder-0.2.0 → cluster_builder-0.3.0}/tests/test_hcl.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cluster-builder
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: Swarmchestrate cluster builder
5
5
  Author-email: Gunjan <G.Kotak@westminster.ac.uk>, Jay <J.Deslauriers@westminster.ac.uk>
6
6
  License: Apache2
@@ -9,7 +9,6 @@ License-File: LICENSE
9
9
  Requires-Dist: names_generator
10
10
  Requires-Dist: python-hcl2
11
11
  Requires-Dist: lark-parser
12
- Requires-Dist: jinja2
13
12
  Requires-Dist: python-dotenv
14
13
  Dynamic: license-file
15
14
 
@@ -7,7 +7,7 @@ import logging
7
7
 
8
8
  from names_generator import generate_name
9
9
 
10
- from cluster_builder.templates.manager import TemplateManager
10
+ from cluster_builder.infrastructure import TemplateManager
11
11
 
12
12
  logger = logging.getLogger("swarmchestrate")
13
13
 
@@ -3,5 +3,6 @@ Infrastructure management for the Cluster Builder.
3
3
  """
4
4
 
5
5
  from cluster_builder.infrastructure.executor import CommandExecutor
6
+ from cluster_builder.infrastructure.templates import TemplateManager
6
7
 
7
- __all__ = ["CommandExecutor"]
8
+ __all__ = ["CommandExecutor", "TemplateManager"]
@@ -16,10 +16,8 @@ class TemplateManager:
16
16
 
17
17
  def __init__(self):
18
18
  """Initialise the TemplateManager."""
19
- current_dir = os.path.dirname(os.path.abspath(__file__)) # templates directory
20
- self.base_dir = os.path.dirname(
21
- os.path.dirname(current_dir)
22
- ) # Go up two levels
19
+ current_dir = os.path.dirname(os.path.abspath(__file__))
20
+ self.base_dir = os.path.dirname(current_dir) # templates directory
23
21
  self.templates_dir = os.path.join(self.base_dir, "templates")
24
22
  logger.debug(
25
23
  f"Initialised TemplateManager with templates_dir={self.templates_dir}"
@@ -11,8 +11,8 @@ from dotenv import load_dotenv
11
11
 
12
12
  from cluster_builder.config.postgres import PostgresConfig
13
13
  from cluster_builder.config.cluster import ClusterConfig
14
- from cluster_builder.templates.manager import TemplateManager
15
- from cluster_builder.infrastructure.executor import CommandExecutor
14
+ from cluster_builder.infrastructure import TemplateManager
15
+ from cluster_builder.infrastructure import CommandExecutor
16
16
  from cluster_builder.utils import hcl
17
17
 
18
18
  logger = logging.getLogger("swarmchestrate")
@@ -0,0 +1,93 @@
1
+ # variables.tf
2
+ variable "cluster_name" {}
3
+ variable "resource_name" {}
4
+ variable "k3s_role" {}
5
+ variable "master_ip" {
6
+ default = null
7
+ }
8
+ variable "ami" {}
9
+ variable "instance_type" {}
10
+ variable "ssh_key_name" {}
11
+ variable "k3s_token" {}
12
+ variable "cloud" {
13
+ default = null
14
+ }
15
+ variable "ha" {
16
+ default = null
17
+ }
18
+
19
+ # main.tf
20
+ resource "aws_instance" "k3s_node" {
21
+ ami = var.ami
22
+ instance_type = var.instance_type
23
+ key_name = var.ssh_key_name
24
+
25
+ vpc_security_group_ids = [
26
+ aws_security_group.k3s_sg.id
27
+ ]
28
+
29
+ user_data = templatefile(
30
+ "${path.module}/${var.k3s_role}_user_data.sh.tpl",
31
+ {
32
+ ha = var.ha,
33
+ k3s_token = var.k3s_token,
34
+ master_ip = var.master_ip,
35
+ cluster_name = var.cluster_name
36
+ }
37
+ )
38
+
39
+ tags = {
40
+ Name = "${var.cluster_name}-${var.resource_name}"
41
+ ClusterName = var.cluster_name
42
+ Role = var.k3s_role
43
+ }
44
+ }
45
+
46
+ resource "aws_security_group" "k3s_sg" {
47
+ name = "${var.k3s_role}-${var.cluster_name}-${var.resource_name}"
48
+ description = "Security group for K3s node in cluster ${var.cluster_name}"
49
+
50
+ dynamic "ingress" {
51
+ for_each = toset([
52
+ { from = 2379, to = 2380, proto = "tcp", desc = "etcd communication", roles = ["master", "ha"] },
53
+ { from = 6443, to = 6443, proto = "tcp", desc = "K3s API server", roles = ["master", "ha", "worker"] },
54
+ { from = 8472, to = 8472, proto = "udp", desc = "VXLAN for Flannel", roles = ["master", "ha", "worker"] },
55
+ { from = 10250, to = 10250, proto = "tcp", desc = "Kubelet metrics", roles = ["master", "ha", "worker"] },
56
+ { from = 51820, to = 51820, proto = "udp", desc = "Wireguard IPv4", roles = ["master", "ha", "worker"] },
57
+ { from = 51821, to = 51821, proto = "udp", desc = "Wireguard IPv6", roles = ["master", "ha", "worker"] },
58
+ { from = 5001, to = 5001, proto = "tcp", desc = "Embedded registry", roles = ["master", "ha"] },
59
+ { from = 22, to = 22, proto = "tcp", desc = "SSH access", roles = ["master", "ha", "worker"] },
60
+ { from = 80, to = 80, proto = "tcp", desc = "HTTP access", roles = ["master", "ha", "worker"] },
61
+ { from = 443, to = 443, proto = "tcp", desc = "HTTPS access", roles = ["master", "ha", "worker"] },
62
+ { from = 53, to = 53, proto = "udp", desc = "DNS for CoreDNS", roles = ["master", "ha", "worker"] },
63
+ { from = 5432, to = 5432, proto = "tcp", desc = "PostgreSQL access", roles = ["master"] }
64
+ ])
65
+ content {
66
+ from_port = ingress.value.from
67
+ to_port = ingress.value.to
68
+ protocol = ingress.value.proto
69
+ cidr_blocks = ["0.0.0.0/0"]
70
+ description = ingress.value.desc
71
+ }
72
+ }
73
+
74
+ egress {
75
+ from_port = 0
76
+ to_port = 0
77
+ protocol = "-1"
78
+ cidr_blocks = ["0.0.0.0/0"]
79
+ }
80
+
81
+ tags = {
82
+ Name = "${var.k3s_role}-${var.cluster_name}-${var.resource_name}"
83
+ }
84
+ }
85
+
86
+ # outputs.tf
87
+ output "cluster_name" {
88
+ value = var.k3s_role == "master" ? var.cluster_name : null
89
+ }
90
+
91
+ output "master_ip" {
92
+ value = var.k3s_role == "master" ? aws_instance.k3s_node.public_ip : null
93
+ }
@@ -0,0 +1,22 @@
1
+ provider "aws" {
2
+ region = var.aws_region
3
+ access_key = var.aws_access_key
4
+ secret_key = var.aws_secret_key
5
+ }
6
+
7
+ variable "aws_region" {
8
+ description = "AWS region for resources"
9
+ type = string
10
+ }
11
+
12
+ variable "aws_access_key" {
13
+ description = "AWS access key"
14
+ type = string
15
+ sensitive = true
16
+ }
17
+
18
+ variable "aws_secret_key" {
19
+ description = "AWS secret key"
20
+ type = string
21
+ sensitive = true
22
+ }
@@ -0,0 +1,40 @@
1
+ locals {
2
+ cluster_name = "{{ cluster_name }}"
3
+ edge_device_ip = "{{ edge_device_ip }}"
4
+ k3s_token = "{{ k3s_token }}"
5
+ k3s_role = "{{ k3s_role }}"
6
+ random_name = "{{ random_name }}"
7
+
8
+ {% if k3s_role != "master" %}
9
+ master_ip = "{{ master_ip }}"
10
+ {% endif %}
11
+ }
12
+
13
+ resource "null_resource" "deploy_k3s_edge" {
14
+
15
+ connection {
16
+ type = "ssh"
17
+ user = "{{ ssh_user }}"
18
+ password = "{{ ssh_password }}"
19
+ host = local.edge_device_ip
20
+ }
21
+
22
+ provisioner "file" {
23
+ source = "${local.k3s_role}_user_data.sh"
24
+ destination = "/tmp/edge_user_data.sh"
25
+ }
26
+
27
+ provisioner "remote-exec" {
28
+ inline = [
29
+ "chmod +x /tmp/edge_user_data.sh",
30
+ "sudo K3S_TOKEN='${local.k3s_token}' {% if k3s_role != 'master' %}MASTER_IP='${local.master_ip}'{% endif %} /tmp/edge_user_data.sh"
31
+ ]
32
+ }
33
+
34
+ triggers = {
35
+ Name = "K3s-${local.k3s_role}-${local.cluster_name}-${local.random_name}"
36
+ cluster_name = local.cluster_name
37
+ role = local.k3s_role
38
+ random_name = local.random_name
39
+ }
40
+ }
@@ -0,0 +1,2 @@
1
+ #!/bin/bash
2
+ curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - server --server https://${master_ip}:6443
@@ -0,0 +1,6 @@
1
+ #!/bin/bash
2
+ %{ if ha }
3
+ curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - server --cluster-init
4
+ %{ else }
5
+ curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - server
6
+ %{ endif }
@@ -0,0 +1,76 @@
1
+ provider "openstack" {
2
+ auth_url = "{{ auth_url }}"
3
+ application_credential_id = "{{ application_credential_id }}"
4
+ application_credential_secret = "{{ application_credential_secret }}"
5
+ region = "{{ region }}"
6
+ }
7
+
8
+ locals {
9
+ cluster_name = "{{ cluster_name }}"
10
+ random_name = "{{ random_name }}"
11
+ }
12
+
13
+ # Common variables for HA and worker nodes
14
+ {% if k3s_role != "master" %}
15
+ variable "master_ip" {
16
+ description = "Master node IP"
17
+ type = string
18
+ }
19
+ {% endif %}
20
+
21
+ # Network common to all nodes
22
+ resource "openstack_networking_network_v2" "cluster_network" {
23
+ name = "network-${local.cluster_name}"
24
+ }
25
+
26
+ # Block storage for each node role
27
+ resource "openstack_blockstorage_volume_v3" "root_volume" {
28
+ name = "K3s-{{ 'Master-Node' if k3s_role == 'master' else ('HA-Node' if k3s_role == 'ha' else 'Worker-Node') }}-volume-${local.cluster_name}-${local.random_name}"
29
+ size = "{{ size }}"
30
+ volume_type = "{{ volume_type }}"
31
+ image_id = "{{ image_id }}"
32
+ }
33
+
34
+ # Compute instance for each role
35
+ resource "openstack_compute_instance_v2" "k3s_node" {
36
+ name = "K3s-{{ 'Master-Node' if k3s_role == 'master' else ('HA-Server' if k3s_role == 'ha' else 'Worker-Node') }}-${local.cluster_name}-${local.random_name}"
37
+ flavor_id = "{{ flavor_id }}"
38
+ key_pair = "{{ ssh_key_name }}"
39
+ security_groups = [
40
+ openstack_networking_secgroup_v2.k3s_security_group.name
41
+ ]
42
+
43
+ block_device {
44
+ uuid = openstack_blockstorage_volume_v3.root_volume.id
45
+ source_type = "volume"
46
+ destination_type = "volume"
47
+ boot_index = 0
48
+ delete_on_termination = true
49
+ }
50
+
51
+ network {
52
+ uuid = openstack_networking_network_v2.cluster_network.id
53
+ }
54
+
55
+ user_data = templatefile(
56
+ "{{ 'master_user_data.sh.tpl' if k3s_role == 'master' else ('ha_user_data.sh.tpl' if k3s_role == 'ha' else 'worker_user_data.sh.tpl') }}",
57
+ {
58
+ k3s_token = "{{ k3s_token }}"{% if k3s_role != 'master' %},
59
+ master_ip = var.master_ip
60
+ {% endif %}
61
+ }
62
+ )
63
+ }
64
+
65
+ {% if k3s_role == "master" %}
66
+ # Outputs only for master node
67
+ output "cluster_name" {
68
+ description = "The unique cluster name"
69
+ value = local.cluster_name
70
+ }
71
+
72
+ output "master_ip" {
73
+ description = "The floating IP of the master node"
74
+ value = openstack_compute_instance_v2.k3s_node.access_ip_v4
75
+ }
76
+ {% endif %}
@@ -0,0 +1,34 @@
1
+ resource "openstack_networking_secgroup_v2" "k3s_security_group" {
2
+ name = "k3s-security-group-${local.cluster_name}-${local.random_name}" # Add the cluster name here
3
+ description = "Security group for K3s nodes"
4
+
5
+ tags = [
6
+ "K3s-Security-Group-${local.cluster_name}-${local.random_name}"
7
+ ]
8
+ }
9
+ {% set ingress_rules = [
10
+ {"port_min": 2379, "port_max": 2380, "protocol": "tcp", "description": "Embedded etcd communication"},
11
+ {"port_min": 6443, "port_max": 6443, "protocol": "tcp", "description": "K3s API server access"},
12
+ {"port_min": 8472, "port_max": 8472, "protocol": "udp", "description": "Flannel VXLAN communication"},
13
+ {"port_min": 10250, "port_max": 10250, "protocol": "tcp", "description": "Kubelet metrics"},
14
+ {"port_min": 51820, "port_max": 51820, "protocol": "udp", "description": "Wireguard IPv4"},
15
+ {"port_min": 51821, "port_max": 51821, "protocol": "udp", "description": "Wireguard IPv6"},
16
+ {"port_min": 5001, "port_max": 5001, "protocol": "tcp", "description": "Embedded registry (Spegel)"},
17
+ {"port_min": 22, "port_max": 22, "protocol": "tcp", "description": "SSH access"},
18
+ {"port_min": 80, "port_max": 80, "protocol": "tcp", "description": "HTTP access"},
19
+ {"port_min": 443, "port_max": 443, "protocol": "tcp", "description": "HTTPS access"},
20
+ {"port_min": 53, "port_max": 53, "protocol": "udp", "description": "DNS for CoreDNS"},
21
+ {"port_min": 5432, "port_max": 5432, "protocol": "tcp", "description": "pg database access"}
22
+ ] %}
23
+
24
+ {% for rule in ingress_rules %}
25
+ resource "openstack_networking_secgroup_rule_v2" "{{ rule.description | replace(' ', '_') | replace('(', '') | replace(')', '') | lower }}" {
26
+ direction = "ingress"
27
+ ethertype = "IPv4"
28
+ protocol = "{{ rule.protocol }}"
29
+ port_range_min = {{ rule.port_min }}
30
+ port_range_max = {{ rule.port_max }}
31
+ remote_ip_prefix = "0.0.0.0/0"
32
+ security_group_id = openstack_networking_secgroup_v2.k3s_security_group.id
33
+ }
34
+ {% endfor %}
@@ -0,0 +1,2 @@
1
+ #!/bin/bash
2
+ curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - agent --server https://${master_ip}:6443
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cluster-builder
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: Swarmchestrate cluster builder
5
5
  Author-email: Gunjan <G.Kotak@westminster.ac.uk>, Jay <J.Deslauriers@westminster.ac.uk>
6
6
  License: Apache2
@@ -9,7 +9,6 @@ License-File: LICENSE
9
9
  Requires-Dist: names_generator
10
10
  Requires-Dist: python-hcl2
11
11
  Requires-Dist: lark-parser
12
- Requires-Dist: jinja2
13
12
  Requires-Dist: python-dotenv
14
13
  Dynamic: license-file
15
14
 
@@ -13,8 +13,15 @@ cluster_builder/config/cluster.py
13
13
  cluster_builder/config/postgres.py
14
14
  cluster_builder/infrastructure/__init__.py
15
15
  cluster_builder/infrastructure/executor.py
16
- cluster_builder/templates/__init__.py
17
- cluster_builder/templates/manager.py
16
+ cluster_builder/infrastructure/templates.py
17
+ cluster_builder/templates/aws_provider.tf
18
+ cluster_builder/templates/ha_user_data.sh.tpl
19
+ cluster_builder/templates/master_user_data.sh.tpl
20
+ cluster_builder/templates/worker_user_data.sh.tpl
21
+ cluster_builder/templates/aws/main.tf
22
+ cluster_builder/templates/edge/main.tf.j2
23
+ cluster_builder/templates/openstack/main.tf.j2
24
+ cluster_builder/templates/openstack/network_security_group.tf.j2
18
25
  cluster_builder/utils/__init__.py
19
26
  cluster_builder/utils/hcl.py
20
27
  cluster_builder/utils/logging.py
@@ -1,5 +1,4 @@
1
1
  names_generator
2
2
  python-hcl2
3
3
  lark-parser
4
- jinja2
5
4
  python-dotenv
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "cluster-builder"
7
- version = "0.2.0"
7
+ version = "0.3.0"
8
8
  description = "Swarmchestrate cluster builder"
9
9
  readme = "README.md"
10
10
  authors = [
@@ -17,10 +17,15 @@ dependencies = [
17
17
  "names_generator",
18
18
  "python-hcl2",
19
19
  "lark-parser",
20
- "jinja2",
21
20
  "python-dotenv"
22
21
  ]
23
22
 
24
23
  [tool.setuptools.packages.find]
25
24
  where = ["."]
26
25
  include = ["cluster_builder*"]
26
+
27
+ [tool.setuptools]
28
+ include-package-data = true
29
+
30
+ [tool.setuptools.package-data]
31
+ "cluster_builder" = ["templates/**/*"]
@@ -1,7 +0,0 @@
1
- """
2
- Template management for the Cluster Builder.
3
- """
4
-
5
- from cluster_builder.templates.manager import TemplateManager
6
-
7
- __all__ = ["TemplateManager"]
File without changes