cluster-builder 0.3.0-py3-none-any.whl → 0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -7,48 +7,28 @@ variable "master_ip" {
  }
  variable "ami" {}
  variable "instance_type" {}
+ variable "ssh_user" {}
+ variable "ssh_private_key_path" {}
  variable "ssh_key_name" {}
  variable "k3s_token" {}
- variable "cloud" {
-   default = null
- }
+ variable "cloud" {}
  variable "ha" {
-   default = null
+   default = false
  }
-
- # main.tf
- resource "aws_instance" "k3s_node" {
-   ami = var.ami
-   instance_type = var.instance_type
-   key_name = var.ssh_key_name
-
-   vpc_security_group_ids = [
-     aws_security_group.k3s_sg.id
-   ]
-
-   user_data = templatefile(
-     "${path.module}/${var.k3s_role}_user_data.sh.tpl",
-     {
-       ha = var.ha,
-       k3s_token = var.k3s_token,
-       master_ip = var.master_ip,
-       cluster_name = var.cluster_name
-     }
-   )
-
-   tags = {
-     Name = "${var.cluster_name}-${var.resource_name}"
-     ClusterName = var.cluster_name
-     Role = var.k3s_role
-   }
+ variable "security_group_id" {
+   default = ""
+ }
+ variable "tcp_ports" {
+   default = []
+ }
+ variable "udp_ports" {
+   default = []
  }

- resource "aws_security_group" "k3s_sg" {
-   name = "${var.k3s_role}-${var.cluster_name}-${var.resource_name}"
-   description = "Security group for K3s node in cluster ${var.cluster_name}"
-
-   dynamic "ingress" {
-     for_each = toset([
+ #main.tf
+ locals {
+   ingress_rules = var.security_group_id == "" ? concat(
+     [
        { from = 2379, to = 2380, proto = "tcp", desc = "etcd communication", roles = ["master", "ha"] },
        { from = 6443, to = 6443, proto = "tcp", desc = "K3s API server", roles = ["master", "ha", "worker"] },
        { from = 8472, to = 8472, proto = "udp", desc = "VXLAN for Flannel", roles = ["master", "ha", "worker"] },
@@ -61,15 +41,34 @@ resource "aws_security_group" "k3s_sg" {
        { from = 443, to = 443, proto = "tcp", desc = "HTTPS access", roles = ["master", "ha", "worker"] },
        { from = 53, to = 53, proto = "udp", desc = "DNS for CoreDNS", roles = ["master", "ha", "worker"] },
        { from = 5432, to = 5432, proto = "tcp", desc = "PostgreSQL access", roles = ["master"] }
-     ])
-     content {
-       from_port = ingress.value.from
-       to_port = ingress.value.to
-       protocol = ingress.value.proto
-       cidr_blocks = ["0.0.0.0/0"]
-       description = ingress.value.desc
-     }
+     ],
+     [
+       for port in var.tcp_ports : {
+         from = port, to = port, proto = "tcp", desc = "Custom TCP rule for port ${port}", roles = ["master", "ha", "worker"]
+       }
+     ],
+     [
+       for port in var.udp_ports : {
+         from = port, to = port, proto = "udp", desc = "Custom UDP rule for port ${port}", roles = ["master", "ha", "worker"]
+       }
+     ]
+   ) : []
+ }
+ resource "aws_security_group" "k3s_sg" {
+   count = var.security_group_id == "" ? 1 : 0
+   name = "${var.k3s_role}-${var.cluster_name}-${var.resource_name}"
+   description = "Security group for K3s node in cluster ${var.cluster_name}"
+
+   dynamic "ingress" {
+     for_each = { for idx, rule in local.ingress_rules : idx => rule if contains(rule.roles, var.k3s_role) }
+     content {
+       from_port = ingress.value.from
+       to_port = ingress.value.to
+       protocol = ingress.value.proto
+       cidr_blocks = ["0.0.0.0/0"]
+       description = ingress.value.desc
      }
+   }

    egress {
      from_port = 0
@@ -83,11 +82,75 @@ resource "aws_security_group" "k3s_sg" {
    }
  }

+ resource "aws_instance" "k3s_node" {
+   ami = var.ami
+   instance_type = var.instance_type
+   key_name = var.ssh_key_name
+
+   # Use the provided security group ID if available or the one created by the security group resource.
+   vpc_security_group_ids = var.security_group_id != "" ? [var.security_group_id] : [aws_security_group.k3s_sg[0].id]
+
+   tags = {
+     Name = "${var.cluster_name}-${var.resource_name}"
+     ClusterName = var.cluster_name
+     Role = var.k3s_role
+   }
+
+   # Upload the rendered user data script to the VM
+   provisioner "file" {
+     content = templatefile("${path.module}/${var.k3s_role}_user_data.sh.tpl", {
+       ha = var.ha,
+       k3s_token = var.k3s_token,
+       master_ip = var.master_ip,
+       cluster_name = var.cluster_name,
+       public_ip = self.public_ip,
+       node_name = "${var.cluster_name}-${var.resource_name}"
+     })
+     destination = "/tmp/k3s_user_data.sh"
+   }
+
+   provisioner "remote-exec" {
+     inline = [
+       "rm -f ~/.ssh/known_hosts",
+       "echo 'Executing remote provisioning script on ${var.k3s_role} node'",
+       "chmod +x /tmp/k3s_user_data.sh",
+       "sudo /tmp/k3s_user_data.sh"
+     ]
+   }
+
+   connection {
+     type = "ssh"
+     user = var.ssh_user
+     private_key = file(var.ssh_private_key_path)
+     host = self.public_ip
+   }
+ }
+
  # outputs.tf
  output "cluster_name" {
-   value = var.k3s_role == "master" ? var.cluster_name : null
+   value = var.cluster_name
  }

  output "master_ip" {
-   value = var.k3s_role == "master" ? aws_instance.k3s_node.public_ip : null
+   value = var.k3s_role == "master" ? aws_instance.k3s_node.public_ip : var.master_ip
+ }
+
+ output "worker_ip" {
+   value = var.k3s_role == "worker" ? aws_instance.k3s_node.public_ip : null
  }
+
+ output "ha_ip" {
+   value = var.k3s_role == "ha" ? aws_instance.k3s_node.public_ip : null
+ }
+
+ output "k3s_token" {
+   value = var.k3s_token
+ }
+
+ output "instance_status" {
+   value = aws_instance.k3s_node.id
+ }
+
+ output "node_name" {
+   value = aws_instance.k3s_node.tags["Name"]
+ }
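
For orientation, a minimal sketch of how the reworked AWS module above might be invoked from a root configuration. The module path, AMI, key pair, and token values are illustrative placeholders rather than anything shipped in the package, and required inputs that fall outside this hunk (such as master_ip on worker nodes) would still need values:

module "k3s_master" {
  source = "./modules/aws" # hypothetical path to this module

  cluster_name  = "demo"
  resource_name = "node-1"
  k3s_role      = "master"
  ami           = "ami-0123456789abcdef0" # placeholder
  instance_type = "t3.medium"
  ssh_key_name  = "demo-key"
  ssh_user      = "ubuntu"
  ssh_private_key_path = "~/.ssh/id_rsa"
  k3s_token     = "example-token"
  cloud         = "aws"

  # New in 0.3.1: pass an existing security group ID to skip group
  # creation, or leave it empty and optionally open extra ports.
  security_group_id = ""
  tcp_ports         = [8080]
}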
@@ -0,0 +1,36 @@
+ # main.tf
+
+ variable "manifest_folder" {}
+ variable "ssh_private_key_path" {}
+ variable "master_ip" {}
+ variable "ssh_user" {}
+
+ resource "null_resource" "copy_manifests" {
+   connection {
+     type = "ssh"
+     user = var.ssh_user
+     private_key = file(var.ssh_private_key_path)
+     host = var.master_ip
+   }
+
+   # Ensure the manifests folder exists on the remote host
+   provisioner "remote-exec" {
+     inline = [
+       "mkdir -p /home/${var.ssh_user}/manifests",
+       "sudo chmod 755 /home/${var.ssh_user}/manifests"
+     ]
+   }
+
+   # Copy the manifests
+   provisioner "file" {
+     source = var.manifest_folder
+     destination = "/home/${var.ssh_user}"
+   }
+
+   # Apply manifests using K3s kubeconfig
+   provisioner "remote-exec" {
+     inline = [
+       "sudo -E KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl apply -R -f /home/ubuntu/manifests/"
+     ]
+   }
+ }
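
This new manifest-copy module takes only four inputs; a hypothetical invocation follows (module path and file locations are assumptions). One caveat visible in the release itself: the kubectl apply step hardcodes /home/ubuntu/manifests/ while the upload targets /home/${var.ssh_user}, so an ssh_user other than ubuntu would copy manifests to a directory the apply step never reads.

module "apply_manifests" {
  source = "./modules/manifests" # hypothetical path to this module

  manifest_folder      = "${path.root}/manifests" # local folder to upload
  master_ip            = module.k3s_master.master_ip
  ssh_user             = "ubuntu" # see note above: the apply path assumes ubuntu
  ssh_private_key_path = "~/.ssh/id_rsa"
}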
@@ -0,0 +1,98 @@
+ # variables.tf
+ variable "cluster_name" {}
+ variable "edge_device_ip" {}
+ variable "k3s_token" {}
+ variable "cloud" {
+   default = "edge"
+ }
+ variable "k3s_role" {}
+ variable "resource_name" {}
+ variable "ssh_auth_method" {}
+ variable "ssh_user" {}
+ variable "ssh_password" {
+   sensitive = true
+   default = null
+ }
+ variable "ssh_private_key" {
+ }
+ variable "master_ip" {
+   default = null
+ }
+ variable "ha" {
+   default = false
+ }
+
+ #main.tf
+ data "template_file" "user_data" {
+   template = file("${path.module}/${var.k3s_role}_user_data.sh.tpl")
+   vars = {
+     k3s_token = var.k3s_token
+     ha = var.ha
+     public_ip = var.edge_device_ip
+     master_ip = var.master_ip
+     node_name = "${var.cluster_name}-${var.resource_name}"
+   }
+ }
+
+ resource "local_file" "rendered_user_data" {
+   content = data.template_file.user_data.rendered
+   filename = "${path.module}/${var.k3s_role}_user_data.sh"
+ }
+
+ resource "null_resource" "deploy_k3s_edge" {
+   connection {
+     type = "ssh"
+     user = var.ssh_user
+     host = var.edge_device_ip
+     password = var.ssh_auth_method == "password" ? var.ssh_password : null
+     private_key = var.ssh_auth_method == "key" ? file(var.ssh_private_key) : null
+   }
+
+   provisioner "file" {
+     source = "${path.module}/${var.k3s_role}_user_data.sh"
+     destination = "/tmp/edge_user_data.sh"
+   }
+
+   provisioner "remote-exec" {
+     inline = [
+       "rm -f ~/.ssh/known_hosts",
+       "echo 'Executing remote provisioning script on ${var.k3s_role} node'",
+       "chmod +x /tmp/edge_user_data.sh",
+       "sudo /tmp/edge_user_data.sh"
+     ]
+   }
+
+   triggers = {
+     Name = "K3s-${var.k3s_role}-${var.cluster_name}-${var.resource_name}"
+     cluster_name = var.cluster_name
+     role = var.k3s_role
+     resource_name = var.resource_name
+     edge_ip = var.edge_device_ip
+   }
+   depends_on = [local_file.rendered_user_data]
+ }
+
+ # outputs.tf
+ output "cluster_name" {
+   value = var.cluster_name
+ }
+
+ output "master_ip" {
+   value = var.k3s_role == "master" ? var.edge_device_ip : var.master_ip
+ }
+
+ output "worker_ip" {
+   value = var.k3s_role == "worker" ? var.edge_device_ip : null
+ }
+
+ output "ha_ip" {
+   value = var.k3s_role == "ha" ? var.edge_device_ip : null
+ }
+
+ output "k3s_token" {
+   value = var.k3s_token
+ }
+
+ output "node_name" {
+   value = "${var.cluster_name}-${var.resource_name}"
+ }
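
A sketch of enrolling an edge device as a worker with this module, exercising the new ssh_auth_method switch; addresses, credentials, and the module path are placeholders:

module "k3s_edge_worker" {
  source = "./modules/edge" # hypothetical path to this module

  cluster_name   = "demo"
  resource_name  = "edge-1"
  k3s_role       = "worker"
  edge_device_ip = "192.0.2.10" # documentation address, substitute the device IP
  master_ip      = module.k3s_master.master_ip
  k3s_token      = module.k3s_master.k3s_token

  ssh_auth_method = "password" # or "key" to authenticate with ssh_private_key
  ssh_user        = "pi"
  ssh_password    = var.edge_ssh_password # hypothetical root-level variable
  ssh_private_key = ""                    # unused under password auth
}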
@@ -1,2 +1,33 @@
  #!/bin/bash
- curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - server --server https://${master_ip}:6443
+ set -euo pipefail
+
+ LOG_FILE="/var/log/k3s_server_install.log"
+ exec > >(tee -a "$LOG_FILE") 2>&1
+ echo "=== K3s HA Server Install Script Started at $(date) ==="
+
+ # Function to log messages with timestamp
+ log_message() {
+   echo "$(date) - $1"
+ }
+
+ # Check if K3s server is already running
+ if systemctl is-active --quiet k3s; then
+   log_message "K3s is already running. Skipping installation."
+   exit 0
+ fi
+
+ # Install K3s HA server and join the cluster
+ log_message "Installing K3s HA Server and joining the cluster..."
+ if ! curl -sfL https://get.k3s.io | K3S_TOKEN="${k3s_token}" sh -s - server \
+   --server "https://${master_ip}:6443" \
+   --node-external-ip="${public_ip}" \
+   --node-name="${node_name}" \
+   --flannel-backend=wireguard-native \
+   --flannel-external-ip; then
+   log_message "ERROR: K3s server installation failed!"
+   exit 1
+ else
+   log_message "K3s server installation succeeded."
+ fi
+
+ log_message "=== Script completed at $(date) ==="
@@ -1,6 +1,37 @@
  #!/bin/bash
- %{ if ha }
- curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - server --cluster-init
- %{ else }
- curl -sfL https://get.k3s.io | K3S_TOKEN=${k3s_token} sh -s - server
- %{ endif }
+ set -euo pipefail
+
+ LOG_FILE="/var/log/k3s_server_install.log"
+ exec > >(tee -a "$LOG_FILE") 2>&1
+ echo "=== K3s Server Install Script Started at $(date) ==="
+
+ # Function to log messages with timestamp
+ log_message() {
+   echo "$(date) - $1"
+ }
+
+ # Trap errors and print a message
+ trap 'log_message "ERROR: Script failed at line $LINENO with exit code $?."' ERR
+
+ # Check if K3s server is already running
+ if systemctl is-active --quiet k3s; then
+   log_message "K3s is already running. Skipping installation."
+ else
+   log_message "K3s is not running. Proceeding with installation..."
+
+   # Use the provided public IP
+   log_message "Using provided public IP: ${public_ip}"
+
+   # Templated installation based on HA configuration
+   if [[ "${ha}" == "true" ]]; then
+     log_message "Installing in HA mode using cluster-init..."
+     curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--cluster-init --node-external-ip=${public_ip} --node-name="${node_name}" --flannel-backend=wireguard-native --flannel-external-ip" K3S_TOKEN="${k3s_token}" sh -s - server
+   else
+     log_message "Installing in single-server mode..."
+     curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-external-ip=${public_ip} --node-name="${node_name}" --flannel-backend=wireguard-native --flannel-external-ip" K3S_TOKEN="${k3s_token}" sh -s - server
+   fi
+
+   log_message "K3s installation completed successfully."
+ fi
+
+ log_message "=== Script completed at $(date) ==="
@@ -0,0 +1,218 @@
+ # variables.tf
+ variable "cluster_name" {}
+ variable "resource_name" {}
+ variable "k3s_role" {}
+ variable "master_ip" {
+   default = null
+ }
+ variable "volume_size" {}
+ variable "openstack_image_id" {}
+ variable "openstack_flavor_id" {}
+ variable "ssh_user" {}
+ variable "ssh_private_key_path" {}
+ variable "ssh_key_name" {}
+ variable "k3s_token" {}
+ variable "cloud" {}
+ variable "ha" {
+   default = false
+ }
+
+ variable "floating_ip_pool" {}
+ variable "network_id" {}
+ variable "use_block_device" {
+   default = false
+ }
+ variable "security_group_id" {
+   default = ""
+ }
+ variable "tcp_ports" {
+   default = []
+ }
+ variable "udp_ports" {
+   default = []
+ }
+
+ # main.tf
+ # Block storage for each node role
+ resource "openstack_blockstorage_volume_v3" "root_volume" {
+   count = var.use_block_device ? 1 : 0 # Only create the volume if block device is required
+   name = "${var.cluster_name}-${var.resource_name}-volume"
+   size = var.volume_size
+   image_id = var.openstack_image_id
+ }
+
+ # Defining the port to use while instance creation
+ resource "openstack_networking_port_v2" "port_1" {
+   network_id = var.network_id
+ }
+
+ # Security group rules
+ locals {
+   ingress_rules = var.security_group_id == "" ? concat(
+     [
+       { from = 2379, to = 2380, proto = "tcp", desc = "etcd communication", roles = ["master", "ha"] },
+       { from = 6443, to = 6443, proto = "tcp", desc = "K3s API server", roles = ["master", "ha", "worker"] },
+       { from = 8472, to = 8472, proto = "udp", desc = "VXLAN for Flannel", roles = ["master", "ha", "worker"] },
+       { from = 10250, to = 10250, proto = "tcp", desc = "Kubelet metrics", roles = ["master", "ha", "worker"] },
+       { from = 51820, to = 51820, proto = "udp", desc = "Wireguard IPv4", roles = ["master", "ha", "worker"] },
+       { from = 51821, to = 51821, proto = "udp", desc = "Wireguard IPv6", roles = ["master", "ha", "worker"] },
+       { from = 5001, to = 5001, proto = "tcp", desc = "Embedded registry", roles = ["master", "ha"] },
+       { from = 22, to = 22, proto = "tcp", desc = "SSH access", roles = ["master", "ha", "worker"] },
+       { from = 80, to = 80, proto = "tcp", desc = "HTTP access", roles = ["master", "ha", "worker"] },
+       { from = 443, to = 443, proto = "tcp", desc = "HTTPS access", roles = ["master", "ha", "worker"] },
+       { from = 53, to = 53, proto = "udp", desc = "DNS for CoreDNS", roles = ["master", "ha", "worker"] },
+       { from = 5432, to = 5432, proto = "tcp", desc = "PostgreSQL access", roles = ["master"] }
+     ],
+     [
+       for port in var.tcp_ports : {
+         from = port, to = port, proto = "tcp", desc = "Custom TCP rule for port ${port}", roles = ["master", "ha", "worker"]
+       }
+     ],
+     [
+       for port in var.udp_ports : {
+         from = port, to = port, proto = "udp", desc = "Custom UDP rule for port ${port}", roles = ["master", "ha", "worker"]
+       }
+     ]
+   ) : []
+ }
+
+ # Security Group Resource
+ resource "openstack_networking_secgroup_v2" "k3s_sg" {
+   count = var.security_group_id == "" ? 1 : 0 # Only create if no SG ID is provided
+   name = "${var.cluster_name}-${var.resource_name}-sg"
+   description = "Security group for ${var.k3s_role} in cluster ${var.cluster_name}"
+ }
+
+ # Security Group Rule Resource
+ resource "openstack_networking_secgroup_rule_v2" "k3s_sg_rules" {
+   # Only create rules if the security group is created (not passed)
+   for_each = var.security_group_id == "" ? {
+     for idx, rule in local.ingress_rules :
+     "${rule.from}-${rule.to}-${rule.proto}-${rule.desc}" => rule
+   } : {}
+
+   security_group_id = openstack_networking_secgroup_v2.k3s_sg[0].id # Use index 0 since only 1 security group is created when count > 0
+   direction = "ingress"
+   ethertype = "IPv4"
+   port_range_min = each.value.from
+   port_range_max = each.value.to
+   protocol = each.value.proto
+   remote_ip_prefix = "0.0.0.0/0"
+   description = each.value.desc
+ }
+
+
+ resource "openstack_networking_port_secgroup_associate_v2" "port_2" {
+   port_id = openstack_networking_port_v2.port_1.id
+   enforce = true
+   # Use the provided security group ID if available, otherwise use the generated security group
+   security_group_ids = var.security_group_id != "" ? [var.security_group_id] : [openstack_networking_secgroup_v2.k3s_sg[0].id]
+ }
+
+ # Compute instance for each role
+ resource "openstack_compute_instance_v2" "k3s_node" {
+   depends_on = [openstack_networking_port_v2.port_1]
+
+   name = "${var.cluster_name}-${var.resource_name}"
+   flavor_name = var.openstack_flavor_id
+   key_pair = var.ssh_key_name
+   # Only add the image_id if block device is NOT used
+   image_id = var.use_block_device ? null : var.openstack_image_id
+
+   # Conditional block_device for boot volume
+   dynamic "block_device" {
+     for_each = var.use_block_device ? [1] : [] # Include block_device only if use_block_device is true
+     content {
+       uuid = openstack_blockstorage_volume_v3.root_volume[0].id
+       source_type = "volume"
+       destination_type = "volume"
+       boot_index = 0
+       delete_on_termination = true
+     }
+   }
+
+   network {
+     port = openstack_networking_port_v2.port_1.id
+   }
+
+   tags = [
+     "${var.cluster_name}-${var.resource_name}",
+     "ClusterName=${var.cluster_name}",
+     "Role=${var.k3s_role}"
+   ]
+ }
+
+ resource "openstack_networking_floatingip_v2" "floatip_1" {
+   pool = var.floating_ip_pool
+ }
+
+ resource "openstack_networking_floatingip_associate_v2" "fip_association" {
+   floating_ip = openstack_networking_floatingip_v2.floatip_1.address
+   port_id = openstack_networking_port_v2.port_1.id
+
+   depends_on = [
+     openstack_compute_instance_v2.k3s_node # Ensure the instance is created first
+   ]
+ }
+
+ # Provisioning via SSH
+ resource "null_resource" "k3s_provision" {
+   depends_on = [openstack_networking_floatingip_v2.floatip_1]
+
+   provisioner "file" {
+     content = templatefile("${path.module}/${var.k3s_role}_user_data.sh.tpl", {
+       ha = var.ha,
+       k3s_token = var.k3s_token,
+       master_ip = var.master_ip,
+       cluster_name = var.cluster_name,
+       public_ip = openstack_networking_floatingip_v2.floatip_1.address,
+       node_name = "${var.cluster_name}-${var.resource_name}"
+     })
+     destination = "/tmp/k3s_user_data.sh"
+   }
+
+   provisioner "remote-exec" {
+     inline = [
+       "rm -f ~/.ssh/known_hosts",
+       "echo 'Executing remote provisioning script on ${var.k3s_role} node'",
+       "chmod +x /tmp/k3s_user_data.sh",
+       "sudo /tmp/k3s_user_data.sh"
+     ]
+   }
+
+   connection {
+     type = "ssh"
+     user = var.ssh_user
+     private_key = file(var.ssh_private_key_path)
+     host = openstack_networking_floatingip_v2.floatip_1.address
+   }
+ }
+
+ # outputs.tf
+ output "cluster_name" {
+   value = var.cluster_name
+ }
+
+ output "master_ip" {
+   value = var.k3s_role == "master" ? openstack_networking_floatingip_v2.floatip_1.address : var.master_ip
+ }
+
+ output "worker_ip" {
+   value = var.k3s_role == "worker" ? openstack_networking_floatingip_v2.floatip_1.address : null
+ }
+
+ output "ha_ip" {
+   value = var.k3s_role == "ha" ? openstack_networking_floatingip_v2.floatip_1.address : null
+ }
+
+ output "k3s_token" {
+   value = var.k3s_token
+ }
+
+ output "instance_power_state" {
+   value = openstack_compute_instance_v2.k3s_node.power_state
+ }
+
+ output "node_name" {
+   value = openstack_compute_instance_v2.k3s_node.name
+ }
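
And a hypothetical invocation of the new OpenStack module; UUIDs, names, and the module path are placeholders. Worth noting from the resource above: openstack_flavor_id is fed into flavor_name, so a flavor name is what the module actually expects.

module "k3s_master_os" {
  source = "./modules/openstack" # hypothetical path to this module

  cluster_name         = "demo"
  resource_name        = "node-1"
  k3s_role             = "master"
  openstack_image_id   = "11111111-2222-3333-4444-555555555555" # placeholder UUID
  openstack_flavor_id  = "m1.medium" # consumed as flavor_name above
  volume_size          = 20
  use_block_device     = true # boot from a Cinder volume built from the image
  network_id           = "66666666-7777-8888-9999-aaaaaaaaaaaa" # placeholder UUID
  floating_ip_pool     = "public"
  ssh_key_name         = "demo-key"
  ssh_user             = "ubuntu"
  ssh_private_key_path = "~/.ssh/id_rsa"
  k3s_token            = "example-token"
  cloud                = "openstack"
  security_group_id    = "" # empty: create the per-node group defined above
}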
@@ -0,0 +1,70 @@
+ variable "openstack_auth_method" {
+   description = "Auth method: 'appcreds' or 'userpass'"
+   type = string
+   default = "appcreds"
+ }
+
+ variable "openstack_auth_url" {
+   description = "Openstack secret key"
+   type = string
+ }
+
+ variable "openstack_region" {
+   description = "Openstack region for resources"
+   type = string
+ }
+
+ # AppCred variables
+ variable "openstack_application_credential_id" {
+   description = "Openstack application application credential id"
+   type = string
+   sensitive = true
+   default = ""
+ }
+
+ variable "openstack_application_credential_secret" {
+   description = "Openstack application credential secret"
+   type = string
+   sensitive = true
+   default = ""
+ }
+
+ # Username/password variables
+ variable "openstack_user_name" {
+   description = "Username for OpenStack (if not using appcred)"
+   type = string
+   default = ""
+ }
+
+ variable "openstack_password" {
+   description = "Password for OpenStack user"
+   type = string
+   sensitive = true
+   default = ""
+ }
+
+ variable "openstack_project_id" {
+   description = "Project ID to use with OpenStack"
+   type = string
+   default = ""
+ }
+
+ variable "openstack_user_domain_name" {
+   description = "User domain name"
+   type = string
+   default = ""
+ }
+
+ # Dynamic provider config (manually switching fields)
+ provider "openstack" {
+   auth_url = var.openstack_auth_url
+   region = var.openstack_region
+
+   application_credential_id = var.openstack_auth_method == "appcreds" ? var.openstack_application_credential_id : null
+   application_credential_secret = var.openstack_auth_method == "appcreds" ? var.openstack_application_credential_secret : null
+
+   user_name = var.openstack_auth_method == "userpass" ? var.openstack_user_name : null
+   password = var.openstack_auth_method == "userpass" ? var.openstack_password : null
+   tenant_id = var.openstack_auth_method == "userpass" ? var.openstack_project_id : null
+   user_domain_name = var.openstack_auth_method == "userpass" ? var.openstack_user_domain_name : null
+ }
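
The provider block switches fields on openstack_auth_method, so a root configuration only ever fills in one of the two credential sets. A sketch of the matching terraform.tfvars, with placeholder values throughout:

# Application-credential auth (the default)
openstack_auth_method                   = "appcreds"
openstack_auth_url                      = "https://keystone.example.com:5000/v3"
openstack_region                        = "RegionOne"
openstack_application_credential_id     = "replace-with-appcred-id"
openstack_application_credential_secret = "replace-with-appcred-secret"

# Or username/password auth:
# openstack_auth_method      = "userpass"
# openstack_user_name        = "demo"
# openstack_password         = "replace-with-password"
# openstack_project_id       = "replace-with-project-id"
# openstack_user_domain_name = "Default"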