cluster-builder 0.3.0-py3-none-any.whl → 0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cluster-builder might be problematic.

@@ -4,7 +4,8 @@ Cluster configuration management.
 
 import os
 import logging
-
+import secrets
+import string
 from names_generator import generate_name
 
 from cluster_builder.infrastructure import TemplateManager
@@ -50,9 +51,25 @@ class ClusterConfig:
             A randomly generated name
         """
         name = generate_name()
+        name = name.replace("_", "-")
         logger.debug(f"Generated random name: {name}")
         return name
 
+    def generate_k3s_token(self, length: int = 16) -> str:
+        """
+        Generate a secure random alphanumeric token for K3s.
+
+        Args:
+            length: Length of the token (default: 16)
+
+        Returns:
+            A secure, randomly generated alphanumeric token
+        """
+        chars = string.ascii_letters + string.digits
+        token = ''.join(secrets.choice(chars) for _ in range(length))
+        logger.debug(f"Generated K3s token: {token}")
+        return token
+
     def prepare(self, config: dict[str, any]) -> tuple[str, dict[str, any]]:
         """
         Prepare the configuration and template files for deployment.
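
The new generate_k3s_token helper uses only the standard library: secrets.choice draws from a cryptographically secure source, so a 16-character token over the 62-character alphanumeric alphabet carries roughly 95 bits of entropy (note that the diff also writes the generated token to the debug log). A minimal standalone sketch of the same pattern; make_token is an illustrative name, not part of the package:

    import secrets
    import string

    def make_token(length: int = 16) -> str:
        """Return a cryptographically secure alphanumeric token (illustrative helper)."""
        alphabet = string.ascii_letters + string.digits  # 62 candidate characters
        return "".join(secrets.choice(alphabet) for _ in range(length))

    print(make_token())       # e.g. 'aZ3kP9qLm2XvB7cD'
    print(make_token(32))     # longer token if more entropy is wanted
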
@@ -84,7 +101,7 @@ class ClusterConfig:
 
         cloud = prepared_config["cloud"]
         role = prepared_config["k3s_role"]
-        logger.info(f"Preparing configuration for cloud={cloud}, role={role}")
+        logger.debug(f"Preparing configuration for cloud={cloud}, role={role}")
 
         # Set module source path
         prepared_config["module_source"] = self.template_manager.get_module_source_path(
@@ -92,14 +109,22 @@ class ClusterConfig:
         )
         logger.debug(f"Using module source: {prepared_config['module_source']}")
 
+        # create k3s-token if not provided
+        if "k3s_token" not in prepared_config:
+            logger.debug("Generating k3s token for cluster")
+            k3s_token = self.generate_k3s_token()
+            prepared_config["k3s_token"] = k3s_token
+        else:
+            logger.debug(f"Using provided K3s token: {prepared_config['k3s_token']}")
+
         # Generate a cluster name if not provided
         if "cluster_name" not in prepared_config:
             cluster_name = self.generate_random_name()
             prepared_config["cluster_name"] = cluster_name
-            logger.info(f"Generated cluster name: {cluster_name}")
+            logger.info(f"Creating new cluster: {cluster_name}")
         else:
             logger.info(
-                f"Using provided cluster name: {prepared_config['cluster_name']}"
+                f"Adding node to existing cluster: {prepared_config['cluster_name']}"
             )
 
         cluster_dir = self.get_cluster_output_dir(prepared_config["cluster_name"])
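
Taken together, the two new checks in prepare() mean a config that omits cluster_name and k3s_token produces a brand-new cluster (fresh name and token), while a config that carries both is treated as adding a node to an existing cluster, which is what the reworded log messages reflect. A simplified, self-contained sketch of that defaulting flow; apply_defaults is an illustrative name, not the package API:

    import secrets
    import string
    from names_generator import generate_name  # same helper the package imports

    def apply_defaults(config: dict) -> dict:
        """Illustrative sketch of the defaulting added to ClusterConfig.prepare()."""
        prepared = dict(config)
        if "k3s_token" not in prepared:
            alphabet = string.ascii_letters + string.digits
            prepared["k3s_token"] = "".join(secrets.choice(alphabet) for _ in range(16))
        if "cluster_name" not in prepared:
            prepared["cluster_name"] = generate_name().replace("_", "-")  # new cluster
        return prepared

    print(apply_defaults({"cloud": "aws", "k3s_role": "master"}))  # illustrative values
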
@@ -107,7 +132,7 @@ class ClusterConfig:
 
         # Generate a resource name
         random_name = self.generate_random_name()
-        prepared_config["resource_name"] = f"{cloud}_{random_name}"
+        prepared_config["resource_name"] = f"{cloud}-{random_name}"
         logger.debug(f"Resource name: {prepared_config['resource_name']}")
 
         # Create the cluster directory
@@ -37,10 +37,12 @@ class PostgresConfig:
         missing_keys = [key for key in required_keys if key not in config]
 
         if missing_keys:
+            logger.error(f"Missing required PostgreSQL configuration keys: {', '.join(missing_keys)}")
             raise ValueError(
                 f"Missing required PostgreSQL configuration: {', '.join(missing_keys)}"
             )
 
+        logger.info(f"Creating PostgresConfig from dict with user={config.get('user')} host={config.get('host')} database={config.get('database')}")
         return cls(
             user=config["user"],
             password=config["password"],
@@ -77,11 +79,12 @@ class PostgresConfig:
         missing_vars = [var for var in required_vars if not os.environ.get(var)]
 
         if missing_vars:
+            logger.error(f"Missing required PostgreSQL environment variables: {', '.join(missing_vars)}")
             raise ValueError(
                 f"Missing required PostgreSQL environment variables: {', '.join(missing_vars)}"
             )
 
-        # Create config from environment variables
+        logger.info(f"Creating PostgresConfig from environment with user={os.environ.get('POSTGRES_USER')} host={os.environ.get('POSTGRES_HOST')} database={os.environ.get('POSTGRES_DATABASE')}")
         return cls(
             user=os.environ["POSTGRES_USER"],
             password=os.environ["POSTGRES_PASSWORD"],
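
Both PostgresConfig classmethods now follow the same pattern: collect every missing key or variable, log the full list at error level, then raise ValueError; on success they log the user, host and database (but not the password) at info level. A standalone sketch of the environment-variable variant, using only the variable names and logger name that appear in the diff (the package's full required list may be longer); check_env is an illustrative function, not the package API:

    import logging
    import os

    logger = logging.getLogger("swarmchestrate")
    REQUIRED_VARS = ["POSTGRES_USER", "POSTGRES_PASSWORD", "POSTGRES_HOST", "POSTGRES_DATABASE"]

    def check_env() -> dict:
        """Collect all missing variables before failing, rather than stopping at the first."""
        missing = [var for var in REQUIRED_VARS if not os.environ.get(var)]
        if missing:
            logger.error(f"Missing required PostgreSQL environment variables: {', '.join(missing)}")
            raise ValueError(f"Missing required PostgreSQL environment variables: {', '.join(missing)}")
        return {var: os.environ[var] for var in REQUIRED_VARS}
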
@@ -5,6 +5,9 @@ Command execution utilities for infrastructure management.
 import subprocess
 import logging
 
+from yaspin import yaspin
+from yaspin.spinners import Spinners
+
 logger = logging.getLogger("swarmchestrate")
 
 
@@ -13,7 +16,11 @@ class CommandExecutor:
 
     @staticmethod
     def run_command(
-        command: list, cwd: str, description: str = "command", timeout: int = None
+        command: list,
+        cwd: str,
+        description: str = "command",
+        timeout: int = None,
+        env: dict = None,  # <-- Add optional env param
     ) -> str:
         """
         Execute a shell command with proper logging and error handling.
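
The new env argument is handed straight to subprocess.Popen, so when it is provided it replaces the child process's environment rather than extending it; callers who only want to add variables typically start from a copy of os.environ. A hypothetical call (the command, directory and variable are illustrative, and the import path is a guess since the diff does not show the module name):

    import os
    # from cluster_builder.infrastructure import CommandExecutor  # assumed location

    env = {**os.environ, "TF_IN_AUTOMATION": "1"}  # merge instead of replace
    output = CommandExecutor.run_command(
        ["terraform", "plan"],          # illustrative command
        cwd="/tmp/my-cluster",          # illustrative working directory
        description="terraform plan",
        timeout=300,
        env=env,
    )
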
@@ -33,56 +40,49 @@ class CommandExecutor:
         cmd_str = " ".join(command)
         logger.info(f"Running {description}: {cmd_str}")
 
-        try:
-            # Start the process using Popen
-            process = subprocess.Popen(
-                command,
-                cwd=cwd,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE,
-                text=True,
-            )
-
-            # Wait for the process with timeout
+        show_spinner = timeout is None or timeout > 15
+
+        process = subprocess.Popen(
+            command,
+            cwd=cwd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+            env=env,
+        )
+
+        if show_spinner:
             try:
-                stdout, stderr = process.communicate(timeout=timeout)
+                process.wait(timeout=15)
+                stdout, stderr = process.communicate()
+                return CommandExecutor._check_result(stdout, stderr, process.returncode, description)
+            except subprocess.TimeoutExpired:
+                pass  # Still running → spinner starts
 
-            # Check if the process was successful
-            if process.returncode != 0:
-                error_msg = f"Error executing {description}: {stderr}"
-                logger.error(error_msg)
-                raise RuntimeError(error_msg)
+        # Either timeout <= 15s, or process still running after 15s
+        spinner = yaspin(Spinners.point, text=f"Running {description}...", color="cyan") if show_spinner else None
+        if spinner:
+            spinner.start()
 
-            logger.debug(f"{description.capitalize()} output: {stdout}")
-            return stdout
+        try:
+            stdout, stderr = process.communicate(timeout=timeout)
+        except subprocess.TimeoutExpired:
+            process.kill()
+            stdout, stderr = process.communicate()
+            if spinner:
+                spinner.fail("⏰")
+            raise RuntimeError(f"{description.capitalize()} timed out after {timeout} seconds")
 
-        except subprocess.TimeoutExpired:
-            # Process timed out - try to get any output so far
-            # Kill the process
-            process.kill()
+        if spinner:
+            spinner.ok("✅") if process.returncode == 0 else spinner.fail("💥")
 
-            # Capture any output that was generated before the timeout
-            stdout, stderr = process.communicate()
+        return CommandExecutor._check_result(stdout, stderr, process.returncode, description)
 
-            # Print and log the captured output
-            print(f"\n--- {description.capitalize()} stdout before timeout ---")
-            print(stdout)
-            print(f"\n--- {description.capitalize()} stderr before timeout ---")
-            print(stderr)
-
-            error_msg = (
-                f"{description.capitalize()} timed out after {timeout} seconds"
-            )
-            logger.error(error_msg)
-            raise RuntimeError(error_msg) from None
-
-        except subprocess.CalledProcessError as e:
-            error_msg = f"Error executing {description}: {e.stderr}"
-            logger.error(error_msg)
-            raise RuntimeError(error_msg)
-        except Exception as e:
-            if not isinstance(e, RuntimeError):  # Avoid re-wrapping our own exceptions
-                error_msg = f"Unexpected error during {description}: {str(e)}"
-                logger.error(error_msg)
-                raise RuntimeError(error_msg)
-            raise
+    @staticmethod
+    def _check_result(stdout, stderr, returncode, description):
+        if returncode != 0:
+            err = f"Error executing {description}: {stderr}"
+            logger.error(err)
+            raise RuntimeError(err)
+        logger.debug(f"{description.capitalize()} output: {stdout}")
+        return stdout
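
The rewritten run_command stays quiet for up to 15 seconds (and skips the spinner entirely when timeout is 15 seconds or less), then starts a yaspin spinner for long-running commands and funnels all return-code handling through the new _check_result helper. A minimal standalone sketch of that grace-period pattern, assuming yaspin is installed; the sleep command is only a stand-in for a long-running process:

    import subprocess
    from yaspin import yaspin
    from yaspin.spinners import Spinners

    process = subprocess.Popen(["sleep", "30"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    try:
        process.wait(timeout=15)                 # quiet grace period
        stdout, stderr = process.communicate()
    except subprocess.TimeoutExpired:
        # Still running after 15s: show feedback while waiting for completion
        with yaspin(Spinners.point, text="Running command...", color="cyan") as spinner:
            stdout, stderr = process.communicate()
            spinner.ok("✅") if process.returncode == 0 else spinner.fail("💥")
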
@@ -65,7 +65,7 @@ class TemplateManager:
         try:
             # Simply copy the provider config file to the cluster directory
             shutil.copy2(provider_template_path, provider_file)
-            logger.info(f"Created {cloud} provider configuration at {provider_file}")
+            logger.debug(f"Created {cloud} provider configuration at {provider_file}")
         except Exception as e:
             error_msg = f"Failed to create provider configuration: {e}"
             logger.error(error_msg)
@@ -94,7 +94,7 @@ class TemplateManager:
 
         try:
             shutil.copy2(user_data_src, user_data_dst)
-            logger.info(
+            logger.debug(
                 f"Copied user data template from {user_data_src} to {user_data_dst}"
             )
         except (OSError, shutil.Error) as e: