vantage6 5.0.0a35__py3-none-any.whl → 5.0.0a37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vantage6 might be problematic.
- vantage6/cli/algorithm/generate_algorithm_json.py +9 -10
- vantage6/cli/algorithm/update.py +1 -1
- vantage6/cli/algostore/attach.py +1 -0
- vantage6/cli/algostore/files.py +3 -2
- vantage6/cli/algostore/list.py +0 -3
- vantage6/cli/algostore/new.py +3 -2
- vantage6/cli/algostore/start.py +14 -3
- vantage6/cli/algostore/stop.py +3 -0
- vantage6/cli/auth/attach.py +60 -0
- vantage6/cli/auth/files.py +16 -0
- vantage6/cli/auth/list.py +13 -0
- vantage6/cli/auth/new.py +81 -0
- vantage6/cli/auth/remove.py +31 -0
- vantage6/cli/auth/start.py +94 -0
- vantage6/cli/auth/stop.py +67 -0
- vantage6/cli/cli.py +56 -5
- vantage6/cli/common/decorator.py +24 -5
- vantage6/cli/common/new.py +27 -7
- vantage6/cli/common/start.py +49 -41
- vantage6/cli/common/stop.py +23 -5
- vantage6/cli/common/utils.py +25 -0
- vantage6/cli/config.py +10 -2
- vantage6/cli/{configuration_wizard.py → configuration_create.py} +28 -15
- vantage6/cli/configuration_manager.py +97 -17
- vantage6/cli/context/__init__.py +10 -5
- vantage6/cli/context/algorithm_store.py +11 -5
- vantage6/cli/context/auth.py +125 -0
- vantage6/cli/context/base_server.py +0 -4
- vantage6/cli/context/node.py +25 -8
- vantage6/cli/context/server.py +18 -6
- vantage6/cli/dev/clean.py +28 -0
- vantage6/cli/dev/common.py +34 -0
- vantage6/cli/dev/rebuild.py +39 -0
- vantage6/cli/dev/start.py +36 -0
- vantage6/cli/dev/stop.py +23 -0
- vantage6/cli/globals.py +5 -1
- vantage6/cli/node/common/__init__.py +26 -10
- vantage6/cli/node/list.py +5 -4
- vantage6/cli/node/new.py +13 -6
- vantage6/cli/node/set_api_key.py +1 -1
- vantage6/cli/node/start.py +19 -4
- vantage6/cli/node/stop.py +153 -7
- vantage6/cli/node/task_cleanup/__init__.py +153 -0
- vantage6/cli/node/version.py +5 -4
- vantage6/cli/prometheus/monitoring_manager.py +5 -3
- vantage6/cli/sandbox/config/base.py +101 -0
- vantage6/cli/sandbox/config/core.py +300 -0
- vantage6/cli/sandbox/config/node.py +314 -0
- vantage6/cli/sandbox/data/olympic_athletes_2016.csv +2425 -0
- vantage6/cli/sandbox/new.py +207 -0
- vantage6/cli/sandbox/populate/__init__.py +173 -0
- vantage6/cli/sandbox/populate/helpers/connect_store.py +203 -0
- vantage6/cli/sandbox/populate/helpers/delete_fixtures.py +67 -0
- vantage6/cli/sandbox/populate/helpers/load_fixtures.py +476 -0
- vantage6/cli/sandbox/populate/helpers/utils.py +35 -0
- vantage6/cli/sandbox/remove.py +173 -0
- vantage6/cli/sandbox/start.py +341 -0
- vantage6/cli/sandbox/stop.py +106 -0
- vantage6/cli/server/attach.py +1 -0
- vantage6/cli/server/common/__init__.py +6 -33
- vantage6/cli/server/import_.py +137 -119
- vantage6/cli/server/new.py +22 -7
- vantage6/cli/server/start.py +10 -1
- vantage6/cli/server/stop.py +2 -0
- vantage6/cli/template/auth_config.j2 +253 -0
- vantage6/cli/template/node_config.j2 +8 -8
- vantage6/cli/template/node_config_nonk8s.j2 +33 -0
- vantage6/cli/template/server_config.j2 +10 -7
- vantage6/cli/test/common/diagnostic_runner.py +5 -3
- vantage6/cli/use/namespace.py +2 -1
- vantage6/cli/utils.py +33 -1
- {vantage6-5.0.0a35.dist-info → vantage6-5.0.0a37.dist-info}/METADATA +4 -4
- vantage6-5.0.0a37.dist-info/RECORD +97 -0
- vantage6/cli/dev/create.py +0 -693
- vantage6/cli/dev/remove.py +0 -112
- vantage6/cli/rabbitmq/__init__.py +0 -0
- vantage6/cli/rabbitmq/definitions.py +0 -26
- vantage6/cli/rabbitmq/queue_manager.py +0 -218
- vantage6/cli/rabbitmq/rabbitmq.config +0 -8
- vantage6/cli/server/shell.py +0 -54
- vantage6-5.0.0a35.dist-info/RECORD +0 -75
- /vantage6/cli/{dev → sandbox}/data/km_dataset.csv +0 -0
- {vantage6-5.0.0a35.dist-info → vantage6-5.0.0a37.dist-info}/WHEEL +0 -0
- {vantage6-5.0.0a35.dist-info → vantage6-5.0.0a37.dist-info}/entry_points.txt +0 -0
vantage6/cli/dev/rebuild.py
ADDED

@@ -0,0 +1,39 @@
+import subprocess
+import sys
+
+import click
+
+from vantage6.common import error, info
+
+from vantage6.cli.dev.common import check_devspace_installed
+
+
+@click.command()
+@click.option("--only-server", is_flag=True, help="Rebuild the server image.")
+@click.option("--only-node", is_flag=True, help="Rebuild the node image.")
+@click.option("--only-store", is_flag=True, help="Rebuild the store image.")
+@click.option("--only-ui", is_flag=True, help="Rebuild the ui image.")
+def cli_rebuild_dev_env(
+    only_server: bool, only_node: bool, only_store: bool, only_ui: bool
+):
+    """Rebuild Docker images for your development environment."""
+    check_devspace_installed()
+
+    try:
+        info("🔄 Rebuilding development environment with devspace...")
+        cmd = ["devspace", "run", "rebuild"]
+
+        if only_server:
+            cmd.append("--server")
+        if only_node:
+            cmd.append("--node")
+        if only_store:
+            cmd.append("--store")
+        if only_ui:
+            cmd.append("--ui")
+
+        subprocess.run(cmd, check=True, capture_output=False)
+        info("✅ Development environment rebuilt successfully!")
+    except subprocess.CalledProcessError as e:
+        error(f"❌ Error rebuilding development environment: {e}")
+        sys.exit(e.returncode)
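All three new dev commands guard on check_devspace_installed, which is imported from vantage6.cli.dev.common (+34 -0 in this release) but whose body is not part of the hunks shown here. A minimal sketch of what such a guard could look like, assuming it only needs to verify that the devspace binary is on the PATH; only the helper name is taken from the diff, the body below is illustrative:

# Illustrative sketch only -- the real check_devspace_installed lives in
# vantage6/cli/dev/common.py, which is not shown in this section.
import shutil
import sys

from vantage6.common import error


def check_devspace_installed() -> None:
    # Abort early when the `devspace` binary cannot be found on the PATH.
    if shutil.which("devspace") is None:
        error("devspace is not installed or not available on the PATH.")
        sys.exit(1)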
vantage6/cli/dev/start.py
ADDED

@@ -0,0 +1,36 @@
+import subprocess
+import sys
+
+import click
+
+from vantage6.common import error, info
+
+from vantage6.cli.dev.common import check_devspace_installed
+
+
+@click.command()
+def cli_start_dev_env():
+    """Start the development environment using devspace."""
+    check_devspace_installed()
+
+    try:
+        info("🚀 Starting development environment with devspace...")
+
+        # Build the devspace command
+        cmd = ["devspace", "run", "start-dev"]
+
+        # Run the devspace command
+        result = subprocess.run(cmd, check=True, capture_output=False)
+
+        if result.returncode == 0:
+            info("✅ Development environment started successfully!")
+        else:
+            error("❌ Failed to start development environment.")
+            sys.exit(result.returncode)
+
+    except subprocess.CalledProcessError as e:
+        error(f"❌ Error running devspace: {e}")
+        sys.exit(e.returncode)
+    except Exception as e:
+        error(f"❌ Unexpected error: {e}")
+        sys.exit(1)
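Both of the commands above lean on subprocess.run(..., check=True), which raises CalledProcessError for any non-zero exit code, so the except branches handle failures. A standalone illustration of that behaviour; the "false" command is just an example that fails on POSIX systems:

import subprocess

try:
    # check=True converts a non-zero exit code into an exception, which the
    # dev commands above turn into an error message and sys.exit().
    subprocess.run(["false"], check=True)
except subprocess.CalledProcessError as exc:
    print(f"command failed with exit code {exc.returncode}")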
vantage6/cli/dev/stop.py
ADDED
@@ -0,0 +1,23 @@
+import subprocess
+import sys
+
+import click
+
+from vantage6.common import error, info
+
+from vantage6.cli.dev.common import check_devspace_installed
+
+
+@click.command()
+def cli_stop_dev_env():
+    """Stop the development environment."""
+    check_devspace_installed()
+
+    try:
+        info("🛑 Stopping development environment with devspace...")
+        cmd = ["devspace", "run", "stop-dev"]
+        subprocess.run(cmd, check=True, capture_output=False)
+        info("✅ Development environment stopped successfully!")
+    except subprocess.CalledProcessError as e:
+        error(f"❌ Error stopping development environment: {e}")
+        sys.exit(e.returncode)
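How the three commands are exposed on the command line is not visible in this section; the wiring is part of vantage6/cli/cli.py (+56 -5). A hypothetical sketch of such a registration, assuming a click group named "dev" -- the group name and attachment point are assumptions, not taken from the diff:

import click

from vantage6.cli.dev.rebuild import cli_rebuild_dev_env
from vantage6.cli.dev.start import cli_start_dev_env
from vantage6.cli.dev.stop import cli_stop_dev_env


@click.group(name="dev")
def cli_dev() -> None:
    """Hypothetical `dev` command group; the real one lives in vantage6/cli/cli.py."""


# Attach the devspace-based commands, e.g. `v6 dev start`, `v6 dev stop`, ...
cli_dev.add_command(cli_start_dev_env, name="start")
cli_dev.add_command(cli_stop_dev_env, name="stop")
cli_dev.add_command(cli_rebuild_dev_env, name="rebuild")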
vantage6/cli/globals.py
CHANGED
@@ -56,11 +56,12 @@ DEFAULT_PROMETHEUS_IMAGE = "prom/prometheus"
 PROMETHEUS_CONFIG = "prometheus.yml"
 PROMETHEUS_DIR = "prometheus"
 
-# template
+# template configuration files
 TEMPLATE_FOLDER = PACKAGE_FOLDER / APPNAME / "cli" / "template"
 SERVER_TEMPLATE_FILE = "server_config.j2"
 NODE_TEMPLATE_FILE = "node_config.j2"
 ALGO_STORE_TEMPLATE_FILE = "algo_store_config.j2"
+AUTH_TEMPLATE_FILE = "auth_config.j2"
 
 
 # datasets included in the nodes of the dev network
@@ -95,8 +96,10 @@ class CLICommandName(StrEnumBase):
     NODE = "node"
     ALGORITHM = "algorithm"
     TEST = "test"
+    SANDBOX = "sandbox"
     DEV = "dev"
     USE = "use"
+    AUTH = "auth"
 
 
 class InfraComponentName(StrEnumBase):
@@ -105,3 +108,4 @@ class InfraComponentName(StrEnumBase):
     SERVER = "server"
     ALGORITHM_STORE = "store"
     NODE = "node"
+    AUTH = "auth"
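CLICommandName and InfraComponentName derive from StrEnumBase, which is defined elsewhere in vantage6. Assuming it behaves like a standard str-backed enum, the new SANDBOX and AUTH members can be compared directly against plain strings, as in this standalone illustration:

from enum import Enum


class CLICommandNameSketch(str, Enum):
    # Stand-in for the real CLICommandName(StrEnumBase); only illustrates the
    # string-comparison behaviour of the newly added members.
    SANDBOX = "sandbox"
    AUTH = "auth"


assert CLICommandNameSketch.SANDBOX == "sandbox"
assert CLICommandNameSketch("auth") is CLICommandNameSketch.AUTH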
vantage6/cli/node/common/__init__.py
CHANGED

@@ -8,11 +8,16 @@ import docker
 from colorama import Fore, Style
 
 from vantage6.common import debug, error, info
-from vantage6.common.globals import
+from vantage6.common.globals import (
+    APPNAME,
+    HTTP_LOCALHOST,
+    InstanceType,
+    RequiredNodeEnvVars,
+)
 
 from vantage6.client import UserClient
 
-from vantage6.cli.
+from vantage6.cli.configuration_create import select_configuration_questionnaire
 from vantage6.cli.context.node import NodeContext
 
 
@@ -33,7 +38,7 @@ def create_client(ctx: NodeContext) -> UserClient:
     # if the server is run locally, we need to use localhost here instead of
     # the host address of docker
     if host in ["http://host.docker.internal", "http://172.17.0.1"]:
-        host =
+        host = HTTP_LOCALHOST
     port = ctx.config["port"]
     api_path = ctx.config["api_path"]
     info(f"Connecting to server at '{host}:{port}{api_path}'")
@@ -64,26 +69,37 @@ def create_client_and_authenticate(ctx: NodeContext) -> UserClient:
         client.authenticate()
     except Exception as exc:
         error("Could not authenticate with server!")
-        debug(exc)
+        debug(str(exc))
         exit(1)
 
     return client
 
 
-def select_node(name: str, system_folders: bool) ->
+def select_node(name: str, system_folders: bool) -> str:
     """
     Let user select node through questionnaire if name is not given.
 
+    Parameters
+    ----------
+    name : str
+        Name of the node to select
+    system_folders : bool
+        Whether to use system folders or not
+
     Returns
     -------
     str
         Name of the configuration file
     """
-
-    name
-
-
-
+    try:
+        name = (
+            name
+            if name
+            else select_configuration_questionnaire(InstanceType.NODE, system_folders)
+        )
+    except Exception:
+        error("No configurations could be found!")
+        exit()
 
     # raise error if config could not be found
     if not NodeContext.config_exists(name, system_folders):
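select_node now falls back to an interactive questionnaire when no name is given and raises a clear error when no configurations exist. A hypothetical caller sketch; the calling context is an assumption, only the function signature is taken from the diff:

from vantage6.cli.node.common import select_node

# With name=None the user is prompted to pick one of the available node
# configurations; with a name given, that name is validated and returned.
config_name = select_node(name=None, system_folders=False)
print(f"Selected node configuration: {config_name}")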
vantage6/cli/node/list.py
CHANGED
@@ -3,8 +3,9 @@ import docker
 from colorama import Fore, Style
 
 from vantage6.common import warning
-from vantage6.common.globals import APPNAME
 from vantage6.common.docker.addons import check_docker_running
+from vantage6.common.globals import APPNAME
+
 from vantage6.cli.context.node import NodeContext
 from vantage6.cli.node.common import find_running_node_names
@@ -39,7 +40,7 @@ def cli_node_list() -> None:
             if f"{APPNAME}-{config.name}-system" in running_node_names
             else stopped
         )
-        click.echo(f"{config.name:25}
+        click.echo(f"{config.name:25}{status:25}System ")
 
     # user folders
     configs, f2 = NodeContext.available_configurations(system_folders=False)
@@ -49,8 +50,8 @@ def cli_node_list() -> None:
             if f"{APPNAME}-{config.name}-user" in running_node_names
             else stopped
         )
-        click.echo(f"{config.name:25}
+        click.echo(f"{config.name:25}{status:25}User ")
 
     click.echo("-" * 53)
     if len(f1) + len(f2):
-        warning(f"{Fore.RED}Failed imports: {len(f1)+len(f2)}{Style.RESET_ALL}")
+        warning(f"{Fore.RED}Failed imports: {len(f1) + len(f2)}{Style.RESET_ALL}")
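The echo lines rely on f-string width specifiers ({value:25} pads to 25 characters) to keep the name, status and folder-type columns aligned; a standalone illustration:

name, status = "my-node", "Running"
# Each field is padded to 25 characters, producing fixed-width columns.
print(f"{name:25}{status:25}System ")
print(f"{'another-node':25}{'Not running':25}User ")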
vantage6/cli/node/new.py
CHANGED
@@ -6,8 +6,10 @@ import questionary as q
 
 from vantage6.common import error, info, warning
 from vantage6.common.client.node_client import NodeClient
+from vantage6.common.context import AppContext
 from vantage6.common.globals import (
     FILE_BASED_DATABASE_TYPES,
+    HTTP_LOCALHOST,
     SERVICE_BASED_DATABASE_TYPES,
     InstanceType,
     NodePolicy,
@@ -17,6 +19,7 @@ from vantage6.common.globals import (
 
 from vantage6.cli.common.new import new
 from vantage6.cli.globals import DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL
+from vantage6.cli.utils import prompt_config_name
 
 
 @click.command()
@@ -52,8 +55,12 @@ def cli_node_new_configuration(
     Checks if the configuration already exists. If this is not the case
     a questionnaire is invoked to create a new configuration file.
     """
+    name = prompt_config_name(name)
+    dirs = AppContext.instance_folders(InstanceType.NODE, name, system_folders)
+    default_data_dir = str(dirs["data"])
     new(
-
+        config_producing_func=node_configuration_questionaire,
+        config_producing_func_args=(default_data_dir, name),
         name=name,
         system_folders=system_folders,
         namespace=namespace,
@@ -62,14 +69,14 @@ def cli_node_new_configuration(
     )
 
 
-def node_configuration_questionaire(
+def node_configuration_questionaire(data_dir: str, instance_name: str) -> dict:
     """
     Questionary to generate a config file for the node instance.
 
     Parameters
     ----------
-
-
+    data_dir : str
+        Path to the data directory of the node instance.
     instance_name : str
         Name of the node instance.
 
@@ -85,7 +92,7 @@ def node_configuration_questionaire(dirs: dict, instance_name: str) -> dict:
                "type": "text",
                "name": "server_url",
                "message": "The base-URL of the server:",
-                "default":
+                "default": HTTP_LOCALHOST,
            },
        ]
    )
@@ -117,7 +124,7 @@ def node_configuration_questionaire(dirs: dict, instance_name: str) -> dict:
                "type": "text",
                "name": "task_dir",
                "message": "Task directory path:",
-                "default":
+                "default": data_dir,
            },
        ]
    )
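node_configuration_questionaire builds its questions as PyInquirer-style dictionaries passed to questionary.prompt. A minimal standalone example of that pattern, with a literal standing in for HTTP_LOCALHOST, whose exact value is not shown in this diff:

import questionary as q

answers = q.prompt(
    [
        {
            "type": "text",
            "name": "server_url",
            "message": "The base-URL of the server:",
            # literal placeholder for HTTP_LOCALHOST
            "default": "http://localhost",
        }
    ]
)
print(answers["server_url"])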
vantage6/cli/node/set_api_key.py
CHANGED
@@ -3,7 +3,7 @@ import questionary as q
 
 from vantage6.common import ensure_config_dir_writable, error, info
 
-from vantage6.cli.
+from vantage6.cli.configuration_create import NodeConfigurationManager
 from vantage6.cli.context.node import NodeContext
 from vantage6.cli.globals import DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL
 from vantage6.cli.node.common import select_node
vantage6/cli/node/start.py
CHANGED
@@ -12,6 +12,7 @@ from vantage6.cli.common.start import (
 from vantage6.cli.common.utils import (
     attach_logs,
     create_directory_if_not_exists,
+    select_context_and_namespace,
 )
 from vantage6.cli.context.node import NodeContext
 from vantage6.cli.globals import ChartName
@@ -27,7 +28,14 @@ from vantage6.node.globals import DEFAULT_PROXY_SERVER_PORT
     default=False,
     help="Show node logs on the current console after starting the node",
 )
-@
+@click.option("--local-chart-dir", default=None, help="Local chart directory to use")
+@click.option("--sandbox/--no-sandbox", "sandbox", default=False)
+@click_insert_context(
+    InstanceType.NODE,
+    include_name=True,
+    include_system_folders=True,
+    sandbox_param="sandbox",
+)
 def cli_node_start(
     ctx: NodeContext,
     name: str,
@@ -35,13 +43,19 @@ def cli_node_start(
     context: str,
     namespace: str,
     attach: bool,
+    local_chart_dir: str,
 ) -> None:
     """
     Start the node.
     """
     info("Starting node...")
 
-    prestart_checks(ctx, InstanceType.NODE, name, system_folders
+    prestart_checks(ctx, InstanceType.NODE, name, system_folders)
+
+    context, namespace = select_context_and_namespace(
+        context=context,
+        namespace=namespace,
+    )
 
     create_directory_if_not_exists(ctx.log_dir)
     create_directory_if_not_exists(ctx.data_dir)
@@ -96,13 +110,14 @@ def cli_node_start(
         values_file=ctx.config_file,
         context=context,
         namespace=namespace,
+        local_chart_dir=local_chart_dir,
     )
 
     # start port forward for the node proxy server
     start_port_forward(
         service_name=f"{ctx.helm_release_name}-node-service",
-        service_port=ctx.config["node"].get("
-        port=ctx.config["node"].get("
+        service_port=ctx.config["node"].get("proxyPort", DEFAULT_PROXY_SERVER_PORT),
+        port=ctx.config["node"].get("proxyPort", DEFAULT_PROXY_SERVER_PORT),
         context=context,
         namespace=namespace,
     )
vantage6/cli/node/stop.py
CHANGED
@@ -1,13 +1,20 @@
 import click
+from kubernetes import client as k8s_client, config as k8s_config
+from kubernetes.client import ApiException
+from kubernetes.config.config_exception import ConfigException
 
-from vantage6.common import info
-from vantage6.common.globals import InstanceType
+from vantage6.common import error, info, warning
+from vantage6.common.globals import APPNAME, InstanceType
 
 from vantage6.cli.common.stop import execute_stop, helm_uninstall, stop_port_forward
+from vantage6.cli.common.utils import get_config_name_from_helm_release_name
+from vantage6.cli.context import get_context
+from vantage6.cli.context.node import NodeContext
 from vantage6.cli.globals import (
     DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL,
     InfraComponentName,
 )
+from vantage6.cli.node.task_cleanup import delete_job_related_pods
 
 
 @click.command()
@@ -29,18 +36,23 @@ from vantage6.cli.globals import (
     "system folders. This is the default.",
 )
 @click.option("--all", "all_nodes", flag_value=True, help="Stop all running nodes")
+@click.option(
+    "--sandbox", "is_sandbox", flag_value=True, help="Stop a sandbox environment"
+)
 def cli_node_stop(
     name: str,
     context: str,
     namespace: str,
     system_folders: bool,
     all_nodes: bool,
+    is_sandbox: bool,
 ) -> None:
     """
     Stop one or all running nodes.
     """
     execute_stop(
         stop_function=_stop_node,
+        stop_function_args={"system_folders": system_folders, "is_sandbox": is_sandbox},
         instance_type=InstanceType.NODE,
         infra_component=InfraComponentName.NODE,
         stop_all=all_nodes,
@@ -48,24 +60,158 @@ def cli_node_stop(
         namespace=namespace,
         context=context,
         system_folders=system_folders,
+        is_sandbox=is_sandbox,
     )
 
 
-def _stop_node(
+def _stop_node(
+    node_helm_name: str,
+    namespace: str,
+    context: str,
+    system_folders: bool,
+    is_sandbox: bool,
+) -> None:
     """
     Stop a node
 
     Parameters
     ----------
-
+    node_helm_name : str
        Name of the node to stop
    namespace : str
        Kubernetes namespace to use
    context : str
        Kubernetes context to use
+    system_folders: bool
+        Whether to use the system folders or not
+    is_sandbox: bool
+        Whether node is a sandbox node or not
     """
-    helm_uninstall(release_name=
+    helm_uninstall(release_name=node_helm_name, context=context, namespace=namespace)
+
+    stop_port_forward(service_name=f"{node_helm_name}-node-service")
+
+    _stop_node_tasks(node_helm_name, system_folders, is_sandbox)
+
+    info(f"Node {node_helm_name} stopped successfully.")
+
+
+def _stop_node_tasks(
+    node_helm_name: str, system_folders: bool, is_sandbox: bool
+) -> None:
+    """
+    Stop the tasks of a node
+    """
+    node_name = get_config_name_from_helm_release_name(node_helm_name)
+    node_ctx = get_context(
+        InstanceType.NODE, node_name, system_folders, is_sandbox=is_sandbox
+    )
+
+    task_namespace = node_ctx.config.get("node", {}).get("taskNamespace")
+    if not task_namespace:
+        warning("Could not find node's task namespace. Node tasks will not be stopped.")
+        return
+
+    # detect tasks from the task namespace that this node is assigned to
+    cleanup_task_jobs(task_namespace, node_ctx, all_nodes=False)
+
+
+def cleanup_task_jobs(
+    namespace: str, node_ctx: NodeContext | None = None, all_nodes: bool = False
+) -> bool:
+    """
+    Cleanup Vantage6 task jobs in a given namespace.
+
+    Parameters
+    ----------
+    namespace: str
+        Namespace to cleanup
+    node_ctx: NodeContext | None
+        Node context to cleanup. If not given, all_nodes must be True.
+    all_nodes: bool
+        Cleanup all nodes. If not given, node_name must be given.
+
+    Returns
+    -------
+    bool
+        True if cleanup was successful, False otherwise
+    """
+    info(f"Cleaning up Vantage6 task jobs in namespace '{namespace}'")
+
+    if not all_nodes and not node_ctx:
+        error("Either all_nodes or node_ctx must be given to cleanup task jobs")
+        return False
+
+    # Load Kubernetes configuration (in-cluster first, fallback to kubeconfig)
+    try:
+        k8s_config.load_incluster_config()
+    except ConfigException:
+        try:
+            k8s_config.load_kube_config()
+        except ConfigException as exc:
+            error(f"Failed to load Kubernetes configuration: {exc}")
+            return False
+
+    core_api = k8s_client.CoreV1Api()
+    batch_api = k8s_client.BatchV1Api()
+
+    jobs = _get_jobs(namespace, batch_api)
+
+    deletions = 0
+    for job in jobs:
+        if not all_nodes and job.metadata.labels.get("node_id") != node_ctx.identifier:
+            # if all_nodes is False, we should only delete jobs assigned to the current
+            # node
+            continue
+        elif all_nodes and not _is_vantage6_task_job(job):
+            # if all_nodes is True, we should only delete vantage6 task jobs, not other
+            # jobs
+            continue
+
+        run_id = _get_job_run_id(job)
+        if run_id is None:
+            error(f"Job '{job.metadata.name}' has no run_id annotation, skipping...")
+            continue
+
+        # Use shared cleanup to delete job, pods and related secret
+        job_name = job.metadata.name
+        info(f"Deleting job '{job_name}' (run_id={run_id})")
+        delete_job_related_pods(
+            run_id=run_id,
+            container_name=f"{APPNAME}-run-{run_id}",
+            namespace=namespace,
+            core_api=core_api,
+            batch_api=batch_api,
+        )
+        deletions += 1
+
+    if deletions == 0:
+        info(f"No Vantage6 task jobs found to delete in namespace '{namespace}'")
+    else:
+        info(f"Deleted {deletions} Vantage6 task job(s) in namespace '{namespace}'")
+    return True
+
+
+def _is_vantage6_task_job(job: k8s_client.V1Job) -> bool:
+    # Vantage6 task jobs can be identified by their name, which is of the form
+    # "vantage6-run-<run_id>"
+    return job.metadata.name.startswith(f"{APPNAME}-run-")
+
+
+def _get_jobs(
+    namespace: str, batch_api: k8s_client.BatchV1Api
+) -> list[k8s_client.V1Job]:
+    try:
+        return batch_api.list_namespaced_job(namespace=namespace).items
+    except ApiException as exc:
+        error(f"Failed to list jobs in namespace {namespace}: {exc}")
+        return []
 
-    stop_port_forward(service_name=f"{node_name}-vantage6-node-service")
 
-
+def _get_job_run_id(job: k8s_client.V1Job) -> int | None:
+    annotations = job.metadata.annotations or {}
+    try:
+        return int(annotations.get("run_id"))
+    except ValueError:
+        error(f"Job '{job.metadata.name}' has no run_id annotation, skipping")
+        return None
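cleanup_task_jobs delegates the actual deletion to delete_job_related_pods, imported from the new vantage6/cli/node/task_cleanup package (+153 lines) that is not shown in this section. A hypothetical sketch of such a helper, assuming foreground propagation is enough to take the job's pods down with it; names and behaviour below are illustrative, not the real implementation:

from kubernetes import client as k8s_client
from kubernetes.client import ApiException

from vantage6.common import error, info


def delete_job_related_pods_sketch(
    run_id: int,
    container_name: str,
    namespace: str,
    core_api: k8s_client.CoreV1Api,
    batch_api: k8s_client.BatchV1Api,
) -> None:
    # Assumed naming convention: the job carries the same name as the run
    # container, i.e. "vantage6-run-<run_id>".
    job_name = container_name
    # core_api would be used by the real helper to remove the run's related
    # secret (per the comment in the diff); omitted in this sketch.
    try:
        # Foreground propagation deletes the pods owned by the job as well.
        batch_api.delete_namespaced_job(
            name=job_name, namespace=namespace, propagation_policy="Foreground"
        )
        info(f"Requested deletion of job '{job_name}' for run {run_id}")
    except ApiException as exc:
        error(f"Could not delete job '{job_name}': {exc}")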