vantage6 5.0.0a36__py3-none-any.whl → 5.0.0a38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vantage6 might be problematic.

Files changed (80)
  1. vantage6/cli/algorithm/generate_algorithm_json.py +0 -1
  2. vantage6/cli/algostore/attach.py +28 -3
  3. vantage6/cli/algostore/list.py +2 -2
  4. vantage6/cli/algostore/new.py +3 -2
  5. vantage6/cli/algostore/start.py +25 -6
  6. vantage6/cli/algostore/stop.py +3 -0
  7. vantage6/cli/algostore/version.py +62 -0
  8. vantage6/cli/auth/attach.py +1 -1
  9. vantage6/cli/auth/list.py +2 -2
  10. vantage6/cli/auth/new.py +3 -2
  11. vantage6/cli/auth/remove.py +58 -0
  12. vantage6/cli/auth/start.py +27 -9
  13. vantage6/cli/auth/stop.py +3 -0
  14. vantage6/cli/cli.py +21 -0
  15. vantage6/cli/common/attach.py +114 -0
  16. vantage6/cli/common/decorator.py +25 -4
  17. vantage6/cli/common/list.py +68 -0
  18. vantage6/cli/common/new.py +27 -7
  19. vantage6/cli/common/remove.py +18 -0
  20. vantage6/cli/common/start.py +48 -40
  21. vantage6/cli/common/stop.py +16 -4
  22. vantage6/cli/common/utils.py +65 -74
  23. vantage6/cli/common/version.py +82 -0
  24. vantage6/cli/config.py +10 -2
  25. vantage6/cli/{configuration_wizard.py → configuration_create.py} +22 -14
  26. vantage6/cli/configuration_manager.py +70 -21
  27. vantage6/cli/context/__init__.py +10 -5
  28. vantage6/cli/context/algorithm_store.py +13 -7
  29. vantage6/cli/context/auth.py +23 -5
  30. vantage6/cli/context/node.py +25 -8
  31. vantage6/cli/context/server.py +18 -6
  32. vantage6/cli/globals.py +1 -0
  33. vantage6/cli/node/attach.py +27 -3
  34. vantage6/cli/node/common/__init__.py +26 -10
  35. vantage6/cli/node/common/task_cleanup.py +153 -0
  36. vantage6/cli/node/list.py +3 -44
  37. vantage6/cli/node/new.py +13 -6
  38. vantage6/cli/node/set_api_key.py +1 -1
  39. vantage6/cli/node/start.py +30 -7
  40. vantage6/cli/node/stop.py +151 -7
  41. vantage6/cli/node/version.py +96 -33
  42. vantage6/cli/sandbox/config/base.py +109 -0
  43. vantage6/cli/sandbox/config/core.py +300 -0
  44. vantage6/cli/sandbox/config/node.py +311 -0
  45. vantage6/cli/sandbox/data/km_dataset.csv +2401 -0
  46. vantage6/cli/sandbox/data/olympic_athletes_2016.csv +2425 -0
  47. vantage6/cli/sandbox/new.py +207 -0
  48. vantage6/cli/sandbox/populate/__init__.py +173 -0
  49. vantage6/cli/sandbox/populate/helpers/connect_store.py +203 -0
  50. vantage6/cli/sandbox/populate/helpers/delete_fixtures.py +67 -0
  51. vantage6/cli/sandbox/populate/helpers/load_fixtures.py +476 -0
  52. vantage6/cli/sandbox/populate/helpers/utils.py +35 -0
  53. vantage6/cli/sandbox/remove.py +155 -0
  54. vantage6/cli/sandbox/start.py +349 -0
  55. vantage6/cli/sandbox/stop.py +106 -0
  56. vantage6/cli/server/attach.py +28 -3
  57. vantage6/cli/server/common/__init__.py +5 -6
  58. vantage6/cli/server/import_.py +137 -119
  59. vantage6/cli/server/list.py +2 -2
  60. vantage6/cli/server/new.py +5 -3
  61. vantage6/cli/server/start.py +21 -4
  62. vantage6/cli/server/stop.py +2 -0
  63. vantage6/cli/server/version.py +31 -18
  64. vantage6/cli/template/algo_store_config.j2 +3 -0
  65. vantage6/cli/template/auth_config.j2 +24 -1
  66. vantage6/cli/template/node_config.j2 +2 -0
  67. vantage6/cli/template/server_config.j2 +10 -7
  68. vantage6/cli/use/context.py +8 -1
  69. vantage6/cli/use/namespace.py +10 -7
  70. vantage6/cli/utils.py +33 -1
  71. vantage6/cli/utils_kubernetes.py +270 -0
  72. {vantage6-5.0.0a36.dist-info → vantage6-5.0.0a38.dist-info}/METADATA +4 -4
  73. vantage6-5.0.0a38.dist-info/RECORD +102 -0
  74. vantage6/cli/rabbitmq/__init__.py +0 -0
  75. vantage6/cli/rabbitmq/definitions.py +0 -26
  76. vantage6/cli/rabbitmq/queue_manager.py +0 -220
  77. vantage6/cli/rabbitmq/rabbitmq.config +0 -8
  78. vantage6-5.0.0a36.dist-info/RECORD +0 -86
  79. {vantage6-5.0.0a36.dist-info → vantage6-5.0.0a38.dist-info}/WHEEL +0 -0
  80. {vantage6-5.0.0a36.dist-info → vantage6-5.0.0a38.dist-info}/entry_points.txt +0 -0
vantage6/cli/node/stop.py CHANGED
@@ -1,13 +1,23 @@
  import click
+ from kubernetes import client as k8s_client
+ from kubernetes.client import ApiException

- from vantage6.common import info
- from vantage6.common.globals import InstanceType
+ from vantage6.common import error, info, warning
+ from vantage6.common.globals import APPNAME, InstanceType

  from vantage6.cli.common.stop import execute_stop, helm_uninstall, stop_port_forward
+ from vantage6.cli.common.utils import (
+     extract_name_and_is_sandbox,
+     get_config_name_from_helm_release_name,
+ )
+ from vantage6.cli.context import get_context
+ from vantage6.cli.context.node import NodeContext
  from vantage6.cli.globals import (
      DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL,
      InfraComponentName,
  )
+ from vantage6.cli.node.common.task_cleanup import delete_job_related_pods
+ from vantage6.cli.utils_kubernetes import create_kubernetes_apis_with_ssl_handling


  @click.command()
@@ -29,18 +39,24 @@ from vantage6.cli.globals import (
      "system folders. This is the default.",
  )
  @click.option("--all", "all_nodes", flag_value=True, help="Stop all running nodes")
+ @click.option(
+     "--sandbox", "is_sandbox", flag_value=True, help="Stop a sandbox environment"
+ )
  def cli_node_stop(
      name: str,
      context: str,
      namespace: str,
      system_folders: bool,
      all_nodes: bool,
+     is_sandbox: bool,
  ) -> None:
      """
      Stop one or all running nodes.
      """
+     name, is_sandbox = extract_name_and_is_sandbox(name, is_sandbox)
      execute_stop(
          stop_function=_stop_node,
+         stop_function_args={"system_folders": system_folders, "is_sandbox": is_sandbox},
          instance_type=InstanceType.NODE,
          infra_component=InfraComponentName.NODE,
          stop_all=all_nodes,
@@ -48,24 +64,152 @@ def cli_node_stop(
          namespace=namespace,
          context=context,
          system_folders=system_folders,
+         is_sandbox=is_sandbox,
      )


- def _stop_node(node_name: str, namespace: str, context: str) -> None:
+ def _stop_node(
+     node_helm_name: str,
+     namespace: str,
+     context: str,
+     system_folders: bool,
+     is_sandbox: bool,
+ ) -> None:
      """
      Stop a node

      Parameters
      ----------
-     node_name : str
+     node_helm_name : str
          Name of the node to stop
      namespace : str
          Kubernetes namespace to use
      context : str
          Kubernetes context to use
+     system_folders: bool
+         Whether to use the system folders or not
+     is_sandbox: bool
+         Whether node is a sandbox node or not
      """
-     helm_uninstall(release_name=node_name, context=context, namespace=namespace)
+     helm_uninstall(release_name=node_helm_name, context=context, namespace=namespace)
+
+     stop_port_forward(service_name=f"{node_helm_name}-node-service")
+
+     _stop_node_tasks(node_helm_name, system_folders, is_sandbox)
+
+     info(f"Node {node_helm_name} stopped successfully.")
+
+
+ def _stop_node_tasks(
+     node_helm_name: str, system_folders: bool, is_sandbox: bool
+ ) -> None:
+     """
+     Stop the tasks of a node
+     """
+     node_name = get_config_name_from_helm_release_name(node_helm_name)
+     node_ctx = get_context(
+         InstanceType.NODE, node_name, system_folders, is_sandbox=is_sandbox
+     )
+
+     task_namespace = node_ctx.config.get("node", {}).get("taskNamespace")
+     if not task_namespace:
+         warning("Could not find node's task namespace. Node tasks will not be stopped.")
+         return
+
+     # detect tasks from the task namespace that this node is assigned to
+     cleanup_task_jobs(task_namespace, node_ctx, all_nodes=False)
+
+
+ def cleanup_task_jobs(
+     namespace: str, node_ctx: NodeContext | None = None, all_nodes: bool = False
+ ) -> bool:
+     """
+     Cleanup Vantage6 task jobs in a given namespace.
+
+     Parameters
+     ----------
+     namespace: str
+         Namespace to cleanup
+     node_ctx: NodeContext | None
+         Node context to cleanup. If not given, all_nodes must be True.
+     all_nodes: bool
+         Cleanup all nodes. If not given, node_name must be given.
+
+     Returns
+     -------
+     bool
+         True if cleanup was successful, False otherwise
+     """
+     info(f"Cleaning up Vantage6 task jobs in namespace '{namespace}'")
+
+     if not all_nodes and not node_ctx:
+         error("Either all_nodes or node_ctx must be given to cleanup task jobs")
+         return False
+
+     # Load Kubernetes configuration with SSL handling
+     try:
+         core_api, batch_api = create_kubernetes_apis_with_ssl_handling()
+     except RuntimeError as exc:
+         error(f"Failed to load Kubernetes configuration: {exc}")
+         return False
+
+     jobs = _get_jobs(namespace, batch_api)
+
+     deletions = 0
+     for job in jobs:
+         if not all_nodes and job.metadata.labels.get("node_id") != node_ctx.identifier:
+             # if all_nodes is False, we should only delete jobs assigned to the current
+             # node
+             continue
+         elif all_nodes and not _is_vantage6_task_job(job):
+             # if all_nodes is True, we should only delete vantage6 task jobs, not other
+             # jobs
+             continue
+
+         run_id = _get_job_run_id(job)
+         if run_id is None:
+             error(f"Job '{job.metadata.name}' has no run_id annotation, skipping...")
+             continue
+
+         # Use shared cleanup to delete job, pods and related secret
+         job_name = job.metadata.name
+         info(f"Deleting job '{job_name}' (run_id={run_id})")
+         delete_job_related_pods(
+             run_id=run_id,
+             container_name=f"{APPNAME}-run-{run_id}",
+             namespace=namespace,
+             core_api=core_api,
+             batch_api=batch_api,
+         )
+         deletions += 1
+
+     if deletions == 0:
+         info(f"No Vantage6 task jobs found to delete in namespace '{namespace}'")
+     else:
+         info(f"Deleted {deletions} Vantage6 task job(s) in namespace '{namespace}'")
+     return True
+
+
+ def _is_vantage6_task_job(job: k8s_client.V1Job) -> bool:
+     # Vantage6 task jobs can be identified by their name, which is of the form
+     # "vantage6-run-<run_id>"
+     return job.metadata.name.startswith(f"{APPNAME}-run-")
+
+
+ def _get_jobs(
+     namespace: str, batch_api: k8s_client.BatchV1Api
+ ) -> list[k8s_client.V1Job]:
+     try:
+         return batch_api.list_namespaced_job(namespace=namespace).items
+     except ApiException as exc:
+         error(f"Failed to list jobs in namespace {namespace}: {exc}")
+         return []

-     stop_port_forward(service_name=f"{node_name}-vantage6-node-service")

-     info(f"Node {node_name} stopped successfully.")
+ def _get_job_run_id(job: k8s_client.V1Job) -> int | None:
+     annotations = job.metadata.annotations or {}
+     try:
+         return int(annotations.get("run_id"))
+     except ValueError:
+         error(f"Job '{job.metadata.name}' has no run_id annotation, skipping")
+         return None
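
For reference, the cleanup logic above relies on three conventions that a task job carries in Kubernetes: its name starts with "vantage6-run-<run_id>" (per the comment in _is_vantage6_task_job), its run id is stored in a "run_id" annotation, and the owning node is recorded in a "node_id" label. Below is a minimal, self-contained sketch of those conventions using locally constructed V1Job objects, so no cluster is needed; the helper names are illustrative and not part of the package.

from kubernetes import client as k8s_client

APPNAME = "vantage6"  # prefix used for task job names, per _is_vantage6_task_job above


def looks_like_task_job(job: k8s_client.V1Job) -> bool:
    # Same naming convention as the diff: task jobs are called "vantage6-run-<run_id>".
    return job.metadata.name.startswith(f"{APPNAME}-run-")


def read_run_id(job: k8s_client.V1Job) -> int | None:
    # The run id is stored as a "run_id" annotation on the job.
    run_id = (job.metadata.annotations or {}).get("run_id")
    return int(run_id) if run_id is not None and run_id.isdigit() else None


# Locally constructed jobs; no cluster or kubeconfig required.
task_job = k8s_client.V1Job(
    metadata=k8s_client.V1ObjectMeta(
        name=f"{APPNAME}-run-42",
        labels={"node_id": "some-node-id"},  # which node the job belongs to
        annotations={"run_id": "42"},
    )
)
other_job = k8s_client.V1Job(metadata=k8s_client.V1ObjectMeta(name="unrelated-job"))

print(looks_like_task_job(task_job), read_run_id(task_job))    # True 42
print(looks_like_task_job(other_job), read_run_id(other_job))  # False None

In cleanup_task_jobs, the "node_id" label is compared against the node context's identifier, so stopping one node leaves the task jobs of other nodes untouched.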
vantage6/cli/node/version.py CHANGED
@@ -1,14 +1,16 @@
  import click
- import docker
- import questionary as q
+ from kubernetes import config as k8s_config
+ from kubernetes.config.config_exception import ConfigException
+ from kubernetes.stream import stream

- from vantage6.common import error
- from vantage6.common.docker.addons import check_docker_running
- from vantage6.common.globals import APPNAME
+ from vantage6.common import error, info
+ from vantage6.common.globals import APPNAME, InstanceType

  from vantage6.cli import __version__
+ from vantage6.cli.common.utils import select_context_and_namespace
+ from vantage6.cli.common.version import get_and_select_ctx
  from vantage6.cli.globals import DEFAULT_NODE_SYSTEM_FOLDERS as N_FOL
- from vantage6.cli.node.common import find_running_node_names
+ from vantage6.cli.utils_kubernetes import get_core_api_with_ssl_handling


  @click.command()
@@ -27,36 +29,97 @@ from vantage6.cli.node.common import find_running_node_names
      help="Search for configuration in user folders rather than "
      "system folders. This is the default",
  )
- def cli_node_version(name: str, system_folders: bool) -> None:
+ @click.option("--context", default=None, help="Kubernetes context to use")
+ @click.option("--namespace", default=None, help="Kubernetes namespace to use")
+ @click.option(
+     "--sandbox", "is_sandbox", flag_value=True, help="Is this a sandbox environment?"
+ )
+ def cli_node_version(
+     name: str, system_folders: bool, context: str, namespace: str, is_sandbox: bool
+ ) -> None:
      """
      Returns current version of a vantage6 node.
      """
-     check_docker_running()
-     client = docker.from_env()
+     context, namespace = select_context_and_namespace(
+         context=context,
+         namespace=namespace,
+     )
+     ctx = get_and_select_ctx(
+         InstanceType.NODE, name, system_folders, context, namespace, is_sandbox
+     )
+     version = _get_node_version_from_k8s(ctx.helm_release_name, namespace, context)
+     info("")
+     info(f"Node version: {version}")
+     info(f"CLI version: {__version__}")

-     running_node_names = find_running_node_names(client)

-     if not name:
-         if not running_node_names:
-             error(
-                 "No nodes are running! You can only check the version for "
-                 "nodes that are running"
-             )
-             exit(1)
+ def _get_node_version_from_k8s(
+     helm_release: str,
+     namespace: str,
+     context: str,
+ ) -> str:
+     """
+     Runs 'vnode-local version' in the node pod belonging to the Helm release.
+     """
+     pod = _get_pod_name_for_helm_release(helm_release, namespace, context)
+     output = _exec_pod_command(
+         pod_name=pod,
+         namespace=namespace,
+         command=["vnode-local", "version"],
+     )
+     return output.strip()
+
+
+ def _get_pod_name_for_helm_release(
+     helm_release: str,
+     namespace: str,
+     context: str,
+ ) -> str:
+     """
+     Returns the first pod name for a given Helm release in a namespace.
+     Looks up pods using the standard Helm label 'app.kubernetes.io/instance'.
+     """
+     try:
+         # Load kubeconfig (context optional). Falls back to in-cluster if not available.
          try:
-             name = q.select(
-                 "Select the node you wish to inspect:", choices=running_node_names
-             ).unsafe_ask()
-         except KeyboardInterrupt:
-             error("Aborted by user!")
-             return
-     else:
-         post_fix = "system" if system_folders else "user"
-         name = f"{APPNAME}-{name}-{post_fix}"
-
-     if name in running_node_names:
-         container = client.containers.get(name)
-         version = container.exec_run(cmd="vnode-local version", stdout=True)
-         click.echo({"node": version.output.decode("utf-8"), "cli": __version__})
-     else:
-         error(f"Node {name} is not running! Cannot provide version...")
+             k8s_config.load_kube_config(context=context)  # desktop/dev
+         except ConfigException:
+             k8s_config.load_incluster_config()  # in-cluster
+     except ConfigException as exc:
+         raise RuntimeError(f"Failed to load Kubernetes config: {exc}") from exc
+
+     core = get_core_api_with_ssl_handling()
+     selector = f"app={APPNAME}-node,release={helm_release}"
+     pods = core.list_namespaced_pod(namespace=namespace, label_selector=selector).items
+     if not pods:
+         error(f"No pods found for Helm release '{helm_release}' in ns '{namespace}'")
+         exit(1)
+     # Prefer a Ready pod
+     for p in pods:
+         for cond in p.status.conditions or []:
+             if cond.type == "Ready" and cond.status == "True":
+                 return p.metadata.name
+     # Fallback to first pod
+     return pods[0].metadata.name
+
+
+ def _exec_pod_command(
+     pod_name: str,
+     namespace: str,
+     command: list[str],
+ ) -> str:
+     """
+     Executes a command inside the specified pod (and optional container) and returns stdout.
+     """
+     core = get_core_api_with_ssl_handling()
+     resp = stream(
+         core.connect_get_namespaced_pod_exec,
+         pod_name,
+         namespace,
+         command=command,
+         stderr=True,
+         stdin=False,
+         stdout=True,
+         tty=False,
+     )
+     return resp
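
The pod lookup above prefers a pod whose "Ready" condition is "True" and otherwise falls back to the first match. A small, self-contained sketch of that selection rule with locally constructed pod objects (no cluster needed); pick_pod and make_pod are illustrative helpers, not part of vantage6.

from kubernetes import client as k8s_client


def pick_pod(pods: list[k8s_client.V1Pod]) -> str:
    # Prefer a pod whose "Ready" condition is "True"; fall back to the first pod.
    for pod in pods:
        for cond in pod.status.conditions or []:
            if cond.type == "Ready" and cond.status == "True":
                return pod.metadata.name
    return pods[0].metadata.name


def make_pod(name: str, ready: bool) -> k8s_client.V1Pod:
    # Build a pod object in memory with a single "Ready" condition.
    return k8s_client.V1Pod(
        metadata=k8s_client.V1ObjectMeta(name=name),
        status=k8s_client.V1PodStatus(
            conditions=[
                k8s_client.V1PodCondition(type="Ready", status="True" if ready else "False")
            ]
        ),
    )


print(pick_pod([make_pod("node-a-0", ready=False), make_pod("node-a-1", ready=True)]))  # node-a-1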
vantage6/cli/sandbox/config/base.py ADDED
@@ -0,0 +1,109 @@
+ from pathlib import Path
+
+ import yaml
+
+ from vantage6.common.globals import InstanceType
+
+ from vantage6.cli.context import select_context_class
+ from vantage6.cli.sandbox.populate.helpers.utils import replace_wsl_path
+
+
+ class BaseSandboxConfigManager:
+     """
+     Base class for sandbox configuration managers.
+
+     Parameters
+     ----------
+     server_name : str
+         Name of the server.
+     custom_data_dir : Path | None
+         Path to the custom data directory. Useful on WSL because of mount issues for
+         default directories.
+     """
+
+     def __init__(self, server_name: str, custom_data_dir: Path | None) -> None:
+         self.server_name = server_name
+         self.custom_data_dir = Path(custom_data_dir) if custom_data_dir else None
+
+     @staticmethod
+     def _read_extra_config_file(extra_config_file: Path | None) -> dict:
+         """Reads extra configuration file.
+
+         Parameters
+         ----------
+         extra_config_file : Path | None
+             Path to file with additional configuration.
+
+         Returns
+         -------
+         dict
+             Extra configuration parsed from YAML. Empty dict if none provided.
+         """
+         if extra_config_file:
+             with open(extra_config_file, "r", encoding="utf-8") as f:
+                 loaded = yaml.safe_load(f) or {}
+             if not isinstance(loaded, dict):
+                 # Ensure we always return a dictionary
+                 return {"value": loaded}
+             return loaded
+         return {}
+
+     def _create_and_get_data_dir(
+         self,
+         instance_type: InstanceType,
+         is_data_folder: bool = False,
+         node_name: str | None = None,
+     ) -> Path:
+         """
+         Create and get the data directory.
+
+         Parameters
+         ----------
+         instance_type: InstanceType
+             Type of vantage6 component
+         is_data_folder: bool
+             Whether or not to create the data folder or a config folder. This is only
+             used for node databases. Default is False.
+         node_name: str | None
+             Name of the node. Only used if is_data_folder is True.
+
+         Returns
+         -------
+         Path
+             Path to the data directory
+         """
+         ctx_class = select_context_class(instance_type)
+         folders = ctx_class.instance_folders(
+             instance_type=InstanceType.SERVER,
+             instance_name=self.server_name,
+             system_folders=False,
+         )
+         main_data_dir = (
+             Path(folders["dev"]) if not self.custom_data_dir else self.custom_data_dir
+         )
+
+         if instance_type == InstanceType.SERVER:
+             data_dir = main_data_dir / self.server_name / "server"
+         elif instance_type == InstanceType.ALGORITHM_STORE:
+             data_dir = main_data_dir / self.server_name / "store"
+         elif instance_type == InstanceType.NODE:
+             if is_data_folder:
+                 if node_name:
+                     last_subfolder = f"data_{node_name}"
+                 else:
+                     last_subfolder = "data"
+             else:
+                 last_subfolder = "node"
+             data_dir = main_data_dir / self.server_name / last_subfolder
+         else:
+             raise ValueError(f"Invalid instance type to get data dir: {instance_type}")
+
+         # For the directory to be created, ensure that if a WSL path is used, the path
+         # is converted to /mnt/wsl to create the directory on the host (not
+         # /run/desktop/mnt/host/wsl as will raise non-existent directory errors)
+         data_dir = replace_wsl_path(data_dir, to_mnt_wsl=True)
+         data_dir.mkdir(parents=True, exist_ok=True)
+         # now ensure that the wsl path is properly replaced to /run/desktop/mnt/host/wsl
+         # if it is a WSL path, because that path will be used in the node configuration
+         # files and is required to successfully mount the volumes.
+         return replace_wsl_path(data_dir, to_mnt_wsl=False)
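
A hedged sketch of how a concrete manager might build on this base class, assuming the vantage6 CLI package is installed; DemoSandboxConfigManager and its build method are hypothetical and only illustrate _read_extra_config_file and _create_and_get_data_dir (the real managers presumably live in the new sandbox/config/core.py and sandbox/config/node.py listed above). Note that build creates the sandbox's server data directory on disk as a side effect.

from pathlib import Path

from vantage6.common.globals import InstanceType

from vantage6.cli.sandbox.config.base import BaseSandboxConfigManager


class DemoSandboxConfigManager(BaseSandboxConfigManager):
    """Hypothetical subclass, for illustration only."""

    def build(self, extra_config_file: Path | None = None) -> dict:
        # Overlay any extra YAML configuration and make sure the server data
        # directory for this sandbox exists on disk.
        extra = self._read_extra_config_file(extra_config_file)
        data_dir = self._create_and_get_data_dir(InstanceType.SERVER)
        return {"server_name": self.server_name, "data_dir": str(data_dir), **extra}


manager = DemoSandboxConfigManager(server_name="demo", custom_data_dir=None)
print(manager.build())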